├── tsconfig.json ├── src │ ├── types.ts │ ├── tool-handlers │ │ ├── validate-model.ts │ │ ├── get-model-info.ts │ │ ├── search-models.ts │ │ └── chat-completion.ts │ ├── model-cache.ts │ ├── index.ts │ ├── openrouter-api.ts │ └── tool-handlers.ts ├── .gitignore ├── .dxtignore ├── package.json ├── SOLUTIONS.md ├── CONTRIBUTING.md ├── manifest.json ├── CHANGELOG.md ├── CLAUDE.md ├── README.md ├── LICENSE └── pnpm-lock.yaml /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2022", 4 | "module": "NodeNext", 5 | "moduleResolution": "NodeNext", 6 | "outDir": "./dist", 7 | "rootDir": "./src", 8 | "strict": true, 9 | "esModuleInterop": true, 10 | "skipLibCheck": true, 11 | "forceConsistentCasingInFileNames": true, 12 | "declaration": true, 13 | }, 14 | "include": ["src/**/*"], 15 | "exclude": ["node_modules", "dist"] 16 | } 17 | -------------------------------------------------------------------------------- /src/types.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Represents a single content item in a tool response. 3 | * Currently, only text content is supported. 4 | */ 5 | export interface ResponseContentItem { 6 | type: "text"; 7 | text: string; 8 | } 9 | 10 | /** 11 | * Unified structure for all tool handler responses. 12 | * Follows the principles outlined in the refactoring plan. 13 | */ 14 | export interface ToolResult { 15 | /** Indicates whether the tool execution resulted in an error. */ 16 | isError: boolean; 17 | /** An array of content items, typically containing a single text item with the result or error message. */ 18 | content: ResponseContentItem[]; 19 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | /ref/ 4 | /docs/ 5 | /sourcecode.* 6 | .docs/ 7 | .roo* 8 | .roo/* 9 | *.dxt 10 | 11 | # dependencies 12 | /node_modules 13 | /.pnp 14 | .pnp.* 15 | .yarn/* 16 | !.yarn/patches 17 | !.yarn/plugins 18 | !.yarn/releases 19 | !.yarn/versions 20 | 21 | # testing 22 | /coverage 23 | 24 | # docs 25 | project.* 26 | 27 | # production 28 | /build 29 | /dist 30 | 31 | # misc 32 | .DS_Store 33 | *.pem 34 | 35 | # debug 36 | npm-debug.log* 37 | yarn-debug.log* 38 | yarn-error.log* 39 | 40 | # env files (can opt-in for committing if needed) 41 | .env 42 | 43 | # typescript 44 | *.tsbuildinfo 45 | -------------------------------------------------------------------------------- /.dxtignore: -------------------------------------------------------------------------------- 1 | # Source files 2 | src/ 3 | *.ts 4 | tsconfig.json 5 | 6 | # Test files 7 | *.test.* 8 | test/ 9 | tests/ 10 | coverage/ 11 | 12 | # Development dependencies 13 | node_modules/@types/ 14 | node_modules/typescript/ 15 | node_modules/shx/ 16 | node_modules/@eslint/ 17 | 18 | # Development tools and configs 19 | .eslintrc* 20 | .prettierrc* 21 | jest.config.* 22 | nodemon.json 23 | 24 | # Version control 25 | .git/ 26 | .gitignore 27 | 28 | # Documentation 29 | README.md 30 | CONTRIBUTING.md 31 | CHANGELOG.md 32 | SOLUTIONS.md 33 | *.md 34 | 35 | # IDE and editor files 36 | .vscode/ 37 | .idea/ 38 | *.swp 39 | *.swo 40 | *~ 41 | 42 | # CLI and AI files 43 | .claude/* 44 | 45 | # OS files 46 | .DS_Store 47 | Thumbs.db 48 | 49 | # Logs 50 | *.log 51 | logs/ 52 | 53 | # Temporary files 54 | tmp/ 55 | temp/ 56 | .tmp/ -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@mcpservers/openrouterai", 3 | "version": "2.3.0", 4 | "type": "module", 5 | "main": "dist/index.js", 6 | "bin": { 7 | "openrouterai": "dist/index.js" 8 | }, 9 | "files": [ 10 | "dist", 11 | "README.md", 12 | "LICENSE" 13 | ], 14 | "scripts": { 15 | "build": "tsc && shx chmod +x dist/*.js", 16 | "prepare": "npm run build", 17 | "watch": "tsc --watch" 18 | }, 19 | "keywords": [ 20 | "mcp", 21 | "openrouter", 22 | "ai", 23 | "llm", 24 | "modelcontextprotocol" 25 | ], 26 | "author": "bossying", 27 | "license": "Apache-2.0", 28 | "description": "MCP server for OpenRouter.ai integration", 29 | "repository": { 30 | "type": "git", 31 | "url": "git+https://github.com/heltonteixeira/openrouterai.git" 32 | }, 33 | "bugs": { 34 | "url": "https://github.com/heltonteixeira/openrouterai/issues" 35 | }, 36 | "homepage": "https://github.com/heltonteixeira/openrouterai#readme", 37 | "engines": { 38 | "node": ">=18.0.0" 39 | }, 40 | "dependencies": { 41 | "@modelcontextprotocol/sdk": "1.4.1", 42 | "axios": "^1.7.9", 43 | "openai": "^4.83.0", 44 | "typescript": "^5.7.3" 45 | }, 46 | "devDependencies": { 47 | "@types/node": "^22.13.1", 48 | "shx": "^0.3.4" 49 | }, 50 | "overrides": { 51 | "uri-js": "npm:uri-js-replace", 52 | "whatwg-url": "^14.1.0" 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/tool-handlers/validate-model.ts: -------------------------------------------------------------------------------- 1 | import { ModelCache } from '../model-cache.js'; 2 | import { ToolResult } from '../types.js'; // Import the unified type 3 | 4 | export interface ValidateModelToolRequest { 5 | model: string; 6 | } 7 | 8 | // Update function signature to return Promise 9 | export async function 
handleValidateModel( 10 | request: { params: { arguments: ValidateModelToolRequest } }, 11 | modelCache: ModelCache 12 | ): Promise<ToolResult> { 13 | const { model } = request.params.arguments; 14 | 15 | // Wrap core logic in try...catch 16 | try { 17 | const isValid = await modelCache.validateModel(model); 18 | 19 | // Modify return logic based on validity 20 | if (isValid) { 21 | // Return success ToolResult 22 | return { 23 | isError: false, 24 | content: [ 25 | { 26 | type: 'text', 27 | // Keep simple JSON for valid response 28 | text: JSON.stringify({ model: model, valid: true }, null, 2), 29 | }, 30 | ], 31 | }; 32 | } else { 33 | // Return error ToolResult for invalid model 34 | return { 35 | isError: true, 36 | content: [ 37 | { 38 | type: 'text', 39 | // Use simple error string 40 | text: `Error: Model not found: ${model}`, 41 | }, 42 | ], 43 | }; 44 | } 45 | } catch (error) { 46 | // Catch errors during model validation 47 | console.error(`Error validating model ${model}:`, error); 48 | const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred'; 49 | return { 50 | isError: true, 51 | content: [ 52 | { 53 | type: 'text', 54 | text: `Error: Failed to validate model: ${errorMessage}`, 55 | }, 56 | ], 57 | }; 58 | } 59 | } -------------------------------------------------------------------------------- /src/model-cache.ts: -------------------------------------------------------------------------------- 1 | export interface OpenRouterModel { 2 | id: string; 3 | name: string; 4 | description?: string; 5 | context_length: number; 6 | pricing: { 7 | prompt: string; 8 | completion: string; 9 | unit: number; 10 | }; 11 | top_provider?: { 12 | max_completion_tokens?: number; 13 | max_context_length?: number; 14 | }; 15 | capabilities?: { 16 | functions?: boolean; 17 | tools?: boolean; 18 | vision?: boolean; 19 | json_mode?: boolean; 20 | }; 21 | } 22 | 23 | export interface OpenRouterModelResponse { 24 | data: OpenRouterModel[]; 25 | timestamp?: number; 26 | } 27 | 28 | export interface CachedModelResponse extends OpenRouterModelResponse { 29 | timestamp: number; 30 | } 31 | 32 | // Simple in-memory state management 33 | export class ModelCache { 34 | private static instance: ModelCache; 35 | private cachedModels: CachedModelResponse | null = null; 36 | private readonly cacheExpiry = 3600000; // 1 hour in milliseconds 37 | 38 | private constructor() {} 39 | 40 | static getInstance(): ModelCache { 41 | if (!ModelCache.instance) { 42 | ModelCache.instance = new ModelCache(); 43 | } 44 | return ModelCache.instance; 45 | } 46 | 47 | private validateCache(): boolean { 48 | if (!this.cachedModels) return false; 49 | return Date.now() - this.cachedModels.timestamp <= this.cacheExpiry; 50 | } 51 | 52 | setCachedModels(models: OpenRouterModelResponse & { timestamp: number }) { 53 | this.cachedModels = models as CachedModelResponse; 54 | } 55 | 56 | getCachedModels(): CachedModelResponse | null { 57 | return this.validateCache() ? 
this.cachedModels : null; 58 | } 59 | 60 | clearCache() { 61 | this.cachedModels = null; 62 | } 63 | 64 | async validateModel(model: string): Promise<boolean> { 65 | const models = this.getCachedModels(); 66 | if (!models) return false; 67 | return models.data.some(m => m.id === model); 68 | } 69 | 70 | async getModelInfo(model: string): Promise<OpenRouterModel | undefined> { 71 | const models = this.getCachedModels(); 72 | if (!models) return undefined; 73 | return models.data.find(m => m.id === model); 74 | } 75 | } -------------------------------------------------------------------------------- /SOLUTIONS.md: -------------------------------------------------------------------------------- 1 | # Error Prevention Guidelines 2 | 3 | ## Quick Reference 4 | **DXT Properties**: `dxt_version`, `entry_point`, `mcp_config`, `user_config`, `sensitive` 5 | **Valid Platforms**: `win32`, `darwin`, `linux` (NOT `windows`, `macos`) 6 | **CLI Commands**: `dxt validate`, `dxt pack`, `dxt sign`, `dxt verify` 7 | **MCP Tools**: `chat_completion()`, `search_models()`, `get_model_info()`, `validate_model()` 8 | 9 | ## Core Standards 10 | - **DXT Manifest Structure**: Use `server.mcp_config.command` with args array, direct property mapping for `user_config` (no nested properties object) 11 | - **Platform Compatibility**: Use Node.js platform identifiers (`darwin` for macOS), specify minimum versions with `>=` syntax 12 | - **Environment Variables**: Map user config with `${user_config.property_name}` syntax in `mcp_config.env` 13 | - **TypeScript**: Import types with `.js` extensions for ES modules, use strict compilation with declaration files 14 | 15 | ## Error Resolution Process 16 | 1. **Validate Manifest**: Run `dxt validate manifest.json` to catch schema violations early 17 | 2. **Fix Platform Names**: Replace user-friendly names with Node.js platform constants 18 | 3. **Restructure Config**: Move user configuration properties to root level of `user_config` object 19 | 4. **Add Required Fields**: Include `mcp_config.command` and `args` array for server execution 20 | 5. 
**Remove Unsupported Fields**: Strip `enum` arrays and unsupported validation properties 21 | 22 | ## Architecture Patterns 23 | - **MCP Server Integration**: Package compiled `dist/` files with `node_modules` production dependencies only 24 | - Use `.dxtignore` to exclude source files, tests, and development tools 25 | - Set executable permissions on entry point during build process 26 | - **Configuration Management**: Implement sensitive data handling with OS keychain integration through `sensitive: true` flag 27 | 28 | ## Workflow Standards 29 | - **DXT Packaging**: Build TypeScript first, validate manifest, then pack to avoid runtime errors 30 | - **Dependency Management**: Test production-only installs to verify bundled dependencies are sufficient 31 | 32 | ## Quality Gates 33 | - **Bundle Size**: Target under 50MB for typical MCP servers through selective dependency inclusion 34 | - **Cross-Platform**: Test on Windows, macOS, Linux with Node.js >=18.0.0 requirement -------------------------------------------------------------------------------- /src/tool-handlers/get-model-info.ts: -------------------------------------------------------------------------------- 1 | import { ModelCache } from '../model-cache.js'; 2 | import { ToolResult } from '../types.js'; // Import the unified type 3 | 4 | export interface GetModelInfoToolRequest { 5 | model: string; 6 | } 7 | 8 | // Update function signature to return Promise 9 | export async function handleGetModelInfo( 10 | request: { params: { arguments: GetModelInfoToolRequest } }, 11 | modelCache: ModelCache 12 | ): Promise<ToolResult> { 13 | const { model } = request.params.arguments; 14 | 15 | // Wrap core logic in try...catch 16 | try { 17 | const modelInfo = await modelCache.getModelInfo(model); 18 | 19 | if (!modelInfo) { 20 | return { 21 | isError: true, // Ensure isError is present 22 | content: [ 23 | { 24 | type: 'text', 25 | // Add "Error: " prefix 26 | text: `Error: Model not found: ${model}`, 27 | }, 28 | ], 29 | }; 30 | } 31 | 32 | // Format successful response 33 | const response = { 34 | id: `info-${Date.now()}`, 35 | object: 'model', 36 | created: Math.floor(Date.now() / 1000), 37 | owned_by: modelInfo.id.split('/')[0], 38 | permission: [], 39 | root: modelInfo.id, 40 | parent: null, 41 | data: { 42 | id: modelInfo.id, 43 | name: modelInfo.name, 44 | description: modelInfo.description || 'No description available', 45 | context_length: modelInfo.context_length, 46 | pricing: { 47 | prompt: `$${modelInfo.pricing.prompt}/1K tokens`, 48 | completion: `$${modelInfo.pricing.completion}/1K tokens` 49 | }, 50 | capabilities: { 51 | functions: modelInfo.capabilities?.functions || false, 52 | tools: modelInfo.capabilities?.tools || false, 53 | vision: modelInfo.capabilities?.vision || false, 54 | json_mode: modelInfo.capabilities?.json_mode || false 55 | } 56 | } 57 | }; 58 | 59 | // Add isError: false to successful return 60 | return { 61 | isError: false, 62 | content: [ 63 | { 64 | type: 'text', 65 | text: JSON.stringify(response, null, 2), 66 | }, 67 | ], 68 | }; 69 | } catch (error) { 70 | // Catch errors during model info retrieval 71 | console.error(`Error getting model info for ${model}:`, error); 72 | const errorMessage = error instanceof Error ? 
error.message : 'An unknown error occurred'; 73 | return { 74 | isError: true, 75 | content: [ 76 | { 77 | type: 'text', 78 | text: `Error: Failed to get model info: ${errorMessage}`, 79 | }, 80 | ], 81 | }; 82 | } 83 | } -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | import { Server } from '@modelcontextprotocol/sdk/server/index.js'; 3 | import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'; 4 | 5 | import { ToolHandlers } from './tool-handlers.js'; 6 | 7 | const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY; 8 | const DEFAULT_MODEL = process.env.OPENROUTER_DEFAULT_MODEL; 9 | const DEFAULT_MAX_TOKENS = process.env.OPENROUTER_MAX_TOKENS; // String | undefined 10 | const DEFAULT_QUANTIZATIONS = process.env.OPENROUTER_PROVIDER_QUANTIZATIONS?.split(',').map(q => q.trim()).filter(q => q) || undefined; // string[] | undefined 11 | const DEFAULT_IGNORED_PROVIDERS = process.env.OPENROUTER_PROVIDER_IGNORE?.split(',').map(p => p.trim()).filter(p => p) || undefined; // string[] | undefined 12 | 13 | // Phase 2 Provider Defaults 14 | const DEFAULT_PROVIDER_SORT = process.env.OPENROUTER_PROVIDER_SORT as "price" | "throughput" | "latency" | undefined; // Validate? 15 | const DEFAULT_PROVIDER_ORDER = process.env.OPENROUTER_PROVIDER_ORDER?.split(',').map(p => p.trim()).filter(p => p) || undefined; // string[] | undefined 16 | const DEFAULT_PROVIDER_REQUIRE_PARAMETERS = process.env.OPENROUTER_PROVIDER_REQUIRE_PARAMETERS?.toLowerCase() === 'true' ? true : undefined; // boolean | undefined 17 | const DEFAULT_PROVIDER_DATA_COLLECTION = process.env.OPENROUTER_PROVIDER_DATA_COLLECTION as "allow" | "deny" | undefined; // Validate? 18 | const DEFAULT_PROVIDER_ALLOW_FALLBACKS = process.env.OPENROUTER_PROVIDER_ALLOW_FALLBACKS?.toLowerCase() === 'true' ? 
true : undefined; // boolean | undefined 19 | 20 | if (!OPENROUTER_API_KEY) { 21 | throw new Error('OPENROUTER_API_KEY environment variable is required'); 22 | } 23 | 24 | class OpenRouterServer { 25 | private server: Server; 26 | private toolHandlers: ToolHandlers; 27 | 28 | constructor() { 29 | this.server = new Server( 30 | { 31 | name: 'openrouter-server', 32 | version: '2.2.0', 33 | }, 34 | { 35 | capabilities: { 36 | tools: {}, 37 | }, 38 | } 39 | ); 40 | 41 | // Initialize tool handlers 42 | this.toolHandlers = new ToolHandlers( 43 | this.server, 44 | OPENROUTER_API_KEY!, 45 | DEFAULT_MODEL, 46 | DEFAULT_MAX_TOKENS, 47 | DEFAULT_QUANTIZATIONS, 48 | DEFAULT_IGNORED_PROVIDERS, 49 | // Pass Phase 2 Defaults 50 | DEFAULT_PROVIDER_SORT, 51 | DEFAULT_PROVIDER_ORDER, 52 | DEFAULT_PROVIDER_REQUIRE_PARAMETERS, 53 | DEFAULT_PROVIDER_DATA_COLLECTION, 54 | DEFAULT_PROVIDER_ALLOW_FALLBACKS 55 | ); 56 | 57 | // Error handling 58 | this.server.onerror = (error) => console.error('[MCP Error]', error); 59 | process.on('SIGINT', async () => { 60 | await this.server.close(); 61 | process.exit(0); 62 | }); 63 | } 64 | 65 | async run() { 66 | const transport = new StdioServerTransport(); 67 | await this.server.connect(transport); 68 | console.error('OpenRouter MCP server running on stdio'); 69 | } 70 | } 71 | 72 | const server = new OpenRouterServer(); 73 | server.run().catch(console.error); 74 | -------------------------------------------------------------------------------- /src/openrouter-api.ts: -------------------------------------------------------------------------------- 1 | import axios, { AxiosError, AxiosInstance } from 'axios'; 2 | import { setTimeout } from 'timers/promises'; 3 | import { OpenRouterModelResponse } from './model-cache.js'; 4 | 5 | export interface RateLimitState { 6 | remaining: number; 7 | reset: number; 8 | total: number; 9 | } 10 | 11 | export const RETRY_DELAYS = [1000, 2000, 4000]; // Exponential backoff delays in ms 12 | 13 | export class OpenRouterAPIClient { 14 | private axiosInstance: AxiosInstance; 15 | private rateLimit: RateLimitState = { 16 | remaining: 50, // Default conservative value 17 | reset: Date.now() + 60000, 18 | total: 50 19 | }; 20 | 21 | constructor(apiKey: string) { 22 | // Initialize axios instance for OpenRouter API 23 | this.axiosInstance = axios.create({ 24 | baseURL: 'https://openrouter.ai/api/v1', 25 | headers: { 26 | 'Authorization': `Bearer ${apiKey}`, 27 | 'HTTP-Referer': 'https://github.com/heltonteixeira/openrouterai', 28 | 'X-Title': 'MCP OpenRouter Server' 29 | } 30 | }); 31 | 32 | // Add response interceptor for rate limit headers 33 | this.axiosInstance.interceptors.response.use( 34 | (response: any) => { 35 | const remaining = parseInt(response.headers['x-ratelimit-remaining'] || '50'); 36 | const reset = parseInt(response.headers['x-ratelimit-reset'] || '60'); 37 | const total = parseInt(response.headers['x-ratelimit-limit'] || '50'); 38 | 39 | this.rateLimit = { 40 | remaining, 41 | reset: Date.now() + (reset * 1000), 42 | total 43 | }; 44 | 45 | return response; 46 | }, 47 | async (error: AxiosError) => { 48 | if (error.response?.status === 429) { 49 | console.error('Rate limit exceeded, waiting for reset...'); 50 | const resetAfter = parseInt(error.response.headers['retry-after'] || '60'); 51 | await setTimeout(resetAfter * 1000); 52 | return this.axiosInstance.request(error.config!); 53 | } 54 | throw error; 55 | } 56 | ); 57 | } 58 | 59 | async fetchModels(): Promise<OpenRouterModelResponse> { 60 | // Check rate limits before making request 61 | if 
(this.rateLimit.remaining <= 0 && Date.now() < this.rateLimit.reset) { 62 | const waitTime = this.rateLimit.reset - Date.now(); 63 | await setTimeout(waitTime); 64 | } 65 | 66 | // Retry mechanism for fetching models 67 | for (let i = 0; i <= RETRY_DELAYS.length; i++) { 68 | try { 69 | const response = await this.axiosInstance.get('/models'); 70 | return { 71 | data: response.data.data, 72 | timestamp: Date.now() 73 | }; 74 | } catch (error) { 75 | if (i === RETRY_DELAYS.length) throw error; 76 | await setTimeout(RETRY_DELAYS[i]); 77 | } 78 | } 79 | 80 | throw new Error('Failed to fetch models after multiple attempts'); 81 | } 82 | 83 | async chatCompletion(params: { 84 | model: string, 85 | messages: any[], 86 | temperature?: number 87 | }) { 88 | return this.axiosInstance.post('/chat/completions', { 89 | model: params.model, 90 | messages: params.messages, 91 | temperature: params.temperature ?? 1 92 | }); 93 | } 94 | 95 | getRateLimit(): RateLimitState { 96 | return { ...this.rateLimit }; 97 | } 98 | } -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to OpenRouter MCP Server 2 | 3 | ## Development Setup 4 | 5 | 1. Clone the repository: 6 | ```bash 7 | git clone https://github.com/heltonteixeira/openrouterai.git 8 | cd openrouterai 9 | ``` 10 | 11 | 2. Install dependencies: 12 | ```bash 13 | npm install 14 | ``` 15 | 16 | 3. Build the project: 17 | ```bash 18 | npm run build 19 | ``` 20 | 21 | ## Features 22 | 23 | - Chat completion support for all OpenRouter.ai models 24 | - Advanced model search and filtering: 25 | - Search by name, description, or provider 26 | - Filter by context length range 27 | - Filter by maximum price per token 28 | - Filter by model capabilities 29 | - Configurable result limits 30 | - Robust API handling: 31 | - Rate limiting with automatic retry 32 | - Exponential backoff for failed requests 33 | - Request caching with automatic expiration 34 | - Performance optimizations: 35 | - Model information caching (1-hour expiry) 36 | - Efficient model capability tracking 37 | - Error handling and reporting: 38 | - Detailed error messages with applied filters 39 | - Rate limit handling 40 | - API error recovery 41 | 42 | ## Model Information 43 | 44 | The server provides comprehensive model information: 45 | 46 | - **Pricing Data**: Accurate cost per token for both prompt and completion 47 | - **Context Length**: Model-specific maximum context window 48 | - **Capabilities**: Support for: 49 | - Function calling 50 | - Tool use 51 | - Vision/image processing 52 | - JSON mode 53 | - **Provider Details**: Maximum completion tokens and context lengths 54 | 55 | ## Rate Limiting 56 | 57 | The server implements intelligent rate limit handling: 58 | 59 | - Tracks remaining requests through response headers 60 | - Automatically waits when rate limits are reached 61 | - Implements exponential backoff for failed requests 62 | - Provides clear error messages for rate limit issues 63 | 64 | ## Caching 65 | 66 | Model information is cached for optimal performance: 67 | 68 | - 1-hour cache duration for model data 69 | - Automatic cache invalidation 70 | - Memory-efficient storage 71 | 72 | ## Error Handling 73 | 74 | Robust error handling for all operations: 75 | 76 | - Detailed error responses with applied filters 77 | - Rate limit detection and recovery 78 | - API error reporting with details 79 | - Model validation failures 80 | - 
Cache-related issues 81 | - Network timeouts and retries 82 | 83 | ## Tool Implementation Examples 84 | 85 | ### chat_completion 86 | ```typescript 87 | const response = await mcpClient.useTool("openrouterai", "chat_completion", { 88 | model: "anthropic/claude-3-opus-20240229", // Optional if default model is set in config 89 | messages: [ 90 | { role: "user", content: "Hello!" } 91 | ], 92 | temperature: 0.7 93 | }); 94 | ``` 95 | 96 | ### search_models 97 | ```typescript 98 | const models = await mcpClient.useTool("openrouterai", "search_models", { 99 | query: "claude", // Optional: Search in name/description 100 | provider: "anthropic", // Optional: Filter by provider 101 | minContextLength: 10000, // Optional: Minimum context length 102 | maxContextLength: 100000, // Optional: Maximum context length 103 | maxPromptPrice: 0.01, // Optional: Max price per 1K tokens for prompts 104 | maxCompletionPrice: 0.02, // Optional: Max price per 1K tokens for completions 105 | capabilities: { // Optional: Required capabilities 106 | functions: true, 107 | tools: true, 108 | vision: false, 109 | json_mode: true 110 | }, 111 | limit: 10 // Optional: Maximum results (default: 10, max: 50) 112 | }); 113 | ``` 114 | 115 | ### get_model_info 116 | ```typescript 117 | const info = await mcpClient.useTool("openrouterai", "get_model_info", { 118 | model: "anthropic/claude-3-opus-20240229" 119 | }); 120 | ``` 121 | 122 | ### validate_model 123 | ```typescript 124 | const validation = await mcpClient.useTool("openrouterai", "validate_model", { 125 | model: "anthropic/claude-3-opus-20240229" 126 | }); 127 | ``` 128 | 129 | ## License 130 | 131 | Apache License 2.0 - see LICENSE file for details. 132 | -------------------------------------------------------------------------------- /manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "dxt_version": "0.1", 3 | "name": "openrouter-ai", 4 | "version": "2.3.0", 5 | "description": "MCP server for OpenRouter.ai integration with access to diverse AI models, provider routing controls, and unified response format", 6 | "author": { 7 | "name": "bossying" 8 | }, 9 | "server": { 10 | "type": "node", 11 | "entry_point": "dist/index.js", 12 | "mcp_config": { 13 | "command": "node", 14 | "args": ["dist/index.js"], 15 | "env": { 16 | "OPENROUTER_API_KEY": "${user_config.openrouter_api_key}", 17 | "OPENROUTER_DEFAULT_MODEL": "${user_config.default_model}", 18 | "OPENROUTER_MAX_TOKENS": "${user_config.max_tokens}", 19 | "OPENROUTER_PROVIDER_QUANTIZATIONS": "${user_config.provider_quantizations}", 20 | "OPENROUTER_PROVIDER_IGNORE": "${user_config.provider_ignore}", 21 | "OPENROUTER_PROVIDER_SORT": "${user_config.provider_sort}", 22 | "OPENROUTER_PROVIDER_ORDER": "${user_config.provider_order}", 23 | "OPENROUTER_PROVIDER_REQUIRE_PARAMETERS": "${user_config.provider_require_parameters}", 24 | "OPENROUTER_PROVIDER_DATA_COLLECTION": "${user_config.provider_data_collection}", 25 | "OPENROUTER_PROVIDER_ALLOW_FALLBACKS": "${user_config.provider_allow_fallbacks}" 26 | } 27 | } 28 | }, 29 | "compatibility": { 30 | "platforms": ["win32", "darwin", "linux"], 31 | "node_version": ">=18.0.0" 32 | }, 33 | "user_config": { 34 | "openrouter_api_key": { 35 | "type": "string", 36 | "title": "OpenRouter API Key", 37 | "description": "Your OpenRouter.ai API key from https://openrouter.ai/keys", 38 | "sensitive": true 39 | }, 40 | "default_model": { 41 | "type": "string", 42 | "title": "Default Model", 43 | "description": "Default model to use if 
not specified in requests (e.g., openrouter/auto)", 44 | "default": "openrouter/auto" 45 | }, 46 | "max_tokens": { 47 | "type": "number", 48 | "title": "Maximum Tokens", 49 | "description": "Default maximum number of tokens to generate", 50 | "default": 1024, 51 | "min": 1, 52 | "max": 200000 53 | }, 54 | "provider_quantizations": { 55 | "type": "string", 56 | "title": "Provider Quantizations", 57 | "description": "Comma-separated list of quantization levels (e.g., fp16,int8)", 58 | "default": "" 59 | }, 60 | "provider_ignore": { 61 | "type": "string", 62 | "title": "Ignored Providers", 63 | "description": "Comma-separated list of provider names to ignore (e.g., openai,anthropic)", 64 | "default": "" 65 | }, 66 | "provider_sort": { 67 | "type": "string", 68 | "title": "Provider Sort Order", 69 | "description": "Default sort criteria for providers (price, throughput, or latency)", 70 | "default": "" 71 | }, 72 | "provider_order": { 73 | "type": "string", 74 | "title": "Provider Priority Order", 75 | "description": "Comma-separated list of preferred provider IDs in priority order", 76 | "default": "" 77 | }, 78 | "provider_require_parameters": { 79 | "type": "boolean", 80 | "title": "Require Parameter Support", 81 | "description": "Only use providers that support all specified request parameters", 82 | "default": false 83 | }, 84 | "provider_data_collection": { 85 | "type": "string", 86 | "title": "Data Collection Policy", 87 | "description": "Default data collection preference (allow or deny)", 88 | "default": "" 89 | }, 90 | "provider_allow_fallbacks": { 91 | "type": "boolean", 92 | "title": "Allow Provider Fallbacks", 93 | "description": "Allow fallback to other providers if preferred ones fail", 94 | "default": true 95 | } 96 | }, 97 | "keywords": [ 98 | "mcp", 99 | "openrouter", 100 | "ai", 101 | "llm", 102 | "chat", 103 | "completion", 104 | "model", 105 | "routing" 106 | ], 107 | "license": "Apache-2.0", 108 | "homepage": "https://github.com/heltonteixeira/openrouterai", 109 | "repository": { 110 | "type": "git", 111 | "url": "https://github.com/heltonteixeira/openrouterai.git" 112 | } 113 | } -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 6 | 7 | ## [Unreleased] 8 | ## [2.3.0] - 2025-06-18 9 | ### Added 10 | - (provider-routing) Comprehensive provider routing controls for chat_completion tool 11 | - max_tokens parameter to filter providers by output capacity 12 | - Provider filtering by quantizations and ignore lists 13 | - Provider sorting by price, throughput, or latency 14 | - Explicit provider preference using order 15 | - Model suffix shortcuts (:nitro, :floor) 16 | - Environment variables for default routing behavior 17 | 18 | ### Changed 19 | - Updated descriptions for `chat_completion`, `get_model_info`, and `validate_model` tools for improved clarity and detail. 
20 | 21 | ## [2.2.0] - 2025-03-27 22 | ### Added 23 | - Unified ToolResult response format for all tool handlers 24 | - Structured error messages with "Error: " prefix consistency 25 | - Comprehensive error handling documentation in README 26 | 27 | ### Changed 28 | - Updated all tool handlers to use new response format 29 | - Improved error messages with contextual information 30 | - Refactored core error handling infrastructure 31 | 32 | ### Fixed 33 | - Removed residual JSON-RPC error code references 34 | - Standardized success/error response structures 35 | 36 | 37 | ## [2.1.0] - 2025-02-10 38 | ### Added 39 | - Conversation context support 40 | - MCP server badge 41 | ### Refactor 42 | - Modular structure 43 | ### Chore 44 | - Bump version to 2.1.0 45 | - Update dependencies and TypeScript config 46 | - Update .gitignore remove project specific files 47 | ### Documentation 48 | - Improve README with comprehensive documentation and badges 49 | 50 | ## [2.0.3] - 2024-03-22 51 | ### Changed 52 | - Updated dependencies to latest versions: 53 | - axios to ^1.7.9 54 | - openai to ^4.77.0 55 | - typescript to ^5.7.2 56 | - Updated .gitignore file list 57 | - Fixed package.json bin configuration format 58 | ### Added 59 | - Package overrides to fix punycode deprecation warning: 60 | - Added uri-js-replace to replace deprecated uri-js 61 | - Updated whatwg-url to v14.1.0 62 | ### Fixed 63 | - Updated repository URLs to correct GitHub repository 64 | - Fixed Node.js punycode deprecation warning (DEP0040) 65 | 66 | ## [2.0.2] - 2024-03-21 67 | ### Changed 68 | - Simplified binary name to 'openrouterai' 69 | 70 | ## [2.0.1] - 2024-03-21 71 | ### Added 72 | - Complete npm package configuration 73 | - Binary support for CLI installation 74 | - Repository and documentation links 75 | - Node.js engine requirement specification 76 | - PrepublishOnly script for build safety 77 | 78 | ## [2.0.0] - 2024-03-20 79 | ### Breaking Changes 80 | - Remove list_models tool in favor of enhanced search_models 81 | - Remove set_default_model and clear_default_model in favor of config-based default model 82 | - Move default model to MCP configuration via OPENROUTER_DEFAULT_MODEL environment variable 83 | 84 | ### Added 85 | - Comprehensive model filtering capabilities 86 | - Direct OpenRouter /models endpoint integration 87 | - Accurate model data (pricing, context length, capabilities) 88 | - Rate limiting with exponential backoff 89 | - Model capability validation 90 | - Cache invalidation strategy 91 | - Enhanced error handling with detailed feedback 92 | 93 | ### Changed 94 | - Rename StateManager to ModelCache for better clarity 95 | - Update error messages to reference MCP configuration 96 | - Switch from OpenAI SDK models.list() to direct OpenRouter API calls 97 | - Update package name to @mcpservers/openrouterai 98 | - Update documentation to follow MCP server standards 99 | 100 | ## [1.0.0] - 2024-03-15 101 | ### Added 102 | - Initial OpenRouter MCP server implementation 103 | - Basic model management features and state handling 104 | - Core API integration 105 | - Project documentation and configuration files 106 | 107 | ### Changed 108 | - Update license to Apache 2.0 109 | 110 | [Unreleased]: https://github.com/mcpservers/openrouterai/compare/v2.3.0...HEAD 111 | [2.3.0]: https://github.com/mcpservers/openrouterai/compare/v2.2.0...v2.3.0 112 | [2.2.0]: https://github.com/mcpservers/openrouterai/compare/v2.1.0...v2.2.0 113 | [2.0.2]: https://github.com/mcpservers/openrouterai/compare/v2.0.1...v2.0.2 114 | 
[2.0.1]: https://github.com/mcpservers/openrouterai/compare/v2.0.0...v2.0.1 115 | [2.0.0]: https://github.com/mcpservers/openrouterai/compare/v1.0.0...v2.0.0 116 | [1.0.0]: https://github.com/mcpservers/openrouterai/releases/tag/v1.0.0 117 | -------------------------------------------------------------------------------- /CLAUDE.md: -------------------------------------------------------------------------------- 1 | # CLAUDE.md 2 | 3 | This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. 4 | 5 | ## Error Prevention and Learning 6 | 7 | ### Using SOLUTIONS.md as Reference 8 | - **SOLUTIONS.md** contains documented mistakes and their resolutions to prevent recurrence 9 | - **Always consult SOLUTIONS.md** before implementing changes to avoid past errors 10 | - **Quick Reference Section** provides critical patterns: DXT properties, platform identifiers, CLI commands 11 | - **Core Standards** section outlines established patterns for manifest structure, TypeScript, and environment handling 12 | - **Error Resolution Process** provides step-by-step validation workflow 13 | 14 | ### Error Prevention Workflow 15 | 1. **Check SOLUTIONS.md first** - Review quick reference and relevant error patterns 16 | 2. **Validate Early** - Use `dxt validate manifest.json` before any changes 17 | 3. **Follow Established Patterns** - Use documented solutions from similar past issues 18 | 4. **Update SOLUTIONS.md** - Document new errors and their solutions for future reference 19 | 20 | ## Essential Commands 21 | 22 | ### Build & Development 23 | - `pnpm run build` - TypeScript compilation and makes dist/*.js executable 24 | - `pnpm run watch` - Development with live TypeScript watching 25 | - `pnpm run prepare` - Pre-publish build (runs automatically on npm install) 26 | 27 | ### Testing 28 | - No test framework currently configured 29 | - Manual testing via MCP client integration 30 | 31 | ### Package Management 32 | - Uses `pnpm` for package management 33 | - ES modules with Node.js 18+ required 34 | - Entry point: `dist/index.js` (built from `src/index.ts`) 35 | 36 | ## Architecture Overview 37 | 38 | ### Core MCP Server Structure 39 | - **Entry Point**: `src/index.ts` - Main server initialization with environment variable parsing 40 | - **Tool Registration**: `src/tool-handlers.ts` - Central dispatcher for all MCP tools with comprehensive input schemas 41 | - **API Layer**: `src/openrouter-api.ts` - OpenRouter.ai API client with rate limiting and retry logic 42 | - **Caching**: `src/model-cache.ts` - Singleton model cache with 1-hour expiry 43 | - **Types**: `src/types.ts` - Unified `ToolResult` interface for all tool responses 44 | 45 | ### Tool Handler Pattern 46 | Individual tool handlers in `src/tool-handlers/`: 47 | - `chat-completion.ts` - OpenAI-compatible chat completions with provider routing 48 | - `search-models.ts` - Model discovery with filtering capabilities 49 | - `get-model-info.ts` - Single model metadata retrieval 50 | - `validate-model.ts` - Model ID validation 51 | 52 | ### Response Architecture 53 | All tools return standardized `ToolResult` structure: 54 | ```typescript 55 | interface ToolResult { 56 | isError: boolean; 57 | content: Array<{ type: "text"; text: string }>; 58 | } 59 | ``` 60 | 61 | ## Key Technical Patterns 62 | 63 | ### Environment Configuration 64 | The server reads extensive environment variables for defaults: 65 | - `OPENROUTER_API_KEY` (required) 66 | - `OPENROUTER_DEFAULT_MODEL` 67 | - `OPENROUTER_MAX_TOKENS` 68 | - 
Phase 1: `OPENROUTER_PROVIDER_QUANTIZATIONS`, `OPENROUTER_PROVIDER_IGNORE` 69 | - Phase 2: `OPENROUTER_PROVIDER_SORT`, `OPENROUTER_PROVIDER_ORDER`, etc. 70 | 71 | ### Error Handling Strategy 72 | - All errors logged to stderr for Claude Desktop visibility 73 | - Structured error responses with `isError: true` 74 | - Rate limit detection with automatic retry and exponential backoff 75 | - Try-catch wrapper in main tool handler dispatcher 76 | 77 | ### Provider Routing System 78 | Two-phase provider routing controls: 79 | - **Phase 1**: Basic filtering (quantizations, ignore lists) 80 | - **Phase 2**: Advanced routing (sort order, parameter requirements, data collection policies) 81 | 82 | ### Caching Implementation 83 | - Singleton `ModelCache` class with 1-hour model data expiry 84 | - Memory-efficient storage of OpenRouter model registry 85 | - Automatic cache invalidation and refresh 86 | 87 | ## Development Constraints 88 | 89 | ### TypeScript Configuration 90 | - ES2022 target with NodeNext modules 91 | - Strict mode enabled 92 | - Declaration files generated in dist/ 93 | - Must use `.js` extensions in imports for ES modules 94 | 95 | ### Dependencies 96 | - `@modelcontextprotocol/sdk` - Core MCP implementation 97 | - `openai` - Chat completions via OpenRouter 98 | - `axios` - HTTP client for model registry API 99 | - `typescript` - Development dependency 100 | 101 | ### OpenRouter Integration 102 | - Base URL: `https://openrouter.ai/api/v1` 103 | - Required headers: `HTTP-Referer`, `X-Title` 104 | - Rate limit headers: `x-ratelimit-remaining`, `x-ratelimit-reset` 105 | - Model registry endpoint: `/models` 106 | -------------------------------------------------------------------------------- /src/tool-handlers/search-models.ts: -------------------------------------------------------------------------------- 1 | import { ModelCache, OpenRouterModel } from '../model-cache.js'; 2 | import { OpenRouterAPIClient } from '../openrouter-api.js'; 3 | import { ToolResult } from '../types.js'; // Import the unified type 4 | 5 | export interface SearchModelsToolRequest { 6 | query?: string; 7 | provider?: string; 8 | minContextLength?: number; 9 | maxContextLength?: number; 10 | maxPromptPrice?: number; 11 | maxCompletionPrice?: number; 12 | capabilities?: { 13 | functions?: boolean; 14 | tools?: boolean; 15 | vision?: boolean; 16 | json_mode?: boolean; 17 | }; 18 | limit?: number; 19 | } 20 | 21 | // Update function signature to return Promise 22 | export async function handleSearchModels( 23 | request: { params: { arguments: SearchModelsToolRequest } }, 24 | apiClient: OpenRouterAPIClient, 25 | modelCache: ModelCache 26 | ): Promise<ToolResult> { 27 | const args = request.params.arguments; 28 | 29 | try { 30 | // Use cached models if available 31 | let models = modelCache.getCachedModels(); 32 | if (!models) { 33 | models = await apiClient.fetchModels(); 34 | if (models) { 35 | modelCache.setCachedModels({ ...models, timestamp: Date.now() }); 36 | } 37 | } 38 | 39 | // Simplify the "Failed to fetch models" error return 40 | if (!models) { 41 | return { 42 | isError: true, // Ensure isError is present 43 | content: [ 44 | { 45 | type: 'text', 46 | // Use simple error string 47 | text: 'Error: Failed to fetch models. 
Please try again.', 48 | }, 49 | ], 50 | }; 51 | } 52 | 53 | // Apply all filters 54 | const searchResults = models.data 55 | .filter(model => { 56 | // Text search 57 | if (args.query) { 58 | const searchTerm = args.query.toLowerCase(); 59 | const matchesQuery = 60 | model.id.toLowerCase().includes(searchTerm) || 61 | (model.name && model.name.toLowerCase().includes(searchTerm)) || 62 | (model.description && model.description.toLowerCase().includes(searchTerm)); 63 | if (!matchesQuery) return false; 64 | } 65 | 66 | // Provider filter 67 | if (args.provider) { 68 | const provider = model.id.split('/')[0]; 69 | if (provider !== args.provider.toLowerCase()) return false; 70 | } 71 | 72 | // Context length filters 73 | if (args.minContextLength && model.context_length < args.minContextLength) return false; 74 | if (args.maxContextLength && model.context_length > args.maxContextLength) return false; 75 | 76 | // Price filters 77 | if (args.maxPromptPrice && parseFloat(model.pricing.prompt) > args.maxPromptPrice) return false; 78 | if (args.maxCompletionPrice && parseFloat(model.pricing.completion) > args.maxCompletionPrice) return false; 79 | 80 | // Capabilities filters 81 | if (args.capabilities) { 82 | if (args.capabilities.functions && !model.capabilities?.functions) return false; 83 | if (args.capabilities.tools && !model.capabilities?.tools) return false; 84 | if (args.capabilities.vision && !model.capabilities?.vision) return false; 85 | if (args.capabilities.json_mode && !model.capabilities?.json_mode) return false; 86 | } 87 | 88 | return true; 89 | }) 90 | // Apply limit 91 | .slice(0, args.limit || 10) 92 | .map(model => ({ 93 | id: model.id, 94 | name: model.name, 95 | description: model.description || 'No description available', 96 | context_length: model.context_length, 97 | pricing: { 98 | prompt: `$${model.pricing.prompt}/1K tokens`, 99 | completion: `$${model.pricing.completion}/1K tokens` 100 | }, 101 | capabilities: { 102 | functions: model.capabilities?.functions || false, 103 | tools: model.capabilities?.tools || false, 104 | vision: model.capabilities?.vision || false, 105 | json_mode: model.capabilities?.json_mode || false 106 | } 107 | })); 108 | 109 | const response = { 110 | id: `search-${Date.now()}`, 111 | object: 'list', 112 | data: searchResults, 113 | created: Math.floor(Date.now() / 1000), 114 | metadata: { 115 | total_models: models.data.length, 116 | filtered_count: searchResults.length, 117 | applied_filters: { 118 | query: args.query, 119 | provider: args.provider, 120 | minContextLength: args.minContextLength, 121 | maxContextLength: args.maxContextLength, 122 | maxPromptPrice: args.maxPromptPrice, 123 | maxCompletionPrice: args.maxCompletionPrice, 124 | capabilities: args.capabilities, 125 | limit: args.limit 126 | } 127 | } 128 | }; 129 | 130 | // Add isError: false to successful return 131 | return { 132 | isError: false, 133 | content: [ 134 | { 135 | type: 'text', 136 | text: JSON.stringify(response, null, 2), 137 | }, 138 | ], 139 | }; 140 | } catch (error) { 141 | console.error('Error during model search:', error); // Log the error 142 | // Handle known and unknown errors, always return ToolResult 143 | if (error instanceof Error) { 144 | return { 145 | isError: true, 146 | content: [ 147 | { 148 | type: 'text', 149 | // Add "Error: " prefix 150 | text: `Error: Failed to search models: ${error.message}`, 151 | }, 152 | ], 153 | }; 154 | } else { 155 | // Handle unknown errors 156 | return { 157 | isError: true, 158 | content: [ 159 | { 160 | type: 
'text', 161 | text: 'Error: An unknown error occurred during model search.', 162 | }, 163 | ], 164 | }; 165 | } 166 | // DO NOT throw error; 167 | } 168 | } -------------------------------------------------------------------------------- /src/tool-handlers/chat-completion.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | import { ChatCompletionMessageParam } from 'openai/resources/chat/completions.js'; 3 | import { ToolResult } from '../types.js'; // Import the unified type 4 | 5 | // Maximum context tokens (matches tool-handlers.ts) 6 | const MAX_CONTEXT_TOKENS = 200000; 7 | 8 | export interface ChatCompletionToolRequest { 9 | model?: string; 10 | messages: ChatCompletionMessageParam[]; 11 | temperature?: number; 12 | max_tokens?: number; // Add max_tokens parameter 13 | provider?: { // Add provider configuration 14 | // Phase 1 15 | quantizations?: string[]; // For quality filtering 16 | ignore?: string[]; // Block specific providers 17 | // Phase 2 18 | sort?: "price" | "throughput" | "latency"; // Sort providers 19 | order?: string[]; // Prioritized list of provider IDs 20 | require_parameters?: boolean; // Only use providers supporting all params 21 | data_collection?: "allow" | "deny"; // Allow/deny data collection 22 | allow_fallbacks?: boolean; // Control fallback behavior 23 | } 24 | } 25 | 26 | // Utility function to estimate token count (simplified) 27 | function estimateTokenCount(text: string): number { 28 | // Rough approximation: 4 characters per token 29 | return Math.ceil(text.length / 4); 30 | } 31 | 32 | // Truncate messages to fit within the context window 33 | function truncateMessagesToFit( 34 | messages: ChatCompletionMessageParam[], 35 | maxTokens: number 36 | ): ChatCompletionMessageParam[] { 37 | const truncated: ChatCompletionMessageParam[] = []; 38 | let currentTokenCount = 0; 39 | 40 | // Always include system message first if present 41 | if (messages[0]?.role === 'system') { 42 | truncated.push(messages[0]); 43 | currentTokenCount += estimateTokenCount(messages[0].content as string); 44 | } 45 | 46 | // Add messages from the end, respecting the token limit 47 | for (let i = messages.length - 1; i >= 0; i--) { 48 | // Skip system message if already added 49 | if (i === 0 && messages[0]?.role === 'system') continue; 50 | 51 | const messageContent = messages[i].content; 52 | // Handle potential null/undefined content safely 53 | const contentString = typeof messageContent === 'string' ? 
messageContent : ''; 54 | const messageTokens = estimateTokenCount(contentString); 55 | 56 | if (currentTokenCount + messageTokens > maxTokens) break; 57 | 58 | truncated.unshift(messages[i]); 59 | currentTokenCount += messageTokens; 60 | } 61 | 62 | return truncated; 63 | } 64 | 65 | // Update function signature to return Promise 66 | export async function handleChatCompletion( 67 | request: { params: { arguments: ChatCompletionToolRequest } }, 68 | openai: OpenAI, 69 | defaultModel?: string, 70 | defaultMaxTokens?: string, // Note: Comes as string from env var 71 | defaultQuantizations?: string[], 72 | defaultIgnoredProviders?: string[], 73 | // Phase 2 Defaults 74 | defaultSort?: "price" | "throughput" | "latency", 75 | defaultOrder?: string[], 76 | defaultRequireParameters?: boolean, 77 | defaultDataCollection?: "allow" | "deny", 78 | defaultAllowFallbacks?: boolean 79 | ): Promise<ToolResult> { 80 | const args = request.params.arguments; 81 | 82 | // Determine effective max_tokens 83 | const maxTokens = args.max_tokens ?? (defaultMaxTokens ? parseInt(defaultMaxTokens, 10) : undefined); 84 | if (maxTokens !== undefined && isNaN(maxTokens)) { 85 | // Handle potential parsing error if defaultMaxTokens is not a valid number string 86 | console.warn(`Invalid OPENROUTER_MAX_TOKENS value: ${defaultMaxTokens}. Ignoring.`); 87 | // Potentially return an error ToolResult here if strict validation is desired 88 | } 89 | 90 | // Determine effective provider config (Phase 1 & 2) 91 | const providerArgs = args.provider ?? {}; 92 | const providerConfig: { 93 | quantizations?: string[]; 94 | ignore?: string[]; 95 | sort?: "price" | "throughput" | "latency"; 96 | order?: string[]; 97 | require_parameters?: boolean; 98 | data_collection?: "allow" | "deny"; 99 | allow_fallbacks?: boolean; 100 | } = {}; 101 | 102 | // Merge Phase 1 103 | const effectiveQuantizations = providerArgs.quantizations ?? defaultQuantizations; 104 | const effectiveIgnore = providerArgs.ignore ?? defaultIgnoredProviders; 105 | if (effectiveQuantizations && effectiveQuantizations.length > 0) { 106 | providerConfig.quantizations = effectiveQuantizations; 107 | } 108 | if (effectiveIgnore && effectiveIgnore.length > 0) { 109 | providerConfig.ignore = effectiveIgnore; 110 | } 111 | 112 | // Merge Phase 2 113 | const effectiveSort = providerArgs.sort ?? defaultSort; 114 | const effectiveOrder = providerArgs.order ?? defaultOrder; 115 | const effectiveRequireParameters = providerArgs.require_parameters ?? defaultRequireParameters; 116 | const effectiveDataCollection = providerArgs.data_collection ?? defaultDataCollection; 117 | const effectiveAllowFallbacks = providerArgs.allow_fallbacks ?? defaultAllowFallbacks; 118 | 119 | if (effectiveSort) providerConfig.sort = effectiveSort; 120 | if (effectiveOrder && effectiveOrder.length > 0) providerConfig.order = effectiveOrder; 121 | if (effectiveRequireParameters !== undefined) providerConfig.require_parameters = effectiveRequireParameters; 122 | if (effectiveDataCollection) providerConfig.data_collection = effectiveDataCollection; 123 | if (effectiveAllowFallbacks !== undefined) providerConfig.allow_fallbacks = effectiveAllowFallbacks; 124 | 125 | // Validate model selection 126 | const model = args.model || defaultModel; 127 | if (!model) { 128 | return { 129 | isError: true, // Ensure isError is present 130 | content: [ 131 | { 132 | type: 'text', 133 | // Add "Error: " prefix 134 | text: 'Error: No model specified and no default model configured in MCP settings. 
Please specify a model or set OPENROUTER_DEFAULT_MODEL in the MCP configuration.', 135 | }, 136 | ], 137 | }; 138 | } 139 | 140 | // Validate message array 141 | if (!args.messages || args.messages.length === 0) { // Add check for undefined/null messages 142 | return { 143 | isError: true, // Ensure isError is present 144 | content: [ 145 | { 146 | type: 'text', 147 | // Add "Error: " prefix 148 | text: 'Error: Messages array cannot be empty. At least one message is required.', 149 | }, 150 | ], 151 | }; 152 | } 153 | 154 | try { 155 | // Truncate messages to fit within context window 156 | const truncatedMessages = truncateMessagesToFit(args.messages, MAX_CONTEXT_TOKENS); 157 | 158 | const completionRequest: OpenAI.Chat.Completions.ChatCompletionCreateParams = { 159 | model, // Use the validated model 160 | messages: truncatedMessages, 161 | temperature: args.temperature ?? 1, 162 | // Add max_tokens if defined and valid 163 | ...(maxTokens !== undefined && !isNaN(maxTokens) && { max_tokens: maxTokens }), 164 | // Add provider config if it has keys (now includes Phase 2) 165 | ...(Object.keys(providerConfig).length > 0 && { provider: providerConfig }), 166 | }; 167 | 168 | // Log the request being sent (optional, for debugging) 169 | // console.log("Sending request to OpenRouter:", JSON.stringify(completionRequest, null, 2)); 170 | 171 | const completion = await openai.chat.completions.create(completionRequest); 172 | 173 | // Format response to match OpenRouter schema 174 | const response = { 175 | id: `gen-${Date.now()}`, 176 | choices: [{ 177 | finish_reason: completion.choices[0].finish_reason, 178 | message: { 179 | role: completion.choices[0].message.role, 180 | content: completion.choices[0].message.content || '', 181 | tool_calls: completion.choices[0].message.tool_calls 182 | } 183 | }], 184 | created: Math.floor(Date.now() / 1000), 185 | model: model, 186 | object: 'chat.completion', 187 | usage: completion.usage || { 188 | prompt_tokens: 0, 189 | completion_tokens: 0, 190 | total_tokens: 0 191 | } 192 | }; 193 | 194 | // Add isError: false to successful return 195 | return { 196 | isError: false, 197 | content: [ 198 | { 199 | type: 'text', 200 | text: JSON.stringify(response, null, 2), 201 | }, 202 | ], 203 | }; 204 | } catch (error) { 205 | console.error('Error during chat completion:', error); // Log the error 206 | // Handle known and unknown errors, always return ToolResult 207 | if (error instanceof Error) { 208 | return { 209 | isError: true, 210 | content: [ 211 | { 212 | type: 'text', 213 | // Add "Error: " prefix 214 | text: `Error: OpenRouter API error: ${error.message}`, 215 | }, 216 | ], 217 | }; 218 | } else { 219 | // Handle unknown errors 220 | return { 221 | isError: true, 222 | content: [ 223 | { 224 | type: 'text', 225 | text: 'Error: An unknown error occurred during chat completion.', 226 | }, 227 | ], 228 | }; 229 | } 230 | // DO NOT throw error; 231 | } 232 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenRouter MCP Server 2 | 3 | [![MCP Server](https://img.shields.io/badge/MCP-Server-green)](https://github.com/heltonteixeira/openrouterai) 4 | [![Version](https://img.shields.io/badge/version-2.2.0-blue)](CHANGELOG.md) 5 | [![TypeScript](https://img.shields.io/badge/language-TypeScript-blue)](https://www.typescriptlang.org/) 6 | [![License](https://img.shields.io/badge/license-Apache%202.0-brightgreen)](LICENSE) 7 | 8 | 
A Model Context Protocol (MCP) server providing seamless integration with OpenRouter.ai's diverse model ecosystem. Access various AI models through a unified, type-safe interface with built-in caching, rate limiting, and error handling. 9 | 10 | OpenRouter Server MCP server 11 | 12 | ## Features 13 | 14 | - **Model Access** 15 | - Direct access to all OpenRouter.ai models 16 | - Automatic model validation and capability checking 17 | - Default model configuration support 18 | 19 | - **Performance Optimization** 20 | - Smart model information caching (1-hour expiry) 21 | - Automatic rate limit management 22 | - Exponential backoff for failed requests 23 | 24 | - **Unified Response Format** 25 | - Consistent `ToolResult` structure for all responses 26 | - Clear error identification with `isError` flag 27 | - Structured error messages with context 28 | ## Installation 29 | 30 | ```bash 31 | pnpm install @mcpservers/openrouterai 32 | ``` 33 | 34 | ## Configuration 35 | 36 | ### Prerequisites 37 | 38 | 1. Get your OpenRouter API key from [OpenRouter Keys](https://openrouter.ai/keys) 39 | 2. Choose a default model (optional) 40 | 41 | ### Environment Variables 42 | 43 | * `OPENROUTER_API_KEY`: **Required**. Your OpenRouter API key. 44 | * `OPENROUTER_DEFAULT_MODEL`: Optional. The default model to use if not specified in the request (e.g., `openrouter/auto`). 45 | * `OPENROUTER_MAX_TOKENS`: Optional. Default maximum number of tokens to generate if `max_tokens` is not provided in the request. 46 | * `OPENROUTER_PROVIDER_QUANTIZATIONS`: Optional. Comma-separated list of default quantization levels to filter by (e.g., `fp16,int8`) if `provider.quantizations` is not provided in the request. (Phase 1) 47 | * `OPENROUTER_PROVIDER_IGNORE`: Optional. Comma-separated list of default provider names to ignore (e.g., `mistralai,openai`) if `provider.ignore` is not provided in the request. (Phase 1) 48 | * `OPENROUTER_PROVIDER_SORT`: Optional. Default sort order for providers ("price", "throughput", or "latency"). Overridden by `provider.sort` argument. (Phase 2) 49 | * `OPENROUTER_PROVIDER_ORDER`: Optional. Default prioritized list of provider IDs (JSON array string, e.g., `'["openai/gpt-4o", "anthropic/claude-3-opus"]'`). Overridden by `provider.order` argument. (Phase 2) 50 | * `OPENROUTER_PROVIDER_REQUIRE_PARAMETERS`: Optional. Default boolean (`true` or `false`) to only use providers supporting all specified request parameters. Overridden by `provider.require_parameters` argument. (Phase 2) 51 | * `OPENROUTER_PROVIDER_DATA_COLLECTION`: Optional. Default data collection policy ("allow" or "deny"). Overridden by `provider.data_collection` argument. (Phase 2) 52 | * `OPENROUTER_PROVIDER_ALLOW_FALLBACKS`: Optional. Default boolean (`true` or `false`) to control fallback behavior if preferred providers fail. Overridden by `provider.allow_fallbacks` argument. 
(Phase 2) 53 | 54 | ```env 55 | # Example .env file content 56 | OPENROUTER_API_KEY=your-api-key-here 57 | OPENROUTER_DEFAULT_MODEL=openrouter/auto 58 | OPENROUTER_MAX_TOKENS=1024 59 | OPENROUTER_PROVIDER_QUANTIZATIONS=fp16,int8 60 | OPENROUTER_PROVIDER_IGNORE=openai,anthropic 61 | OPENROUTER_PROVIDER_SORT=price 62 | OPENROUTER_PROVIDER_ORDER='["openai/gpt-4o", "anthropic/claude-3-opus"]' 63 | OPENROUTER_PROVIDER_REQUIRE_PARAMETERS=true 64 | OPENROUTER_PROVIDER_DATA_COLLECTION=deny 65 | OPENROUTER_PROVIDER_ALLOW_FALLBACKS=false 66 | ``` 67 | 68 | 69 | 70 | 71 | ### Setup 72 | 73 | Add to your MCP settings configuration file (`cline_mcp_settings.json` or `claude_desktop_config.json`): 74 | 75 | ```json 76 | { 77 | "mcpServers": { 78 | "openrouterai": { 79 | "command": "npx", 80 | "args": ["@mcpservers/openrouterai"], 81 | "env": { 82 | "OPENROUTER_API_KEY": "your-api-key-here", 83 | "OPENROUTER_DEFAULT_MODEL": "optional-default-model", 84 | "OPENROUTER_MAX_TOKENS": "1024", 85 | "OPENROUTER_PROVIDER_QUANTIZATIONS": "fp16,int8", 86 | "OPENROUTER_PROVIDER_IGNORE": "openai,anthropic" 87 | } 88 | } 89 | } 90 | } 91 | ``` 92 | ## Response Format 93 | 94 | All tools return responses in a standardized structure: 95 | 96 | ```typescript 97 | interface ToolResult { 98 | isError: boolean; 99 | content: Array<{ 100 | type: "text"; 101 | text: string; // JSON string or error message 102 | }>; 103 | } 104 | ``` 105 | 106 | **Success Example:** 107 | ```json 108 | { 109 | "isError": false, 110 | "content": [{ 111 | "type": "text", 112 | "text": "{\"id\": \"gen-123\", ...}" 113 | }] 114 | } 115 | ``` 116 | 117 | **Error Example:** 118 | ```json 119 | { 120 | "isError": true, 121 | "content": [{ 122 | "type": "text", 123 | "text": "Error: Model validation failed - 'invalid-model' not found" 124 | }] 125 | } 126 | ``` 127 | 128 | ## Available Tools 129 | 130 | ### `chat_completion` 131 | 132 | Sends a request to the OpenRouter Chat Completions API. 133 | 134 | **Input Schema:** 135 | 136 | * `model` (string, optional): The model to use (e.g., `openai/gpt-4o`, `google/gemini-pro`). Overrides `OPENROUTER_DEFAULT_MODEL`. Defaults to `openrouter/auto` if neither is set. 137 | * **Model Suffixes:** You can append `:nitro` to a model ID (e.g., `openai/gpt-4o:nitro`) to potentially route to faster, experimental versions if available. Append `:floor` (e.g., `mistralai/mistral-7b-instruct:floor`) to use the cheapest available variant of a model, often useful for testing or low-cost tasks. Note: Availability of `:nitro` and `:floor` variants depends on OpenRouter. 138 | * `messages` (array, required): An array of message objects conforming to the OpenAI chat completion format. 139 | * `temperature` (number, optional): Sampling temperature. Defaults to 1. 140 | * `max_tokens` (number, optional): Maximum number of tokens to generate in the completion. Overrides `OPENROUTER_MAX_TOKENS`. 141 | * `provider` (object, optional): Provider routing configuration. Overrides corresponding `OPENROUTER_PROVIDER_*` environment variables. 142 | * `quantizations` (array of strings, optional): List of quantization levels to filter by (e.g., `["fp16", "int8"]`). Only models matching one of these levels will be considered. Overrides `OPENROUTER_PROVIDER_QUANTIZATIONS`. (Phase 1) 143 | * `ignore` (array of strings, optional): List of provider names to exclude (e.g., `["openai", "anthropic"]`). Models from these providers will not be used. 
Overrides `OPENROUTER_PROVIDER_IGNORE`. (Phase 1) 144 | * `sort` ("price" | "throughput" | "latency", optional): Sort providers by the specified criteria. Overrides `OPENROUTER_PROVIDER_SORT`. (Phase 2) 145 | * `order` (array of strings, optional): A prioritized list of provider IDs (e.g., `["openai/gpt-4o", "anthropic/claude-3-opus"]`). Overrides `OPENROUTER_PROVIDER_ORDER`. (Phase 2) 146 | * `require_parameters` (boolean, optional): If true, only use providers that support all specified request parameters (like tools, functions, temperature). Overrides `OPENROUTER_PROVIDER_REQUIRE_PARAMETERS`. (Phase 2) 147 | * `data_collection` ("allow" | "deny", optional): Specify whether providers are allowed to collect data from the request. Overrides `OPENROUTER_PROVIDER_DATA_COLLECTION`. (Phase 2) 148 | * `allow_fallbacks` (boolean, optional): If true (default), allows falling back to other providers if the preferred ones fail or are unavailable. If false, fails the request if preferred providers cannot be used. Overrides `OPENROUTER_PROVIDER_ALLOW_FALLBACKS`. (Phase 2) 149 | 150 | **Example Usage:** 151 | 152 | ```json 153 | { 154 | "tool": "chat_completion", 155 | "arguments": { 156 | "model": "anthropic/claude-3-haiku", 157 | "messages": [ 158 | { "role": "user", "content": "Explain the concept of quantization in AI models." } 159 | ], 160 | "max_tokens": 500, 161 | "provider": { 162 | "quantizations": ["fp16"], 163 | "ignore": ["openai"], 164 | "sort": "price", 165 | "order": ["anthropic/claude-3-haiku", "google/gemini-pro"], 166 | "require_parameters": true, 167 | "allow_fallbacks": false 168 | } 169 | } 170 | } 171 | ``` 172 | 173 | This example requests a completion from `anthropic/claude-3-haiku` and limits the response to 500 tokens. It specifies provider routing options: prefer `fp16` quantized models, ignore `openai` providers, sort remaining providers by `price`, prioritize `anthropic/claude-3-haiku` then `google/gemini-pro`, require the chosen provider to support all request parameters (like `max_tokens`), and disable fallbacks (fail if the prioritized providers cannot fulfill the request).
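**Programmatic usage (illustrative):** The sketch below shows how a standalone MCP client could invoke `chat_completion` with these arguments. It is not part of this repository; it assumes the client-side API of the `@modelcontextprotocol/sdk` package (`Client`, `StdioClientTransport`, `callTool`) and launches the server with `npx`, mirroring the Setup entry above. Adapt as needed if you use an MCP host such as Claude Desktop or Cline instead.

```typescript
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

async function main() {
  // Launch the server over stdio, the same way the MCP settings entry above does.
  const transport = new StdioClientTransport({
    command: "npx",
    args: ["@mcpservers/openrouterai"],
    env: { OPENROUTER_API_KEY: "your-api-key-here" },
  });

  const client = new Client(
    { name: "example-client", version: "1.0.0" },
    { capabilities: {} }
  );
  await client.connect(transport);

  // Same arguments as the JSON example above, passed through callTool.
  const result = await client.callTool({
    name: "chat_completion",
    arguments: {
      model: "anthropic/claude-3-haiku",
      messages: [
        { role: "user", content: "Explain the concept of quantization in AI models." },
      ],
      max_tokens: 500,
      provider: { sort: "price", allow_fallbacks: false },
    },
  });

  console.log(result); // ToolResult-shaped payload (see "Response Format" above)
}

main().catch(console.error);
```

The returned value follows the `ToolResult` shape described under Response Format, so the same `isError` check shown later under Error Handling applies here.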
174 | 175 | ### `search_models` 176 | 177 | Search and filter available models: 178 | 179 | ```typescript 180 | interface ModelSearchRequest { 181 | query?: string; 182 | provider?: string; 183 | minContextLength?: number; 184 | capabilities?: { 185 | functions?: boolean; 186 | vision?: boolean; 187 | }; 188 | } 189 | 190 | // Response: ToolResult with model list or error 191 | ``` 192 | 193 | ### `get_model_info` 194 | 195 | Get detailed information about a specific model: 196 | 197 | ```typescript 198 | { 199 | model: string; // Model identifier 200 | } 201 | ``` 202 | 203 | ### `validate_model` 204 | 205 | Check if a model ID is valid: 206 | 207 | ```typescript 208 | interface ModelValidationRequest { 209 | model: string; 210 | } 211 | 212 | // Response: ToolResult 213 | // Success: isError is false; content text confirms the model ID is valid 214 | // Error: isError is true; content text describes the failure (e.g., "Model not found") 215 | ``` 216 | 217 | ## Error Handling 218 | 219 | The server provides structured errors with contextual information: 220 | 221 | ```typescript 222 | // Error response structure 223 | { 224 | isError: true, 225 | content: [{ 226 | type: "text", 227 | text: "Error: [Category] - Detailed message" 228 | }] 229 | } 230 | ``` 231 | 232 | **Common Error Categories:** 233 | - `Validation Error`: Invalid input parameters 234 | - `API Error`: OpenRouter API communication issues 235 | - `Rate Limit`: Request throttling detection 236 | - `Internal Error`: Server-side processing failures 237 | 238 | **Handling Responses:** 239 | ```typescript 240 | async function handleResponse(result: ToolResult) { 241 | if (result.isError) { 242 | const errorMessage = result.content[0].text; 243 | if (errorMessage.startsWith('Error: Rate Limit')) { 244 | // Handle rate limiting 245 | } 246 | // Other error handling 247 | } else { 248 | const data = JSON.parse(result.content[0].text); 249 | // Process successful response 250 | } 251 | } 252 | ``` 253 | 254 | ## Development 255 | 256 | See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed information about: 257 | - Development setup 258 | - Project structure 259 | - Feature implementation 260 | - Error handling guidelines 261 | - Tool usage examples 262 | 263 | ```bash 264 | # Install dependencies 265 | pnpm install 266 | 267 | # Build project 268 | pnpm run build 269 | 270 | # Run tests 271 | pnpm test 272 | ``` 273 | 274 | ## Changelog 275 | See [CHANGELOG.md](./CHANGELOG.md) for recent updates including: 276 | - Unified response format implementation 277 | - Enhanced error handling system 278 | - Type-safe interface improvements 279 | 280 | ## License 281 | This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details. -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity.
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2024 Cline Bot Inc. 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /src/tool-handlers.ts: -------------------------------------------------------------------------------- 1 | import { Server } from '@modelcontextprotocol/sdk/server/index.js'; 2 | import { 3 | CallToolRequestSchema, 4 | ErrorCode, 5 | ListToolsRequestSchema, 6 | McpError, 7 | } from '@modelcontextprotocol/sdk/types.js'; 8 | import OpenAI from 'openai'; 9 | 10 | import { ModelCache } from './model-cache.js'; 11 | import { OpenRouterAPIClient } from './openrouter-api.js'; 12 | import { ToolResult } from './types.js'; // Import the unified type 13 | import { handleChatCompletion, ChatCompletionToolRequest } from './tool-handlers/chat-completion.js'; 14 | import { handleSearchModels, SearchModelsToolRequest } from './tool-handlers/search-models.js'; 15 | import { handleGetModelInfo, GetModelInfoToolRequest } from './tool-handlers/get-model-info.js'; 16 | import { handleValidateModel, ValidateModelToolRequest } from './tool-handlers/validate-model.js'; 17 | 18 | export class ToolHandlers { 19 | private server: Server; 20 | private openai: OpenAI; 21 | private modelCache: ModelCache; 22 | private apiClient: OpenRouterAPIClient; 23 | private defaultModel?: string; 24 | private defaultMaxTokens?: string; 25 | private defaultQuantizations?: string[]; 26 | private defaultIgnoredProviders?: string[]; 27 | // Phase 2 Defaults 28 | private readonly defaultProviderSort?: "price" | "throughput" | "latency"; 29 | private readonly defaultProviderOrder?: string[]; 30 | private readonly defaultProviderRequireParameters?: boolean; 31 | private readonly defaultProviderDataCollection?: "allow" | "deny"; 32 | private readonly defaultProviderAllowFallbacks?: boolean; 33 | 34 | constructor( 35 | server: Server, 36 | apiKey: string, 37 | defaultModel?: string, 38 | defaultMaxTokens?: string, 39 | defaultQuantizations?: string[], 40 | defaultIgnoredProviders?: string[], 41 | // Phase 2 Defaults 42 | defaultProviderSort?: "price" | "throughput" | "latency", 43 | defaultProviderOrder?: string[], 44 | defaultProviderRequireParameters?: boolean, 45 | defaultProviderDataCollection?: "allow" | "deny", 46 | defaultProviderAllowFallbacks?: boolean 47 | ) { 48 | this.server = server; 49 | this.modelCache = ModelCache.getInstance(); 50 | this.apiClient = new OpenRouterAPIClient(apiKey); 51 | this.defaultModel = defaultModel; 52 | this.defaultMaxTokens = defaultMaxTokens; 53 | this.defaultQuantizations = defaultQuantizations; 54 | this.defaultIgnoredProviders = defaultIgnoredProviders; 55 | // Phase 2 Defaults 56 | this.defaultProviderSort = defaultProviderSort; 57 | this.defaultProviderOrder = defaultProviderOrder; 58 | this.defaultProviderRequireParameters = defaultProviderRequireParameters; 59 | this.defaultProviderDataCollection = defaultProviderDataCollection; 60 | this.defaultProviderAllowFallbacks = defaultProviderAllowFallbacks; 61 | 62 | this.openai = new OpenAI({ 63 | apiKey: apiKey, 64 | baseURL: 'https://openrouter.ai/api/v1', 65 | defaultHeaders: { 66 | 'HTTP-Referer': 
'https://github.com/heltonteixeira/openrouterai', 67 | 'X-Title': 'MCP OpenRouter Server', 68 | }, 69 | }); 70 | 71 | this.setupToolHandlers(); 72 | } 73 | 74 | private setupToolHandlers() { 75 | this.server.setRequestHandler(ListToolsRequestSchema, async () => ({ 76 | tools: [ 77 | { 78 | name: 'chat_completion', 79 | description: 'Sends conversational context (messages) to OpenRouter.ai for completion using a specified model. Use this for dialogue, text generation, or instruction-following tasks. Supports advanced provider routing and parameter overrides. Returns the generated text response.', 80 | inputSchema: { 81 | type: 'object', 82 | properties: { 83 | model: { 84 | type: 'string', 85 | description: '(Optional) The specific OpenRouter model ID (e.g., "google/gemini-pro") to use for this completion request. If omitted, the server\'s configured default model will be used.', 86 | }, 87 | messages: { 88 | type: 'array', 89 | description: '(Required) An ordered array of message objects representing the conversation history. Each object must include `role` ("system", "user", or "assistant") and `content` (the text of the message). Minimum 1 message, maximum 100.', 90 | minItems: 1, 91 | maxItems: 100, 92 | items: { 93 | type: 'object', 94 | properties: { 95 | role: { 96 | type: 'string', 97 | enum: ['system', 'user', 'assistant'], 98 | description: 'Indicates the originator of the message. Must be one of: "system", "user", "assistant".', 99 | }, 100 | content: { 101 | type: 'string', 102 | description: 'The textual content of the message.', 103 | }, 104 | }, 105 | required: ['role', 'content'], 106 | } }, 107 | temperature: { 108 | type: 'number', 109 | description: '(Optional) Controls the randomness of the generated output. Ranges from 0.0 (deterministic) to 2.0 (highly random). Affects creativity versus coherence.', 110 | minimum: 0, 111 | maximum: 2, 112 | }, 113 | max_tokens: { 114 | type: 'number', 115 | description: '(Optional) Sets an upper limit on the number of tokens generated in the response. Overrides the server default if specified. Influences provider routing based on model context limits.', 116 | }, 117 | provider: { 118 | type: 'object', 119 | description: '(Optional) An object allowing fine-grained control over how OpenRouter selects the underlying AI provider for this request, overriding any server-level defaults.', 120 | properties: { 121 | quantizations: { 122 | type: 'array', 123 | items: { type: 'string' }, 124 | description: '(Optional) Filters eligible providers to only those supporting the specified quantization levels (e.g., ["fp16", "int8"]). Overrides server default.', 125 | }, 126 | ignore: { 127 | type: 'array', 128 | items: { type: 'string' }, 129 | description: '(Optional) A list of provider IDs (e.g., ["openai", "mistralai"]) to explicitly exclude from consideration for this request. Overrides server default.', 130 | }, 131 | // Phase 2 Options 132 | sort: { 133 | type: 'string', 134 | enum: ['price', 'throughput', 'latency'], 135 | description: '(Optional) Determines the primary criterion ("price", "throughput", or "latency") used to sort eligible providers before selection. Overrides server default.', 136 | }, 137 | order: { 138 | type: 'array', 139 | items: { type: 'string' }, 140 | description: '(Optional) Defines a specific, ordered list of preferred provider IDs. OpenRouter will attempt to use these providers in the given order. 
Overrides server default.', 141 | }, 142 | require_parameters: { 143 | type: 'boolean', 144 | description: '(Optional) If set to true, restricts selection to only those providers that fully support *all* parameters included in this chat completion request. Overrides server default.', 145 | }, 146 | data_collection: { 147 | type: 'string', 148 | enum: ['allow', 'deny'], 149 | description: '(Optional) Specifies the user\'s preference regarding data collection by the underlying provider ("allow" or "deny"). Overrides server default.', 150 | }, 151 | allow_fallbacks: { 152 | type: 'boolean', 153 | description: '(Optional) If set to true (default), allows OpenRouter to attempt using fallback providers if the initially selected provider(s) fail. Set to false to disable fallbacks. Overrides server default.', 154 | } 155 | }, 156 | additionalProperties: true // Allow future properties for forward compatibility 157 | }, 158 | }, 159 | required: ['messages'], 160 | }, 161 | // Context window management details can be added as a separate property 162 | maxContextTokens: 200000 163 | }, 164 | { 165 | name: 'search_models', 166 | description: 'Queries the OpenRouter.ai model registry, filtering by various criteria like capabilities, pricing, or provider. Use this to discover models suitable for specific needs. Returns a list of matching model metadata objects.', 167 | inputSchema: { 168 | type: 'object', 169 | properties: { 170 | query: { 171 | type: 'string', 172 | description: '(Optional) A text query string to search within model names, descriptions, and provider details.', 173 | }, 174 | provider: { 175 | type: 'string', 176 | description: '(Optional) Restricts the search to models offered by a specific provider ID (e.g., "openai", "anthropic").', 177 | }, 178 | minContextLength: { 179 | type: 'number', 180 | description: '(Optional) Filters for models that support at least the specified context window size (in tokens).', 181 | }, 182 | maxContextLength: { 183 | type: 'number', 184 | description: '(Optional) Filters for models that support at most the specified context window size (in tokens).', 185 | }, 186 | maxPromptPrice: { 187 | type: 'number', 188 | description: '(Optional) Filters for models whose price for processing 1,000 prompt tokens is less than or equal to this value.', 189 | }, 190 | maxCompletionPrice: { 191 | type: 'number', 192 | description: '(Optional) Filters for models whose price for generating 1,000 completion tokens is less than or equal to this value.', 193 | }, 194 | capabilities: { 195 | type: 'object', 196 | description: '(Optional) An object specifying required model capabilities.', 197 | properties: { 198 | functions: { 199 | type: 'boolean', 200 | description: '(Optional) If true, filters for models that support function calling.', 201 | }, 202 | tools: { 203 | type: 'boolean', 204 | description: '(Optional) If true, filters for models that support tool usage.', 205 | }, 206 | vision: { 207 | type: 'boolean', 208 | description: '(Optional) If true, filters for models that support image input (vision).', 209 | }, 210 | json_mode: { 211 | type: 'boolean', 212 | description: '(Optional) If true, filters for models that support guaranteed JSON output mode.', 213 | } 214 | } 215 | }, 216 | limit: { 217 | type: 'number', 218 | description: '(Optional) Limits the number of matching models returned in the response. Must be between 1 and 50. 
Defaults to 10.', 219 | minimum: 1, 220 | maximum: 50 221 | } 222 | } 223 | }, 224 | }, 225 | { 226 | name: 'get_model_info', 227 | description: 'Retrieves the complete metadata for a single OpenRouter.ai model specified by its unique ID. Use this when you know the model ID and need its full details (pricing, context limits, capabilities, etc.). Returns a model information object.', 228 | inputSchema: { 229 | type: 'object', 230 | properties: { 231 | model: { 232 | type: 'string', 233 | description: '(Required) The unique identifier string of the OpenRouter.ai model whose details are being requested.', 234 | }, 235 | }, 236 | required: ['model'], 237 | }, 238 | }, 239 | { 240 | name: 'validate_model', 241 | description: 'Verifies if a given model ID exists within the OpenRouter.ai registry. Use this for a quick check of model ID validity before making other API calls. Returns a boolean value (`true` if valid, `false` otherwise).', 242 | inputSchema: { 243 | type: 'object', 244 | properties: { 245 | model: { 246 | type: 'string', 247 | description: '(Required) The unique identifier string of the OpenRouter.ai model to check for validity.', 248 | }, 249 | }, 250 | required: ['model'], 251 | }, 252 | }, 253 | ], 254 | })); 255 | 256 | // Remove explicit return type annotation 257 | this.server.setRequestHandler(CallToolRequestSchema, async (request) => { 258 | // Wrap the entire handler logic in a try...catch 259 | try { 260 | switch (request.params.name) { 261 | case 'chat_completion': 262 | // Add 'as any' to satisfy SDK type checker 263 | return handleChatCompletion({ 264 | params: { 265 | arguments: request.params.arguments as unknown as ChatCompletionToolRequest 266 | } 267 | }, 268 | this.openai, 269 | this.defaultModel, 270 | this.defaultMaxTokens, 271 | this.defaultQuantizations, 272 | this.defaultIgnoredProviders, 273 | // Pass Phase 2 defaults 274 | this.defaultProviderSort, 275 | this.defaultProviderOrder, 276 | this.defaultProviderRequireParameters, 277 | this.defaultProviderDataCollection, 278 | this.defaultProviderAllowFallbacks 279 | ) as any; 280 | 281 | case 'search_models': 282 | // Add 'as any' to satisfy SDK type checker 283 | return handleSearchModels({ 284 | params: { 285 | arguments: request.params.arguments as SearchModelsToolRequest 286 | } 287 | }, this.apiClient, this.modelCache) as any; 288 | 289 | case 'get_model_info': 290 | // Add 'as any' to satisfy SDK type checker 291 | return handleGetModelInfo({ 292 | params: { 293 | arguments: request.params.arguments as unknown as GetModelInfoToolRequest 294 | } 295 | }, this.modelCache) as any; 296 | 297 | case 'validate_model': 298 | // Add 'as any' to satisfy SDK type checker 299 | return handleValidateModel({ 300 | params: { 301 | arguments: request.params.arguments as unknown as ValidateModelToolRequest 302 | } 303 | }, this.modelCache) as any; 304 | 305 | default: 306 | // Return ToolResult for unknown tool 307 | console.warn(`Unknown tool requested: ${request.params.name}`); 308 | return { 309 | isError: true, 310 | content: [{ type: 'text', text: `Error: Tool '${request.params.name}' not found.` }], 311 | } as any; // Add 'as any' 312 | } 313 | } catch (error) { 314 | // Catch unexpected errors within the handler itself 315 | console.error('Unexpected error in CallToolRequest handler:', error); 316 | return { 317 | isError: true, 318 | content: [{ type: 'text', text: 'Error: Internal server error occurred while processing the tool call.' 
}], 319 | } as any; // Add 'as any' 320 | } 321 | }); 322 | } 323 | } -------------------------------------------------------------------------------- /pnpm-lock.yaml: -------------------------------------------------------------------------------- 1 | lockfileVersion: '9.0' 2 | 3 | settings: 4 | autoInstallPeers: true 5 | excludeLinksFromLockfile: false 6 | 7 | importers: 8 | 9 | .: 10 | dependencies: 11 | '@modelcontextprotocol/sdk': 12 | specifier: 1.4.1 13 | version: 1.4.1 14 | axios: 15 | specifier: ^1.7.9 16 | version: 1.8.4 17 | openai: 18 | specifier: ^4.83.0 19 | version: 4.89.0(zod@3.24.2) 20 | typescript: 21 | specifier: ^5.7.3 22 | version: 5.8.2 23 | devDependencies: 24 | '@types/node': 25 | specifier: ^22.13.1 26 | version: 22.13.11 27 | shx: 28 | specifier: ^0.3.4 29 | version: 0.3.4 30 | 31 | packages: 32 | 33 | '@modelcontextprotocol/sdk@1.4.1': 34 | resolution: {integrity: sha512-wS6YC4lkUZ9QpP+/7NBTlVNiEvsnyl0xF7rRusLF+RsG0xDPc/zWR7fEEyhKnnNutGsDAZh59l/AeoWGwIb1+g==} 35 | engines: {node: '>=18'} 36 | 37 | '@types/node-fetch@2.6.12': 38 | resolution: {integrity: sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==} 39 | 40 | '@types/node@18.19.81': 41 | resolution: {integrity: sha512-7KO9oZ2//ivtSsryp0LQUqq79zyGXzwq1WqfywpC9ucjY7YyltMMmxWgtRFRKCxwa7VPxVBVy4kHf5UC1E8Lug==} 42 | 43 | '@types/node@22.13.11': 44 | resolution: {integrity: sha512-iEUCUJoU0i3VnrCmgoWCXttklWcvoCIx4jzcP22fioIVSdTmjgoEvmAO/QPw6TcS9k5FrNgn4w7q5lGOd1CT5g==} 45 | 46 | abort-controller@3.0.0: 47 | resolution: {integrity: sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==} 48 | engines: {node: '>=6.5'} 49 | 50 | agentkeepalive@4.6.0: 51 | resolution: {integrity: sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==} 52 | engines: {node: '>= 8.0.0'} 53 | 54 | asynckit@0.4.0: 55 | resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} 56 | 57 | axios@1.8.4: 58 | resolution: {integrity: sha512-eBSYY4Y68NNlHbHBMdeDmKNtDgXWhQsJcGqzO3iLUM0GraQFSS9cVgPX5I9b3lbdFKyYoAEGAZF1DwhTaljNAw==} 59 | 60 | balanced-match@1.0.2: 61 | resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} 62 | 63 | brace-expansion@1.1.11: 64 | resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} 65 | 66 | bytes@3.1.2: 67 | resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} 68 | engines: {node: '>= 0.8'} 69 | 70 | call-bind-apply-helpers@1.0.2: 71 | resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} 72 | engines: {node: '>= 0.4'} 73 | 74 | combined-stream@1.0.8: 75 | resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} 76 | engines: {node: '>= 0.8'} 77 | 78 | concat-map@0.0.1: 79 | resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} 80 | 81 | content-type@1.0.5: 82 | resolution: {integrity: sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==} 83 | engines: {node: '>= 0.6'} 84 | 85 | delayed-stream@1.0.0: 86 | resolution: {integrity: 
sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} 87 | engines: {node: '>=0.4.0'} 88 | 89 | depd@2.0.0: 90 | resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==} 91 | engines: {node: '>= 0.8'} 92 | 93 | dunder-proto@1.0.1: 94 | resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} 95 | engines: {node: '>= 0.4'} 96 | 97 | es-define-property@1.0.1: 98 | resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} 99 | engines: {node: '>= 0.4'} 100 | 101 | es-errors@1.3.0: 102 | resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} 103 | engines: {node: '>= 0.4'} 104 | 105 | es-object-atoms@1.1.1: 106 | resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} 107 | engines: {node: '>= 0.4'} 108 | 109 | es-set-tostringtag@2.1.0: 110 | resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==} 111 | engines: {node: '>= 0.4'} 112 | 113 | event-target-shim@5.0.1: 114 | resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==} 115 | engines: {node: '>=6'} 116 | 117 | eventsource-parser@3.0.0: 118 | resolution: {integrity: sha512-T1C0XCUimhxVQzW4zFipdx0SficT651NnkR0ZSH3yQwh+mFMdLfgjABVi4YtMTtaL4s168593DaoaRLMqryavA==} 119 | engines: {node: '>=18.0.0'} 120 | 121 | eventsource@3.0.5: 122 | resolution: {integrity: sha512-LT/5J605bx5SNyE+ITBDiM3FxffBiq9un7Vx0EwMDM3vg8sWKx/tO2zC+LMqZ+smAM0F2hblaDZUVZF0te2pSw==} 123 | engines: {node: '>=18.0.0'} 124 | 125 | follow-redirects@1.15.9: 126 | resolution: {integrity: sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==} 127 | engines: {node: '>=4.0'} 128 | peerDependencies: 129 | debug: '*' 130 | peerDependenciesMeta: 131 | debug: 132 | optional: true 133 | 134 | form-data-encoder@1.7.2: 135 | resolution: {integrity: sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==} 136 | 137 | form-data@4.0.2: 138 | resolution: {integrity: sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==} 139 | engines: {node: '>= 6'} 140 | 141 | formdata-node@4.4.1: 142 | resolution: {integrity: sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==} 143 | engines: {node: '>= 12.20'} 144 | 145 | fs.realpath@1.0.0: 146 | resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} 147 | 148 | function-bind@1.1.2: 149 | resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} 150 | 151 | get-intrinsic@1.3.0: 152 | resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} 153 | engines: {node: '>= 0.4'} 154 | 155 | get-proto@1.0.1: 156 | resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} 157 | engines: {node: '>= 0.4'} 158 | 159 | glob@7.2.3: 160 | resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} 161 | 
deprecated: Glob versions prior to v9 are no longer supported 162 | 163 | gopd@1.2.0: 164 | resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} 165 | engines: {node: '>= 0.4'} 166 | 167 | has-symbols@1.1.0: 168 | resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} 169 | engines: {node: '>= 0.4'} 170 | 171 | has-tostringtag@1.0.2: 172 | resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} 173 | engines: {node: '>= 0.4'} 174 | 175 | hasown@2.0.2: 176 | resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} 177 | engines: {node: '>= 0.4'} 178 | 179 | http-errors@2.0.0: 180 | resolution: {integrity: sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==} 181 | engines: {node: '>= 0.8'} 182 | 183 | humanize-ms@1.2.1: 184 | resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} 185 | 186 | iconv-lite@0.6.3: 187 | resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} 188 | engines: {node: '>=0.10.0'} 189 | 190 | inflight@1.0.6: 191 | resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} 192 | deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. 193 | 194 | inherits@2.0.4: 195 | resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} 196 | 197 | interpret@1.4.0: 198 | resolution: {integrity: sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==} 199 | engines: {node: '>= 0.10'} 200 | 201 | is-core-module@2.16.1: 202 | resolution: {integrity: sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==} 203 | engines: {node: '>= 0.4'} 204 | 205 | math-intrinsics@1.1.0: 206 | resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} 207 | engines: {node: '>= 0.4'} 208 | 209 | mime-db@1.52.0: 210 | resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} 211 | engines: {node: '>= 0.6'} 212 | 213 | mime-types@2.1.35: 214 | resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} 215 | engines: {node: '>= 0.6'} 216 | 217 | minimatch@3.1.2: 218 | resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} 219 | 220 | minimist@1.2.8: 221 | resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} 222 | 223 | ms@2.1.3: 224 | resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} 225 | 226 | node-domexception@1.0.0: 227 | resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} 228 | engines: {node: '>=10.5.0'} 229 | 230 | node-fetch@2.7.0: 231 | resolution: 
{integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} 232 | engines: {node: 4.x || >=6.0.0} 233 | peerDependencies: 234 | encoding: ^0.1.0 235 | peerDependenciesMeta: 236 | encoding: 237 | optional: true 238 | 239 | once@1.4.0: 240 | resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} 241 | 242 | openai@4.89.0: 243 | resolution: {integrity: sha512-XNI0q2l8/Os6jmojxaID5EhyQjxZgzR2gWcpEjYWK5hGKwE7AcifxEY7UNwFDDHJQXqeiosQ0CJwQN+rvnwdjA==} 244 | hasBin: true 245 | peerDependencies: 246 | ws: ^8.18.0 247 | zod: ^3.23.8 248 | peerDependenciesMeta: 249 | ws: 250 | optional: true 251 | zod: 252 | optional: true 253 | 254 | path-is-absolute@1.0.1: 255 | resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} 256 | engines: {node: '>=0.10.0'} 257 | 258 | path-parse@1.0.7: 259 | resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} 260 | 261 | proxy-from-env@1.1.0: 262 | resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} 263 | 264 | raw-body@3.0.0: 265 | resolution: {integrity: sha512-RmkhL8CAyCRPXCE28MMH0z2PNWQBNk2Q09ZdxM9IOOXwxwZbN+qbWaatPkdkWIKL2ZVDImrN/pK5HTRz2PcS4g==} 266 | engines: {node: '>= 0.8'} 267 | 268 | rechoir@0.6.2: 269 | resolution: {integrity: sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==} 270 | engines: {node: '>= 0.10'} 271 | 272 | resolve@1.22.10: 273 | resolution: {integrity: sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==} 274 | engines: {node: '>= 0.4'} 275 | hasBin: true 276 | 277 | safer-buffer@2.1.2: 278 | resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} 279 | 280 | setprototypeof@1.2.0: 281 | resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} 282 | 283 | shelljs@0.8.5: 284 | resolution: {integrity: sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==} 285 | engines: {node: '>=4'} 286 | hasBin: true 287 | 288 | shx@0.3.4: 289 | resolution: {integrity: sha512-N6A9MLVqjxZYcVn8hLmtneQWIJtp8IKzMP4eMnx+nqkvXoqinUPCbUFLp2UcWTEIUONhlk0ewxr/jaVGlc+J+g==} 290 | engines: {node: '>=6'} 291 | hasBin: true 292 | 293 | statuses@2.0.1: 294 | resolution: {integrity: sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==} 295 | engines: {node: '>= 0.8'} 296 | 297 | supports-preserve-symlinks-flag@1.0.0: 298 | resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} 299 | engines: {node: '>= 0.4'} 300 | 301 | toidentifier@1.0.1: 302 | resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} 303 | engines: {node: '>=0.6'} 304 | 305 | tr46@0.0.3: 306 | resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} 307 | 308 | typescript@5.8.2: 309 | resolution: {integrity: sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==} 310 | engines: {node: '>=14.17'} 311 | hasBin: true 312 | 313 | undici-types@5.26.5: 314 | resolution: 
{integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} 315 | 316 | undici-types@6.20.0: 317 | resolution: {integrity: sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==} 318 | 319 | unpipe@1.0.0: 320 | resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} 321 | engines: {node: '>= 0.8'} 322 | 323 | web-streams-polyfill@4.0.0-beta.3: 324 | resolution: {integrity: sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==} 325 | engines: {node: '>= 14'} 326 | 327 | webidl-conversions@3.0.1: 328 | resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} 329 | 330 | whatwg-url@5.0.0: 331 | resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} 332 | 333 | wrappy@1.0.2: 334 | resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} 335 | 336 | zod-to-json-schema@3.24.5: 337 | resolution: {integrity: sha512-/AuWwMP+YqiPbsJx5D6TfgRTc4kTLjsh5SOcd4bLsfUg2RcEXrFMJl1DGgdHy2aCfsIA/cr/1JM0xcB2GZji8g==} 338 | peerDependencies: 339 | zod: ^3.24.1 340 | 341 | zod@3.24.2: 342 | resolution: {integrity: sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==} 343 | 344 | snapshots: 345 | 346 | '@modelcontextprotocol/sdk@1.4.1': 347 | dependencies: 348 | content-type: 1.0.5 349 | eventsource: 3.0.5 350 | raw-body: 3.0.0 351 | zod: 3.24.2 352 | zod-to-json-schema: 3.24.5(zod@3.24.2) 353 | 354 | '@types/node-fetch@2.6.12': 355 | dependencies: 356 | '@types/node': 22.13.11 357 | form-data: 4.0.2 358 | 359 | '@types/node@18.19.81': 360 | dependencies: 361 | undici-types: 5.26.5 362 | 363 | '@types/node@22.13.11': 364 | dependencies: 365 | undici-types: 6.20.0 366 | 367 | abort-controller@3.0.0: 368 | dependencies: 369 | event-target-shim: 5.0.1 370 | 371 | agentkeepalive@4.6.0: 372 | dependencies: 373 | humanize-ms: 1.2.1 374 | 375 | asynckit@0.4.0: {} 376 | 377 | axios@1.8.4: 378 | dependencies: 379 | follow-redirects: 1.15.9 380 | form-data: 4.0.2 381 | proxy-from-env: 1.1.0 382 | transitivePeerDependencies: 383 | - debug 384 | 385 | balanced-match@1.0.2: {} 386 | 387 | brace-expansion@1.1.11: 388 | dependencies: 389 | balanced-match: 1.0.2 390 | concat-map: 0.0.1 391 | 392 | bytes@3.1.2: {} 393 | 394 | call-bind-apply-helpers@1.0.2: 395 | dependencies: 396 | es-errors: 1.3.0 397 | function-bind: 1.1.2 398 | 399 | combined-stream@1.0.8: 400 | dependencies: 401 | delayed-stream: 1.0.0 402 | 403 | concat-map@0.0.1: {} 404 | 405 | content-type@1.0.5: {} 406 | 407 | delayed-stream@1.0.0: {} 408 | 409 | depd@2.0.0: {} 410 | 411 | dunder-proto@1.0.1: 412 | dependencies: 413 | call-bind-apply-helpers: 1.0.2 414 | es-errors: 1.3.0 415 | gopd: 1.2.0 416 | 417 | es-define-property@1.0.1: {} 418 | 419 | es-errors@1.3.0: {} 420 | 421 | es-object-atoms@1.1.1: 422 | dependencies: 423 | es-errors: 1.3.0 424 | 425 | es-set-tostringtag@2.1.0: 426 | dependencies: 427 | es-errors: 1.3.0 428 | get-intrinsic: 1.3.0 429 | has-tostringtag: 1.0.2 430 | hasown: 2.0.2 431 | 432 | event-target-shim@5.0.1: {} 433 | 434 | eventsource-parser@3.0.0: {} 435 | 436 | eventsource@3.0.5: 437 | dependencies: 438 | eventsource-parser: 3.0.0 439 | 440 | follow-redirects@1.15.9: {} 441 | 442 | form-data-encoder@1.7.2: {} 
443 | 444 | form-data@4.0.2: 445 | dependencies: 446 | asynckit: 0.4.0 447 | combined-stream: 1.0.8 448 | es-set-tostringtag: 2.1.0 449 | mime-types: 2.1.35 450 | 451 | formdata-node@4.4.1: 452 | dependencies: 453 | node-domexception: 1.0.0 454 | web-streams-polyfill: 4.0.0-beta.3 455 | 456 | fs.realpath@1.0.0: {} 457 | 458 | function-bind@1.1.2: {} 459 | 460 | get-intrinsic@1.3.0: 461 | dependencies: 462 | call-bind-apply-helpers: 1.0.2 463 | es-define-property: 1.0.1 464 | es-errors: 1.3.0 465 | es-object-atoms: 1.1.1 466 | function-bind: 1.1.2 467 | get-proto: 1.0.1 468 | gopd: 1.2.0 469 | has-symbols: 1.1.0 470 | hasown: 2.0.2 471 | math-intrinsics: 1.1.0 472 | 473 | get-proto@1.0.1: 474 | dependencies: 475 | dunder-proto: 1.0.1 476 | es-object-atoms: 1.1.1 477 | 478 | glob@7.2.3: 479 | dependencies: 480 | fs.realpath: 1.0.0 481 | inflight: 1.0.6 482 | inherits: 2.0.4 483 | minimatch: 3.1.2 484 | once: 1.4.0 485 | path-is-absolute: 1.0.1 486 | 487 | gopd@1.2.0: {} 488 | 489 | has-symbols@1.1.0: {} 490 | 491 | has-tostringtag@1.0.2: 492 | dependencies: 493 | has-symbols: 1.1.0 494 | 495 | hasown@2.0.2: 496 | dependencies: 497 | function-bind: 1.1.2 498 | 499 | http-errors@2.0.0: 500 | dependencies: 501 | depd: 2.0.0 502 | inherits: 2.0.4 503 | setprototypeof: 1.2.0 504 | statuses: 2.0.1 505 | toidentifier: 1.0.1 506 | 507 | humanize-ms@1.2.1: 508 | dependencies: 509 | ms: 2.1.3 510 | 511 | iconv-lite@0.6.3: 512 | dependencies: 513 | safer-buffer: 2.1.2 514 | 515 | inflight@1.0.6: 516 | dependencies: 517 | once: 1.4.0 518 | wrappy: 1.0.2 519 | 520 | inherits@2.0.4: {} 521 | 522 | interpret@1.4.0: {} 523 | 524 | is-core-module@2.16.1: 525 | dependencies: 526 | hasown: 2.0.2 527 | 528 | math-intrinsics@1.1.0: {} 529 | 530 | mime-db@1.52.0: {} 531 | 532 | mime-types@2.1.35: 533 | dependencies: 534 | mime-db: 1.52.0 535 | 536 | minimatch@3.1.2: 537 | dependencies: 538 | brace-expansion: 1.1.11 539 | 540 | minimist@1.2.8: {} 541 | 542 | ms@2.1.3: {} 543 | 544 | node-domexception@1.0.0: {} 545 | 546 | node-fetch@2.7.0: 547 | dependencies: 548 | whatwg-url: 5.0.0 549 | 550 | once@1.4.0: 551 | dependencies: 552 | wrappy: 1.0.2 553 | 554 | openai@4.89.0(zod@3.24.2): 555 | dependencies: 556 | '@types/node': 18.19.81 557 | '@types/node-fetch': 2.6.12 558 | abort-controller: 3.0.0 559 | agentkeepalive: 4.6.0 560 | form-data-encoder: 1.7.2 561 | formdata-node: 4.4.1 562 | node-fetch: 2.7.0 563 | optionalDependencies: 564 | zod: 3.24.2 565 | transitivePeerDependencies: 566 | - encoding 567 | 568 | path-is-absolute@1.0.1: {} 569 | 570 | path-parse@1.0.7: {} 571 | 572 | proxy-from-env@1.1.0: {} 573 | 574 | raw-body@3.0.0: 575 | dependencies: 576 | bytes: 3.1.2 577 | http-errors: 2.0.0 578 | iconv-lite: 0.6.3 579 | unpipe: 1.0.0 580 | 581 | rechoir@0.6.2: 582 | dependencies: 583 | resolve: 1.22.10 584 | 585 | resolve@1.22.10: 586 | dependencies: 587 | is-core-module: 2.16.1 588 | path-parse: 1.0.7 589 | supports-preserve-symlinks-flag: 1.0.0 590 | 591 | safer-buffer@2.1.2: {} 592 | 593 | setprototypeof@1.2.0: {} 594 | 595 | shelljs@0.8.5: 596 | dependencies: 597 | glob: 7.2.3 598 | interpret: 1.4.0 599 | rechoir: 0.6.2 600 | 601 | shx@0.3.4: 602 | dependencies: 603 | minimist: 1.2.8 604 | shelljs: 0.8.5 605 | 606 | statuses@2.0.1: {} 607 | 608 | supports-preserve-symlinks-flag@1.0.0: {} 609 | 610 | toidentifier@1.0.1: {} 611 | 612 | tr46@0.0.3: {} 613 | 614 | typescript@5.8.2: {} 615 | 616 | undici-types@5.26.5: {} 617 | 618 | undici-types@6.20.0: {} 619 | 620 | unpipe@1.0.0: {} 621 | 622 | 
web-streams-polyfill@4.0.0-beta.3: {} 623 | 624 | webidl-conversions@3.0.1: {} 625 | 626 | whatwg-url@5.0.0: 627 | dependencies: 628 | tr46: 0.0.3 629 | webidl-conversions: 3.0.1 630 | 631 | wrappy@1.0.2: {} 632 | 633 | zod-to-json-schema@3.24.5(zod@3.24.2): 634 | dependencies: 635 | zod: 3.24.2 636 | 637 | zod@3.24.2: {} 638 | --------------------------------------------------------------------------------