├── .env.example ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── package-lock.json ├── package.json ├── smithery.yaml ├── src ├── index.ts └── providers.json └── tsconfig.json /.env.example: -------------------------------------------------------------------------------- 1 | # API Provider Configuration 2 | # You can use either OpenRouter for all models, or configure direct API access to specific providers 3 | 4 | # Option 1: OpenRouter (recommended for easy access to multiple models) 5 | OPENROUTER_API_KEY=your_openrouter_api_key_here 6 | 7 | # Option 2: Direct API Access 8 | # Uncomment and configure these if you want to use direct API access instead of OpenRouter 9 | 10 | # Anthropic API (for Claude models) 11 | ANTHROPIC_API_KEY=your_anthropic_api_key_here 12 | 13 | # DeepSeek API 14 | DEEPSEEK_API_KEY=your_deepseek_api_key_here 15 | 16 | # OpenAI API 17 | OPENAI_API_KEY=your_openai_api_key_here 18 | OPENAI_API_BASE_URL=https://api.openai.com/v1 19 | 20 | # Google Gemini API 21 | GEMINI_API_KEY=your_gemini_api_key_here 22 | 23 | # Vertex AI (for Gemini models through Vertex AI) 24 | VERTEX_PROJECT_ID=your_vertex_ai_project_id_here 25 | VERTEX_REGION=your_vertex_ai_region_here 26 | 27 | # Model Selection 28 | # Configure which models to use for reasoning and coding tasks 29 | # For OpenRouter, use the full model name (e.g., anthropic/claude-3.5-sonnet) 30 | # For direct API access, use the model ID (e.g., claude-3-5-sonnet-20241022) 31 | # The code initializes clients for 32 | # - openrouter 33 | # - anthropic 34 | # - deepseek 35 | # - openai 36 | # - gemini 37 | # - vertex 38 | 39 | 40 | # Reasoning Model (default: DeepSeek Reasoner) 41 | REASONING_PROVIDER=deepseek # Options: openrouter, anthropic, deepseek, openai 42 | REASONING_MODEL=deepseek-reasoner 43 | 44 | # Coding Model (default: Claude) 45 | CODING_PROVIDER=openrouter # Options: openrouter, anthropic, openai 46 | CODING_MODEL=anthropic/claude-3.5-sonnet 47 | 48 | # See src/providers.json 
for a complete list of available models and their capabilities 49 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | node_modules/ 3 | 4 | # Build output 5 | build/ 6 | dist/ 7 | 8 | # Logs 9 | *.log 10 | npm-debug.log* 11 | yarn-debug.log* 12 | yarn-error.log* 13 | 14 | # Environment variables 15 | .env 16 | .env.local 17 | .env.*.local 18 | 19 | # Editor directories and files 20 | .idea/ 21 | .vscode/ 22 | *.swp 23 | *.swo 24 | *.swn 25 | .DS_Store 26 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile 2 | # Stage 1: Build the application using Node.js 3 | FROM node:18-alpine AS builder 4 | 5 | # Set working directory 6 | WORKDIR /app 7 | 8 | # Define build arguments (with defaults) for all potential environment variables 9 | ARG OPENROUTER_API_KEY="" 10 | ARG ANTHROPIC_API_KEY="" 11 | ARG DEEPSEEK_API_KEY="" 12 | ARG OPENAI_API_KEY="" 13 | ARG OPENAI_API_BASE_URL="" 14 | ARG GEMINI_API_KEY="" 15 | ARG VERTEX_PROJECT_ID="" 16 | ARG VERTEX_REGION="" 17 | ARG REASONING_PROVIDER="" 18 | ARG REASONING_MODEL="" 19 | ARG CODING_PROVIDER="" 20 | ARG CODING_MODEL="" 21 | ARG NODE_ENV="production" 22 | 23 | # Copy package.json and package-lock.json to the working directory 24 | COPY package.json package-lock.json ./ 25 | 26 | # Install dependencies 27 | RUN npm install 28 | 29 | # Copy source files 30 | COPY src ./src 31 | 32 | # Build the project 33 | RUN npm run build 34 | 35 | # Stage 2: Create a lightweight image for production 36 | FROM node:18-alpine 37 | 38 | # Set working directory 39 | WORKDIR /app 40 | 41 | # Copy built files from builder 42 | COPY --from=builder /app/build ./build 43 | 44 | # Copy 
necessary files 45 | COPY package.json package-lock.json ./ 46 | 47 | # Install only production dependencies 48 | RUN npm install --omit=dev 49 | 50 | # Re-declare build arguments in this stage: ARG values declared before the first FROM or in the builder stage do not carry across a FROM boundary, so without these the ENV lines below would all expand to empty strings 51 | ARG OPENROUTER_API_KEY="" 52 | ARG ANTHROPIC_API_KEY="" 53 | ARG DEEPSEEK_API_KEY="" 54 | ARG OPENAI_API_KEY="" 55 | ARG OPENAI_API_BASE_URL="" 56 | ARG GEMINI_API_KEY="" 57 | ARG VERTEX_PROJECT_ID="" 58 | ARG VERTEX_REGION="" 59 | ARG REASONING_PROVIDER="" 60 | ARG REASONING_MODEL="" 61 | ARG CODING_PROVIDER="" 62 | ARG CODING_MODEL="" 63 | ARG NODE_ENV="production" 64 | 65 | # Environment variables (set from ARGs); note that baking API keys into the image persists them in the image layers — prefer passing them at `docker run -e ...` time 66 | ENV OPENROUTER_API_KEY=$OPENROUTER_API_KEY 67 | ENV ANTHROPIC_API_KEY=$ANTHROPIC_API_KEY 68 | ENV DEEPSEEK_API_KEY=$DEEPSEEK_API_KEY 69 | ENV OPENAI_API_KEY=$OPENAI_API_KEY 70 | ENV OPENAI_API_BASE_URL=$OPENAI_API_BASE_URL 71 | ENV GEMINI_API_KEY=$GEMINI_API_KEY 72 | ENV VERTEX_PROJECT_ID=$VERTEX_PROJECT_ID 73 | ENV VERTEX_REGION=$VERTEX_REGION 74 | ENV REASONING_PROVIDER=$REASONING_PROVIDER 75 | ENV REASONING_MODEL=$REASONING_MODEL 76 | ENV CODING_PROVIDER=$CODING_PROVIDER 77 | ENV CODING_MODEL=$CODING_MODEL 78 | ENV NODE_ENV=$NODE_ENV 79 | 80 | # Entrypoint command to run the MCP server (no CMD: with an exec-form ENTRYPOINT, a duplicate exec-form CMD would be appended as extra arguments, i.e. `node build/index.js node build/index.js`) 81 | ENTRYPOINT ["node", "build/index.js"] 82 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Skirano 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Reasoning-Coder MCP Server 2 | [![smithery badge](https://smithery.ai/badge/@mario-andreschak/mcp-reasoning-coding)](https://smithery.ai/server/@mario-andreschak/mcp-reasoning-coding) 3 | 4 | 5 | [![smithery badge](https://smithery.ai/badge/@newideas99/Deepseek-Thinking-Claude-3.5-Sonnet-CLINE-MCP)](https://smithery.ai/server/@newideas99/Deepseek-Thinking-Claude-3.5-Sonnet-CLINE-MCP) 6 | 7 | This is a Model Context Protocol (MCP) server that provides a flexible and configurable two-stage reasoning and response generation system. It's a fork of the original [Deepseek-Thinking-Claude-3.5-Sonnet-CLINE-MCP](https://smithery.ai/server/@newideas99/Deepseek-Thinking-Claude-3.5-Sonnet-CLINE-MCP) project, significantly expanding its capabilities by supporting multiple AI providers and models for both reasoning and coding/response generation. 8 | 9 | ## Key Changes and Improvements Compared to the Original 10 | 11 | The original project was specifically designed to use DeepSeek R1 for reasoning and Claude 3.5 Sonnet for response generation, both accessed exclusively through OpenRouter. This fork generalizes the architecture to support a wider range of providers and models. Here's a breakdown of the key differences: 12 | 13 | * **Multi-Provider Support:** Instead of being locked into OpenRouter, this fork can now use: 14 | * OpenRouter 15 | * OpenAI 16 | * Anthropic (Claude) 17 | * DeepSeek 18 | * Google Gemini 19 | * Google Vertex AI 20 | 21 | * **Configurable Reasoning and Coding Models:** The original hardcoded DeepSeek for reasoning and Claude for the final response.
This fork allows you to configure *both* the reasoning and coding/response generation models independently. You can mix and match providers. For example, you could use Gemini for reasoning and OpenAI for the final response. 22 | 23 | * **`providers.json` Configuration:** A new `providers.json` file is introduced to manage the available models and their specific parameters (temperature, top_p, etc.) for each provider. This makes it easy to add new models or tweak existing ones without modifying the core code. 24 | 25 | * **Environment Variable Configuration:** The choice of reasoning and coding providers/models is now primarily controlled through environment variables: 26 | * `REASONING_PROVIDER`: Specifies the provider for the reasoning stage (e.g., `openai`, `gemini`, `deepseek`, `openrouter`, `anthropic`, `vertex`). 27 | * `REASONING_MODEL`: Specifies the model to use for reasoning (e.g., `gpt-4`, `gemini-pro`, `deepseek/deepseek-r1`). 28 | * `CODING_PROVIDER`: Specifies the provider for the coding/response generation stage. 29 | * `CODING_MODEL`: Specifies the model for the final response. 30 | 31 | * **Dynamic Client Initialization:** The code now dynamically initializes only the necessary API clients based on the `REASONING_PROVIDER` and `CODING_PROVIDER` settings. This avoids unnecessary initialization and dependencies. 32 | 33 | * **Unified `getReasoning` and `getFinalResponse`:** The provider-specific logic is abstracted into `getReasoning` and `getFinalResponse` functions, making the core task processing logic provider-agnostic. 34 | 35 | * **Retains core MCP structure:** The fork retains the core structure of using MCP, so it will integrate with any MCP client, like the original implementation. It defines the `generate_response` and `check_response_status` tools in the same way. 36 | 37 | * **Retains Cline integration:** Like the original, the fork is intended for integration with Cline, the Claude Desktop extension. 
38 | 39 | * **Retains Conversation History Feature:** The forked implementation has kept the feature of using the conversation history of Cline for context. 40 | 41 | * **No Hardcoded Models:** There are no hardcoded models in the new implementation, the models are defined in the .env file and providers.json 42 | 43 | ## Features 44 | 45 | * **Two-Stage Processing:** 46 | * Uses a configurable model for initial reasoning (e.g., DeepSeek, GPT-4, Gemini Pro). 47 | * Uses a configurable model for final response/code generation (e.g., Claude, GPT-4, DeepSeek Chat). 48 | * Injects the reasoning from the first stage into the context of the second stage. 49 | 50 | * **Flexible Provider and Model Selection:** 51 | * Choose from OpenRouter, OpenAI, Anthropic, DeepSeek, Gemini, and Vertex AI for both reasoning and coding stages. 52 | * Easily configure models and their parameters via `providers.json` and environment variables. 53 | 54 | * **Smart Conversation Management (Inherited from Original):** 55 | * Detects active Cline conversations using file modification times. 56 | * Handles multiple concurrent conversations. 57 | * Filters out ended conversations automatically. 58 | * Supports context clearing. 59 | 60 | * **Optimized Parameters (Configurable):** 61 | * Model-specific context limits are respected (e.g., 50,000 characters for DeepSeek reasoning, larger limits for response models). 62 | * Parameters like `temperature`, `top_p`, and `repetition_penalty` are configurable per model in `providers.json`. 63 | 64 | * **Response Polling (Inherited from Original):** 65 | * Uses a polling mechanism with `generate_response` (to get a task ID) and `check_response_status` (to check the status). This handles the asynchronous nature of LLM calls. 
66 | 67 | ## Installation 68 | 69 | ### Installing via Smithery 70 | 71 | To install Reasoning-Coder for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@mario-andreschak/mcp-reasoning-coding): 72 | 73 | ```bash 74 | npx -y @smithery/cli install @mario-andreschak/mcp-reasoning-coding --client claude 75 | ``` 76 | 77 | ### Manual Installation 78 | 1. **Clone this repository**: 79 | 80 | ```bash 81 | git clone https://github.com/mario-andreschak/mcp-reasoning-coding.git 82 | cd mcp-reasoning-coding 83 | ``` 84 | 85 | 2. **Install Dependencies:** 86 | 87 | ```bash 88 | npm install 89 | ``` 90 | 91 | 3. **Create a `.env` File:** This file will hold your API keys and provider/model selections. Example: 92 | 93 | ```env 94 | # --- Required API Keys (at least one) --- 95 | OPENROUTER_API_KEY=your_openrouter_key 96 | OPENAI_API_KEY=your_openai_key 97 | ANTHROPIC_API_KEY=your_anthropic_key 98 | DEEPSEEK_API_KEY=your_deepseek_key 99 | GEMINI_API_KEY=your_gemini_key 100 | VERTEX_PROJECT_ID=your_vertex_project_id # For Vertex AI 101 | VERTEX_REGION=your_vertex_region # For Vertex AI 102 | 103 | # --- Provider and Model Selection --- 104 | REASONING_PROVIDER=openrouter 105 | REASONING_MODEL=deepseek/deepseek-r1 106 | CODING_PROVIDER=openrouter 107 | CODING_MODEL=anthropic/claude-3.5-sonnet:beta 108 | ``` 109 | 110 | **Important:** You only need to provide API keys for the providers you intend to use. If you're only using OpenAI, you don't need an `OPENROUTER_API_KEY`, for example. 111 | 112 | 4. **`providers.json` File:** This file defines the available models for each provider and their parameters. Place this file in the `src` folder.
Example (`src/providers.json`): 113 | 114 | ```json 115 | { 116 | "openrouter": { 117 | "deepseek/deepseek-r1": { 118 | "temperature": 0.7, 119 | "top_p": 1 120 | }, 121 | "anthropic/claude-3.5-sonnet:beta": { 122 | "temperature": 0.7, 123 | "top_p": 1, 124 | "repetition_penalty": 1 125 | }, 126 | "deepseek/deepseek-chat":{ 127 | "temperature": 0.7, 128 | "top_p": 1 129 | } 130 | }, 131 | "openai": { 132 | "gpt-4": { 133 | "temperature": 0.7, 134 | "top_p": 1 135 | }, 136 | "gpt-3.5-turbo": { 137 | "temperature": 0.7, 138 | "top_p": 1 139 | } 140 | }, 141 | "anthropic": { 142 | "claude-3-opus-20240229": { 143 | "temperature": 0.7, 144 | "top_p": 1 145 | } 146 | }, 147 | "deepseek": { 148 | "deepseek-coder": { 149 | "temperature": 0.7, 150 | "top_p": 1 151 | } 152 | }, 153 | "gemini":{ 154 | "gemini-pro":{ 155 | 156 | } 157 | }, 158 | "vertex": { 159 | "gemini-1.5-pro-002":{ 160 | 161 | } 162 | } 163 | } 164 | ``` 165 | * **`extra_params`:** You can add provider-specific parameters within the model definition using the `extra_params` key. Consult the API documentation for each provider to see what options are available. 166 | 167 | 5. **Build the server:** 168 | 169 | ```bash 170 | npm run build 171 | ``` 172 | 173 | ## Usage with Cline 174 | 175 | Add to your Cline MCP settings (usually in `~/.vscode/globalStorage/saoudrizwan.claude-dev/settings/cline_mcp_settings.json`): 176 | 177 | ```json 178 | { 179 | "mcpServers": { 180 | "reasoning-coding": { 181 | "command": "/path/to/node", 182 | "args": ["/path/to/your-fork/build/index.js"], // Adjust path 183 | "env": { 184 | // Your .env variables will be inherited, so no need to duplicate them here 185 | }, 186 | "disabled": false, 187 | "autoApprove": [] 188 | } 189 | } 190 | } 191 | ``` 192 | 193 | Replace `/path/to/node` and `/path/to/your-fork/build/index.js` with the correct paths. 
194 | 195 | ## Tool Usage (Same as Original) 196 | 197 | The server provides the same two tools as the original: 198 | 199 | ### `generate_response` 200 | 201 | Generates a response using the configured reasoning and coding models. 202 | 203 | ```typescript 204 | { 205 | "prompt": string, // Required: The question or prompt 206 | "showReasoning"?: boolean, // Optional: Show the reasoning process 207 | "clearContext"?: boolean, // Optional: Clear conversation history 208 | "includeHistory"?: boolean // Optional: Include Cline conversation history 209 | } 210 | ``` 211 | 212 | ### `check_response_status` 213 | 214 | Checks the status of a response generation task. 215 | 216 | ```typescript 217 | { 218 | "taskId": string // Required: The task ID from generate_response 219 | } 220 | ``` 221 | 222 | ### Response Polling (Same as Original) 223 | 224 | 1. **Initial Request:** Call `generate_response` to get a `taskId`. 225 | 226 | ```typescript 227 | const result = await use_mcp_tool({ 228 | server_name: "reasoning-coding", 229 | tool_name: "generate_response", 230 | arguments: { 231 | prompt: "Explain the theory of relativity.", 232 | showReasoning: true 233 | } 234 | }); 235 | 236 | const taskId = JSON.parse(result.content[0].text).taskId; 237 | ``` 238 | 239 | 2. **Status Checking:** Poll `check_response_status` with the `taskId` until the status is `complete` (or `error`). 240 | 241 | ```typescript 242 | const status = await use_mcp_tool({ 243 | server_name: "reasoning-coding", 244 | tool_name: "check_response_status", 245 | arguments: { taskId } 246 | }); 247 | 248 | // Example status response when complete: 249 | { 250 | "status": "complete", 251 | "reasoning": "...", // If showReasoning was true 252 | "response": "..." 
// The final response 253 | "error": undefined // Will have a value if an error occurred 254 | } 255 | ``` 256 | 257 | ## Development 258 | 259 | For development with auto-rebuild: 260 | ```bash 261 | npm run watch 262 | ``` 263 | 264 | ## How It Works (Expanded) 265 | 266 | 1. **Reasoning Stage:** 267 | * The `getReasoning` function is called with the user's prompt (and potentially Cline conversation history). 268 | * Based on the `REASONING_PROVIDER` environment variable, the appropriate provider-specific function (e.g., `getReasoningOpenAI`, `getReasoningGemini`) is called. 269 | * The selected model (from `REASONING_MODEL`) is used to generate the reasoning. 270 | * The reasoning is returned. 271 | 272 | 2. **Response Stage:** 273 | * The `getFinalResponse` function is called with the original prompt and the reasoning from the first stage. 274 | * Based on the `CODING_PROVIDER` environment variable, the appropriate provider-specific function is called. 275 | * The selected model (from `CODING_MODEL`) generates the final response, incorporating the reasoning. 276 | * The response is returned. 277 | 278 | 3. **MCP Handling:** The `ReasoningCodingServer` class handles the MCP communication, task management, and context management. It uses the `getReasoning` and `getFinalResponse` functions to orchestrate the two-stage process. 279 | 280 | ## License 281 | 282 | MIT License - See LICENSE file for details. 283 | 284 | ## Credits 285 | 286 | * Based on the original Deepseek-Thinking-Claude-3.5-Sonnet-CLINE-MCP project. 287 | * Inspired by the RAT (Retrieval Augmented Thinking) concept by [Skirano](https://x.com/skirano/status/1881922469411643413). 288 | -------------------------------------------------------------------------------- /package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "deepseek-thinking-claude-3-5-sonnet-cline-mcp", 3 | "version": "0.1.0", 4 | "lockfileVersion": 3, 5 | "requires": true, 6 | "packages": { 7 | "": { 8 | "name": "deepseek-thinking-claude-3-5-sonnet-cline-mcp", 9 | "version": "0.1.0", 10 | "dependencies": { 11 | "@anthropic-ai/sdk": "^0.36.2", 12 | "@google-cloud/vertexai": "^1.9.3", 13 | "@google/generative-ai": "^0.21.0", 14 | "@modelcontextprotocol/sdk": "0.6.0", 15 | "dotenv": "^16.4.7", 16 | "openai": "^4.80.1", 17 | "uuid": "^11.0.5" 18 | }, 19 | "bin": { 20 | "deepseek-thinking-claude-mcp": "build/index.js" 21 | }, 22 | "devDependencies": { 23 | "@types/node": "^20.11.24", 24 | "@types/uuid": "^10.0.0", 25 | "typescript": "^5.3.3" 26 | } 27 | }, 28 | "node_modules/@anthropic-ai/sdk": { 29 | "version": "0.36.2", 30 | "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.36.2.tgz", 31 | "integrity": "sha512-+/DcVCGoPtUBOPO+1w+7YPdw9Xzt2M5GW6LpSxeGwDfdUSoJbyezGJvGEtdYjV/XXyGx5pf852S4+PG+3MUEsA==", 32 | "license": "MIT", 33 | "dependencies": { 34 | "@types/node": "^18.11.18", 35 | "@types/node-fetch": "^2.6.4", 36 | "abort-controller": "^3.0.0", 37 | "agentkeepalive": "^4.2.1", 38 | "form-data-encoder": "1.7.2", 39 | "formdata-node":
"^4.3.2", 40 | "node-fetch": "^2.6.7" 41 | } 42 | }, 43 | "node_modules/@anthropic-ai/sdk/node_modules/@types/node": { 44 | "version": "18.19.74", 45 | "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.74.tgz", 46 | "integrity": "sha512-HMwEkkifei3L605gFdV+/UwtpxP6JSzM+xFk2Ia6DNFSwSVBRh9qp5Tgf4lNFOMfPVuU0WnkcWpXZpgn5ufO4A==", 47 | "license": "MIT", 48 | "dependencies": { 49 | "undici-types": "~5.26.4" 50 | } 51 | }, 52 | "node_modules/@anthropic-ai/sdk/node_modules/undici-types": { 53 | "version": "5.26.5", 54 | "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", 55 | "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", 56 | "license": "MIT" 57 | }, 58 | "node_modules/@google-cloud/vertexai": { 59 | "version": "1.9.3", 60 | "resolved": "https://registry.npmjs.org/@google-cloud/vertexai/-/vertexai-1.9.3.tgz", 61 | "integrity": "sha512-35o5tIEMLW3JeFJOaaMNR2e5sq+6rpnhrF97PuAxeOm0GlqVTESKhkGj7a5B5mmJSSSU3hUfIhcQCRRsw4Ipzg==", 62 | "license": "Apache-2.0", 63 | "dependencies": { 64 | "google-auth-library": "^9.1.0" 65 | }, 66 | "engines": { 67 | "node": ">=18.0.0" 68 | } 69 | }, 70 | "node_modules/@google/generative-ai": { 71 | "version": "0.21.0", 72 | "resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.21.0.tgz", 73 | "integrity": "sha512-7XhUbtnlkSEZK15kN3t+tzIMxsbKm/dSkKBFalj+20NvPKe1kBY7mR2P7vuijEn+f06z5+A8bVGKO0v39cr6Wg==", 74 | "license": "Apache-2.0", 75 | "engines": { 76 | "node": ">=18.0.0" 77 | } 78 | }, 79 | "node_modules/@modelcontextprotocol/sdk": { 80 | "version": "0.6.0", 81 | "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-0.6.0.tgz", 82 | "integrity": "sha512-9rsDudGhDtMbvxohPoMMyAUOmEzQsOK+XFchh6gZGqo8sx9sBuZQs+CUttXqa8RZXKDaJRCN2tUtgGof7jRkkw==", 83 | "license": "MIT", 84 | "dependencies": { 85 | "content-type": "^1.0.5", 86 | "raw-body": "^3.0.0", 87 | "zod": "^3.23.8" 88 | } 89 | }, 90 | 
"node_modules/@types/node": { 91 | "version": "20.17.16", 92 | "resolved": "https://registry.npmjs.org/@types/node/-/node-20.17.16.tgz", 93 | "integrity": "sha512-vOTpLduLkZXePLxHiHsBLp98mHGnl8RptV4YAO3HfKO5UHjDvySGbxKtpYfy8Sx5+WKcgc45qNreJJRVM3L6mw==", 94 | "license": "MIT", 95 | "dependencies": { 96 | "undici-types": "~6.19.2" 97 | } 98 | }, 99 | "node_modules/@types/node-fetch": { 100 | "version": "2.6.12", 101 | "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.12.tgz", 102 | "integrity": "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==", 103 | "license": "MIT", 104 | "dependencies": { 105 | "@types/node": "*", 106 | "form-data": "^4.0.0" 107 | } 108 | }, 109 | "node_modules/@types/uuid": { 110 | "version": "10.0.0", 111 | "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-10.0.0.tgz", 112 | "integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==", 113 | "dev": true, 114 | "license": "MIT" 115 | }, 116 | "node_modules/abort-controller": { 117 | "version": "3.0.0", 118 | "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", 119 | "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", 120 | "license": "MIT", 121 | "dependencies": { 122 | "event-target-shim": "^5.0.0" 123 | }, 124 | "engines": { 125 | "node": ">=6.5" 126 | } 127 | }, 128 | "node_modules/agent-base": { 129 | "version": "7.1.3", 130 | "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", 131 | "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", 132 | "license": "MIT", 133 | "engines": { 134 | "node": ">= 14" 135 | } 136 | }, 137 | "node_modules/agentkeepalive": { 138 | "version": "4.6.0", 139 | "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz", 140 | "integrity": 
"sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==", 141 | "license": "MIT", 142 | "dependencies": { 143 | "humanize-ms": "^1.2.1" 144 | }, 145 | "engines": { 146 | "node": ">= 8.0.0" 147 | } 148 | }, 149 | "node_modules/asynckit": { 150 | "version": "0.4.0", 151 | "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", 152 | "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", 153 | "license": "MIT" 154 | }, 155 | "node_modules/base64-js": { 156 | "version": "1.5.1", 157 | "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", 158 | "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", 159 | "funding": [ 160 | { 161 | "type": "github", 162 | "url": "https://github.com/sponsors/feross" 163 | }, 164 | { 165 | "type": "patreon", 166 | "url": "https://www.patreon.com/feross" 167 | }, 168 | { 169 | "type": "consulting", 170 | "url": "https://feross.org/support" 171 | } 172 | ], 173 | "license": "MIT" 174 | }, 175 | "node_modules/bignumber.js": { 176 | "version": "9.1.2", 177 | "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.1.2.tgz", 178 | "integrity": "sha512-2/mKyZH9K85bzOEfhXDBFZTGd1CTs+5IHpeFQo9luiBG7hghdC851Pj2WAhb6E3R6b9tZj/XKhbg4fum+Kepug==", 179 | "license": "MIT", 180 | "engines": { 181 | "node": "*" 182 | } 183 | }, 184 | "node_modules/buffer-equal-constant-time": { 185 | "version": "1.0.1", 186 | "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", 187 | "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", 188 | "license": "BSD-3-Clause" 189 | }, 190 | "node_modules/bytes": { 191 | "version": "3.1.2", 192 | "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", 193 | "integrity": 
"sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", 194 | "license": "MIT", 195 | "engines": { 196 | "node": ">= 0.8" 197 | } 198 | }, 199 | "node_modules/combined-stream": { 200 | "version": "1.0.8", 201 | "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", 202 | "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", 203 | "license": "MIT", 204 | "dependencies": { 205 | "delayed-stream": "~1.0.0" 206 | }, 207 | "engines": { 208 | "node": ">= 0.8" 209 | } 210 | }, 211 | "node_modules/content-type": { 212 | "version": "1.0.5", 213 | "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", 214 | "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", 215 | "license": "MIT", 216 | "engines": { 217 | "node": ">= 0.6" 218 | } 219 | }, 220 | "node_modules/debug": { 221 | "version": "4.4.0", 222 | "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", 223 | "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", 224 | "license": "MIT", 225 | "dependencies": { 226 | "ms": "^2.1.3" 227 | }, 228 | "engines": { 229 | "node": ">=6.0" 230 | }, 231 | "peerDependenciesMeta": { 232 | "supports-color": { 233 | "optional": true 234 | } 235 | } 236 | }, 237 | "node_modules/delayed-stream": { 238 | "version": "1.0.0", 239 | "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", 240 | "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", 241 | "license": "MIT", 242 | "engines": { 243 | "node": ">=0.4.0" 244 | } 245 | }, 246 | "node_modules/depd": { 247 | "version": "2.0.0", 248 | "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", 249 | "integrity": 
"sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", 250 | "license": "MIT", 251 | "engines": { 252 | "node": ">= 0.8" 253 | } 254 | }, 255 | "node_modules/dotenv": { 256 | "version": "16.4.7", 257 | "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", 258 | "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", 259 | "license": "BSD-2-Clause", 260 | "engines": { 261 | "node": ">=12" 262 | }, 263 | "funding": { 264 | "url": "https://dotenvx.com" 265 | } 266 | }, 267 | "node_modules/ecdsa-sig-formatter": { 268 | "version": "1.0.11", 269 | "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", 270 | "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", 271 | "license": "Apache-2.0", 272 | "dependencies": { 273 | "safe-buffer": "^5.0.1" 274 | } 275 | }, 276 | "node_modules/event-target-shim": { 277 | "version": "5.0.1", 278 | "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", 279 | "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", 280 | "license": "MIT", 281 | "engines": { 282 | "node": ">=6" 283 | } 284 | }, 285 | "node_modules/extend": { 286 | "version": "3.0.2", 287 | "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", 288 | "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", 289 | "license": "MIT" 290 | }, 291 | "node_modules/form-data": { 292 | "version": "4.0.1", 293 | "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.1.tgz", 294 | "integrity": "sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==", 295 | "license": "MIT", 296 | "dependencies": { 297 | "asynckit": "^0.4.0", 298 | "combined-stream": "^1.0.8", 299 | 
"mime-types": "^2.1.12" 300 | }, 301 | "engines": { 302 | "node": ">= 6" 303 | } 304 | }, 305 | "node_modules/form-data-encoder": { 306 | "version": "1.7.2", 307 | "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", 308 | "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==", 309 | "license": "MIT" 310 | }, 311 | "node_modules/formdata-node": { 312 | "version": "4.4.1", 313 | "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", 314 | "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", 315 | "license": "MIT", 316 | "dependencies": { 317 | "node-domexception": "1.0.0", 318 | "web-streams-polyfill": "4.0.0-beta.3" 319 | }, 320 | "engines": { 321 | "node": ">= 12.20" 322 | } 323 | }, 324 | "node_modules/gaxios": { 325 | "version": "6.7.1", 326 | "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-6.7.1.tgz", 327 | "integrity": "sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==", 328 | "license": "Apache-2.0", 329 | "dependencies": { 330 | "extend": "^3.0.2", 331 | "https-proxy-agent": "^7.0.1", 332 | "is-stream": "^2.0.0", 333 | "node-fetch": "^2.6.9", 334 | "uuid": "^9.0.1" 335 | }, 336 | "engines": { 337 | "node": ">=14" 338 | } 339 | }, 340 | "node_modules/gaxios/node_modules/uuid": { 341 | "version": "9.0.1", 342 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", 343 | "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", 344 | "funding": [ 345 | "https://github.com/sponsors/broofa", 346 | "https://github.com/sponsors/ctavan" 347 | ], 348 | "license": "MIT", 349 | "bin": { 350 | "uuid": "dist/bin/uuid" 351 | } 352 | }, 353 | "node_modules/gcp-metadata": { 354 | "version": "6.1.1", 355 | "resolved": 
"https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-6.1.1.tgz", 356 | "integrity": "sha512-a4tiq7E0/5fTjxPAaH4jpjkSv/uCaU2p5KC6HVGrvl0cDjA8iBZv4vv1gyzlmK0ZUKqwpOyQMKzZQe3lTit77A==", 357 | "license": "Apache-2.0", 358 | "dependencies": { 359 | "gaxios": "^6.1.1", 360 | "google-logging-utils": "^0.0.2", 361 | "json-bigint": "^1.0.0" 362 | }, 363 | "engines": { 364 | "node": ">=14" 365 | } 366 | }, 367 | "node_modules/google-auth-library": { 368 | "version": "9.15.1", 369 | "resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-9.15.1.tgz", 370 | "integrity": "sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng==", 371 | "license": "Apache-2.0", 372 | "dependencies": { 373 | "base64-js": "^1.3.0", 374 | "ecdsa-sig-formatter": "^1.0.11", 375 | "gaxios": "^6.1.1", 376 | "gcp-metadata": "^6.1.0", 377 | "gtoken": "^7.0.0", 378 | "jws": "^4.0.0" 379 | }, 380 | "engines": { 381 | "node": ">=14" 382 | } 383 | }, 384 | "node_modules/google-logging-utils": { 385 | "version": "0.0.2", 386 | "resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-0.0.2.tgz", 387 | "integrity": "sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ==", 388 | "license": "Apache-2.0", 389 | "engines": { 390 | "node": ">=14" 391 | } 392 | }, 393 | "node_modules/gtoken": { 394 | "version": "7.1.0", 395 | "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-7.1.0.tgz", 396 | "integrity": "sha512-pCcEwRi+TKpMlxAQObHDQ56KawURgyAf6jtIY046fJ5tIv3zDe/LEIubckAO8fj6JnAxLdmWkUfNyulQ2iKdEw==", 397 | "license": "MIT", 398 | "dependencies": { 399 | "gaxios": "^6.0.0", 400 | "jws": "^4.0.0" 401 | }, 402 | "engines": { 403 | "node": ">=14.0.0" 404 | } 405 | }, 406 | "node_modules/http-errors": { 407 | "version": "2.0.0", 408 | "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", 409 | "integrity": 
"sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", 410 | "license": "MIT", 411 | "dependencies": { 412 | "depd": "2.0.0", 413 | "inherits": "2.0.4", 414 | "setprototypeof": "1.2.0", 415 | "statuses": "2.0.1", 416 | "toidentifier": "1.0.1" 417 | }, 418 | "engines": { 419 | "node": ">= 0.8" 420 | } 421 | }, 422 | "node_modules/https-proxy-agent": { 423 | "version": "7.0.6", 424 | "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", 425 | "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", 426 | "license": "MIT", 427 | "dependencies": { 428 | "agent-base": "^7.1.2", 429 | "debug": "4" 430 | }, 431 | "engines": { 432 | "node": ">= 14" 433 | } 434 | }, 435 | "node_modules/humanize-ms": { 436 | "version": "1.2.1", 437 | "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", 438 | "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", 439 | "license": "MIT", 440 | "dependencies": { 441 | "ms": "^2.0.0" 442 | } 443 | }, 444 | "node_modules/iconv-lite": { 445 | "version": "0.6.3", 446 | "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", 447 | "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", 448 | "license": "MIT", 449 | "dependencies": { 450 | "safer-buffer": ">= 2.1.2 < 3.0.0" 451 | }, 452 | "engines": { 453 | "node": ">=0.10.0" 454 | } 455 | }, 456 | "node_modules/inherits": { 457 | "version": "2.0.4", 458 | "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", 459 | "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", 460 | "license": "ISC" 461 | }, 462 | "node_modules/is-stream": { 463 | "version": "2.0.1", 464 | "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", 
465 | "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", 466 | "license": "MIT", 467 | "engines": { 468 | "node": ">=8" 469 | }, 470 | "funding": { 471 | "url": "https://github.com/sponsors/sindresorhus" 472 | } 473 | }, 474 | "node_modules/json-bigint": { 475 | "version": "1.0.0", 476 | "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", 477 | "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", 478 | "license": "MIT", 479 | "dependencies": { 480 | "bignumber.js": "^9.0.0" 481 | } 482 | }, 483 | "node_modules/jwa": { 484 | "version": "2.0.0", 485 | "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz", 486 | "integrity": "sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==", 487 | "license": "MIT", 488 | "dependencies": { 489 | "buffer-equal-constant-time": "1.0.1", 490 | "ecdsa-sig-formatter": "1.0.11", 491 | "safe-buffer": "^5.0.1" 492 | } 493 | }, 494 | "node_modules/jws": { 495 | "version": "4.0.0", 496 | "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", 497 | "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", 498 | "license": "MIT", 499 | "dependencies": { 500 | "jwa": "^2.0.0", 501 | "safe-buffer": "^5.0.1" 502 | } 503 | }, 504 | "node_modules/mime-db": { 505 | "version": "1.52.0", 506 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", 507 | "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", 508 | "license": "MIT", 509 | "engines": { 510 | "node": ">= 0.6" 511 | } 512 | }, 513 | "node_modules/mime-types": { 514 | "version": "2.1.35", 515 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", 516 | "integrity": 
"sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", 517 | "license": "MIT", 518 | "dependencies": { 519 | "mime-db": "1.52.0" 520 | }, 521 | "engines": { 522 | "node": ">= 0.6" 523 | } 524 | }, 525 | "node_modules/ms": { 526 | "version": "2.1.3", 527 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", 528 | "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", 529 | "license": "MIT" 530 | }, 531 | "node_modules/node-domexception": { 532 | "version": "1.0.0", 533 | "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", 534 | "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", 535 | "funding": [ 536 | { 537 | "type": "github", 538 | "url": "https://github.com/sponsors/jimmywarting" 539 | }, 540 | { 541 | "type": "github", 542 | "url": "https://paypal.me/jimmywarting" 543 | } 544 | ], 545 | "license": "MIT", 546 | "engines": { 547 | "node": ">=10.5.0" 548 | } 549 | }, 550 | "node_modules/node-fetch": { 551 | "version": "2.7.0", 552 | "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", 553 | "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", 554 | "license": "MIT", 555 | "dependencies": { 556 | "whatwg-url": "^5.0.0" 557 | }, 558 | "engines": { 559 | "node": "4.x || >=6.0.0" 560 | }, 561 | "peerDependencies": { 562 | "encoding": "^0.1.0" 563 | }, 564 | "peerDependenciesMeta": { 565 | "encoding": { 566 | "optional": true 567 | } 568 | } 569 | }, 570 | "node_modules/openai": { 571 | "version": "4.80.1", 572 | "resolved": "https://registry.npmjs.org/openai/-/openai-4.80.1.tgz", 573 | "integrity": "sha512-+6+bbXFwbIE88foZsBEt36bPkgZPdyFN82clAXG61gnHb2gXdZApDyRrcAHqEtpYICywpqaNo57kOm9dtnb7Cw==", 574 | "license": "Apache-2.0", 575 | "dependencies": { 576 | "@types/node": "^18.11.18", 
577 | "@types/node-fetch": "^2.6.4", 578 | "abort-controller": "^3.0.0", 579 | "agentkeepalive": "^4.2.1", 580 | "form-data-encoder": "1.7.2", 581 | "formdata-node": "^4.3.2", 582 | "node-fetch": "^2.6.7" 583 | }, 584 | "bin": { 585 | "openai": "bin/cli" 586 | }, 587 | "peerDependencies": { 588 | "ws": "^8.18.0", 589 | "zod": "^3.23.8" 590 | }, 591 | "peerDependenciesMeta": { 592 | "ws": { 593 | "optional": true 594 | }, 595 | "zod": { 596 | "optional": true 597 | } 598 | } 599 | }, 600 | "node_modules/openai/node_modules/@types/node": { 601 | "version": "18.19.74", 602 | "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.74.tgz", 603 | "integrity": "sha512-HMwEkkifei3L605gFdV+/UwtpxP6JSzM+xFk2Ia6DNFSwSVBRh9qp5Tgf4lNFOMfPVuU0WnkcWpXZpgn5ufO4A==", 604 | "license": "MIT", 605 | "dependencies": { 606 | "undici-types": "~5.26.4" 607 | } 608 | }, 609 | "node_modules/openai/node_modules/undici-types": { 610 | "version": "5.26.5", 611 | "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", 612 | "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", 613 | "license": "MIT" 614 | }, 615 | "node_modules/raw-body": { 616 | "version": "3.0.0", 617 | "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.0.tgz", 618 | "integrity": "sha512-RmkhL8CAyCRPXCE28MMH0z2PNWQBNk2Q09ZdxM9IOOXwxwZbN+qbWaatPkdkWIKL2ZVDImrN/pK5HTRz2PcS4g==", 619 | "license": "MIT", 620 | "dependencies": { 621 | "bytes": "3.1.2", 622 | "http-errors": "2.0.0", 623 | "iconv-lite": "0.6.3", 624 | "unpipe": "1.0.0" 625 | }, 626 | "engines": { 627 | "node": ">= 0.8" 628 | } 629 | }, 630 | "node_modules/safe-buffer": { 631 | "version": "5.2.1", 632 | "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", 633 | "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", 634 | "funding": [ 635 | { 636 | "type": "github", 637 | "url": 
"https://github.com/sponsors/feross" 638 | }, 639 | { 640 | "type": "patreon", 641 | "url": "https://www.patreon.com/feross" 642 | }, 643 | { 644 | "type": "consulting", 645 | "url": "https://feross.org/support" 646 | } 647 | ], 648 | "license": "MIT" 649 | }, 650 | "node_modules/safer-buffer": { 651 | "version": "2.1.2", 652 | "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", 653 | "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", 654 | "license": "MIT" 655 | }, 656 | "node_modules/setprototypeof": { 657 | "version": "1.2.0", 658 | "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", 659 | "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", 660 | "license": "ISC" 661 | }, 662 | "node_modules/statuses": { 663 | "version": "2.0.1", 664 | "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", 665 | "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", 666 | "license": "MIT", 667 | "engines": { 668 | "node": ">= 0.8" 669 | } 670 | }, 671 | "node_modules/toidentifier": { 672 | "version": "1.0.1", 673 | "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", 674 | "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", 675 | "license": "MIT", 676 | "engines": { 677 | "node": ">=0.6" 678 | } 679 | }, 680 | "node_modules/tr46": { 681 | "version": "0.0.3", 682 | "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", 683 | "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", 684 | "license": "MIT" 685 | }, 686 | "node_modules/typescript": { 687 | "version": "5.7.3", 688 | "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.3.tgz", 689 | "integrity": 
"sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==", 690 | "dev": true, 691 | "license": "Apache-2.0", 692 | "bin": { 693 | "tsc": "bin/tsc", 694 | "tsserver": "bin/tsserver" 695 | }, 696 | "engines": { 697 | "node": ">=14.17" 698 | } 699 | }, 700 | "node_modules/undici-types": { 701 | "version": "6.19.8", 702 | "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz", 703 | "integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==", 704 | "license": "MIT" 705 | }, 706 | "node_modules/unpipe": { 707 | "version": "1.0.0", 708 | "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", 709 | "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", 710 | "license": "MIT", 711 | "engines": { 712 | "node": ">= 0.8" 713 | } 714 | }, 715 | "node_modules/uuid": { 716 | "version": "11.0.5", 717 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.0.5.tgz", 718 | "integrity": "sha512-508e6IcKLrhxKdBbcA2b4KQZlLVp2+J5UwQ6F7Drckkc5N9ZJwFa4TgWtsww9UG8fGHbm6gbV19TdM5pQ4GaIA==", 719 | "funding": [ 720 | "https://github.com/sponsors/broofa", 721 | "https://github.com/sponsors/ctavan" 722 | ], 723 | "license": "MIT", 724 | "bin": { 725 | "uuid": "dist/esm/bin/uuid" 726 | } 727 | }, 728 | "node_modules/web-streams-polyfill": { 729 | "version": "4.0.0-beta.3", 730 | "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", 731 | "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==", 732 | "license": "MIT", 733 | "engines": { 734 | "node": ">= 14" 735 | } 736 | }, 737 | "node_modules/webidl-conversions": { 738 | "version": "3.0.1", 739 | "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", 740 | "integrity": 
"sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", 741 | "license": "BSD-2-Clause" 742 | }, 743 | "node_modules/whatwg-url": { 744 | "version": "5.0.0", 745 | "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", 746 | "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", 747 | "license": "MIT", 748 | "dependencies": { 749 | "tr46": "~0.0.3", 750 | "webidl-conversions": "^3.0.0" 751 | } 752 | }, 753 | "node_modules/zod": { 754 | "version": "3.24.1", 755 | "resolved": "https://registry.npmjs.org/zod/-/zod-3.24.1.tgz", 756 | "integrity": "sha512-muH7gBL9sI1nciMZV67X5fTKKBLtwpZ5VBp1vsOQzj1MhrBZ4wlVCm3gedKZWLp0Oyel8sIGfeiz54Su+OVT+A==", 757 | "license": "MIT", 758 | "funding": { 759 | "url": "https://github.com/sponsors/colinhacks" 760 | } 761 | } 762 | } 763 | } 764 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mcp-reasoning-coding", 3 | "version": "0.1.0", 4 | "description": "MCP server that combines reasoning with response generation through Cline", 5 | "private": true, 6 | "type": "module", 7 | "bin": { 8 | "deepseek-thinking-claude-mcp": "./build/index.js" 9 | }, 10 | "files": [ 11 | "build" 12 | ], 13 | "scripts": { 14 | "build": "tsc && node -e \"require('fs').chmodSync('build/index.js', '755')\"", 15 | "prepare": "npm run build", 16 | "watch": "tsc --watch", 17 | "inspector": "npx @modelcontextprotocol/inspector build/index.js" 18 | }, 19 | "dependencies": { 20 | "@anthropic-ai/sdk": "^0.36.2", 21 | "@google/generative-ai": "^0.21.0", 22 | "@google-cloud/vertexai": "^1.9.3", 23 | "@modelcontextprotocol/sdk": "0.6.0", 24 | "dotenv": "^16.4.7", 25 | "openai": "^4.80.1", 26 | "uuid": "^11.0.5" 27 | }, 28 | "devDependencies": { 29 | "@types/node": "^20.11.24", 30 | "@types/uuid": "^10.0.0", 
31 | "typescript": "^5.3.3" 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /smithery.yaml: -------------------------------------------------------------------------------- 1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml 2 | 3 | startCommand: 4 | type: stdio 5 | configSchema: 6 | # JSON Schema defining the configuration options for the MCP. 7 | type: object 8 | required: 9 | - openrouterApiKey 10 | properties: 11 | openrouterApiKey: 12 | type: string 13 | description: The API key for accessing the OpenRouter service. 14 | commandFunction: 15 | # A function that produces the CLI command to start the MCP on stdio. 16 | |- 17 | (config) => ({ command: 'node', args: ['build/index.js'], env: { OPENROUTER_API_KEY: config.openrouterApiKey } }) 18 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | import { Server } from '@modelcontextprotocol/sdk/server/index.js'; 3 | import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'; 4 | import { 5 | CallToolRequestSchema, 6 | ErrorCode, 7 | ListToolsRequestSchema, 8 | McpError, 9 | } from '@modelcontextprotocol/sdk/types.js'; 10 | import { OpenAI } from "openai"; 11 | import dotenv from "dotenv"; 12 | import * as os from "os"; 13 | import * as path from "path"; 14 | import * as fs from "fs/promises"; 15 | import { v4 as uuidv4 } from "uuid"; 16 | import { GoogleGenerativeAI } from "@google/generative-ai"; 17 | import { VertexAI } from '@google-cloud/vertexai'; 18 | import { fileURLToPath } from 'url'; 19 | import { dirname } from 'path'; 20 | 21 | // console.error(import.meta.url) 22 | const __filename = fileURLToPath(import.meta.url); 23 | const __dirname = dirname(__filename); 24 | console.error(path.join(__dirname, "../src/providers.json")) 25 | 26 | // Load 
environment variables and providers 27 | dotenv.config({ path: path.resolve(__dirname, '../.env') }); 28 | 29 | const providers = JSON.parse(await fs.readFile(path.join(__dirname, "../src/providers.json"), "utf-8")); 30 | 31 | // Constants, read from environment variables with defaults 32 | const DEFAULT_REASONING_PROVIDER = "openrouter"; 33 | const DEFAULT_REASONING_MODEL = "deepseek/deepseek-r1"; 34 | const DEFAULT_CODING_PROVIDER = "openrouter"; 35 | const DEFAULT_CODING_MODEL = "deepseek/deepseek-chat"; 36 | 37 | const REASONING_PROVIDER = 38 | process.env.REASONING_PROVIDER || DEFAULT_REASONING_PROVIDER; 39 | const REASONING_MODEL = process.env.REASONING_MODEL || DEFAULT_REASONING_MODEL; 40 | const CODING_PROVIDER = process.env.CODING_PROVIDER || DEFAULT_CODING_PROVIDER; 41 | const CODING_MODEL = process.env.CODING_MODEL || DEFAULT_CODING_MODEL; 42 | 43 | interface ConversationEntry { 44 | timestamp: number; 45 | prompt: string; 46 | reasoning: string; 47 | response: string; 48 | model: string; 49 | } 50 | 51 | interface ConversationContext { 52 | entries: ConversationEntry[]; 53 | maxEntries: number; 54 | } 55 | 56 | interface GenerateResponseArgs { 57 | prompt: string; 58 | showReasoning?: boolean; 59 | clearContext?: boolean; 60 | includeHistory?: boolean; 61 | } 62 | 63 | interface CheckResponseStatusArgs { 64 | taskId: string; 65 | } 66 | 67 | interface TaskStatus { 68 | status: 'pending' | 'reasoning' | 'responding' | 'complete' | 'error'; 69 | prompt: string; 70 | showReasoning?: boolean; 71 | reasoning?: string; 72 | response?: string; 73 | error?: string; 74 | timestamp: number; 75 | } 76 | 77 | const isValidCheckResponseStatusArgs = (args: any): args is CheckResponseStatusArgs => 78 | typeof args === 'object' && 79 | args !== null && 80 | typeof args.taskId === 'string'; 81 | 82 | interface ClaudeMessage { 83 | role: 'user' | 'assistant'; 84 | content: string | { type: string; text: string }[]; 85 | } 86 | 87 | interface UiMessage { 88 | ts: number; 
89 | type: string; 90 | say?: string; 91 | ask?: string; 92 | text: string; 93 | conversationHistoryIndex: number; 94 | } 95 | 96 | const isValidGenerateResponseArgs = (args: any): args is GenerateResponseArgs => 97 | typeof args === 'object' && 98 | args !== null && 99 | typeof args.prompt === 'string' && 100 | (args.showReasoning === undefined || typeof args.showReasoning === 'boolean') && 101 | (args.clearContext === undefined || typeof args.clearContext === 'boolean') && 102 | (args.includeHistory === undefined || typeof args.includeHistory === 'boolean'); 103 | 104 | function getClaudePath(): string { 105 | const homeDir = os.homedir(); 106 | switch (process.platform) { 107 | case 'win32': 108 | return path.join(homeDir, 'AppData', 'Roaming', 'Code', 'User', 'globalStorage', 'saoudrizwan.claude-dev', 'tasks'); 109 | case 'darwin': 110 | return path.join(homeDir, 'Library', 'Application Support', 'Code', 'User', 'globalStorage', 'saoudrizwan.claude-dev', 'tasks'); 111 | default: // linux 112 | return path.join(homeDir, '.config', 'Code', 'User', 'globalStorage', 'saoudrizwan.claude-dev', 'tasks'); 113 | } 114 | } 115 | 116 | async function findActiveConversation(): Promise { 117 | try { 118 | const tasksPath = getClaudePath(); 119 | const dirs = await fs.readdir(tasksPath); 120 | 121 | // Get modification time for each api_conversation_history.json 122 | const dirStats = await Promise.all( 123 | dirs.map(async (dir) => { 124 | try { 125 | const historyPath = path.join(tasksPath, dir, "api_conversation_history.json"); 126 | const stats = await fs.stat(historyPath); 127 | const uiPath = path.join(tasksPath, dir, "ui_messages.json"); 128 | const uiContent = await fs.readFile(uiPath, "utf8"); 129 | const uiMessages: UiMessage[] = JSON.parse(uiContent); 130 | const hasEnded = uiMessages.some((m) => m.type === "conversation_ended"); 131 | 132 | return { 133 | dir, 134 | mtime: stats.mtime.getTime(), 135 | hasEnded, 136 | }; 137 | } catch (error) { 138 | 
console.error("Error checking folder:", dir, error); 139 | return null; 140 | } 141 | }) 142 | ); 143 | 144 | // Filter out errors and ended conversations, then sort by modification time 145 | const sortedDirs = dirStats 146 | .filter((stat): stat is NonNullable => stat !== null && !stat.hasEnded) 147 | .sort((a, b) => b.mtime - a.mtime); 148 | 149 | // Use most recently modified active conversation 150 | const latest = sortedDirs[0]?.dir; 151 | if (!latest) { 152 | console.error("No active conversations found"); 153 | return null; 154 | } 155 | 156 | const historyPath = path.join(tasksPath, latest, "api_conversation_history.json"); 157 | const history = await fs.readFile(historyPath, "utf8"); 158 | return JSON.parse(history); 159 | } catch (error) { 160 | console.error("Error finding active conversation:", error); 161 | return null; 162 | } 163 | } 164 | 165 | function formatHistoryForModel(history: ClaudeMessage[], isDeepSeek: boolean): string { 166 | const maxLength = isDeepSeek ? 50000 : 600000; // 50k chars for DeepSeek, 600k for Claude 167 | const formattedMessages = []; 168 | let totalLength = 0; 169 | 170 | // Process messages in reverse chronological order to get most recent first 171 | for (let i = history.length - 1; i >= 0; i--) { 172 | const msg = history[i]; 173 | const content = Array.isArray(msg.content) 174 | ? msg.content.map(c => c.text).join('\n') 175 | : msg.content; 176 | 177 | const formattedMsg = `${msg.role === 'user' ? 
'Human' : 'Assistant'}: ${content}`; 178 | const msgLength = formattedMsg.length; 179 | 180 | // Stop adding messages if we'd exceed the limit 181 | if (totalLength + msgLength > maxLength) { 182 | break; 183 | } 184 | 185 | formattedMessages.push(formattedMsg); // Add most recent messages first 186 | totalLength += msgLength; 187 | } 188 | 189 | // Reverse to get chronological order 190 | return formattedMessages.reverse().join('\n\n'); 191 | } 192 | 193 | class ReasoningCodingServer { 194 | private server: Server; 195 | private clients: Record = {}; 196 | private context: ConversationContext = { 197 | entries: [], 198 | maxEntries: 10 199 | }; 200 | private activeTasks: Map = new Map(); 201 | 202 | constructor() { 203 | console.error('Initializing API clients...'); 204 | console.error('Reasoning Provider:'); 205 | console.error(REASONING_PROVIDER); 206 | console.error('Reasoning Model:'); 207 | console.error(REASONING_MODEL); 208 | console.error('Coding Provider:'); 209 | console.error(CODING_PROVIDER); 210 | console.error('Coding Model:'); 211 | console.error(CODING_MODEL); 212 | 213 | 214 | // Initialize API clients for supported providers, ONLY if they are selected 215 | if (REASONING_PROVIDER === "openrouter" || CODING_PROVIDER === "openrouter") { 216 | if (providers.openrouter) { 217 | console.error('Initializing OpenRouter client'); 218 | this.clients.openrouter = new OpenAI({ 219 | baseURL: "https://openrouter.ai/api/v1", 220 | apiKey: process.env.OPENROUTER_API_KEY 221 | }); 222 | console.error('OpenRouter client initialized'); 223 | } else { 224 | console.error("OpenRouter selected as provider, but configuration not found in providers.json"); 225 | } 226 | } 227 | 228 | if (REASONING_PROVIDER === "anthropic" || CODING_PROVIDER === "anthropic") { 229 | if (providers.anthropic) { 230 | console.error('Initializing Anthropic client'); 231 | this.clients.anthropic = new OpenAI({ 232 | baseURL: "https://api.anthropic.com/v1", 233 | apiKey: 
process.env.ANTHROPIC_API_KEY 234 | }); 235 | console.error('Anthropic client initialized'); 236 | } else { 237 | console.error("Anthropic selected as provider, but configuration not found in providers.json"); 238 | } 239 | } 240 | 241 | if (REASONING_PROVIDER === "deepseek" || CODING_PROVIDER === "deepseek") { 242 | if (providers.deepseek) { 243 | console.error('Initializing Deepseek client'); 244 | this.clients.deepseek = new OpenAI({ 245 | baseURL: "https://api.deepseek.com/v1", 246 | apiKey: process.env.DEEPSEEK_API_KEY 247 | }); 248 | console.error('Deepseek client initialized'); 249 | } else { 250 | console.error("Deepseek selected as provider, but configuration not found in providers.json"); 251 | } 252 | } 253 | 254 | if (REASONING_PROVIDER === "openai" || CODING_PROVIDER === "openai") { 255 | console.error("Checking OpenAI client initialization:", { REASONING_PROVIDER, CODING_PROVIDER, providers_openai: providers.openai }); 256 | if (providers.openai) { 257 | console.error("Initializing OpenAI client with config:", { baseURL: process.env.OPENAI_API_BASE_URL, apiKey: process.env.OPENAI_API_KEY }); 258 | this.clients.openai = new OpenAI({ 259 | baseURL: process.env.OPENAI_API_BASE_URL, 260 | apiKey: process.env.OPENAI_API_KEY, 261 | }); 262 | console.error("OpenAI client initialized"); 263 | } else { 264 | console.error("OpenAI provider configuration not found in providers.json, but REASONING_PROVIDER or CODING_PROVIDER is set to openai. Please check your configuration."); 265 | } 266 | } 267 | 268 | if (REASONING_PROVIDER === "gemini" || CODING_PROVIDER === "gemini") { 269 | if (providers.gemini) { 270 | console.error('Initializing Gemini client'); 271 | this.clients.gemini = new GoogleGenerativeAI(process.env.GEMINI_API_KEY ?? 
""); 272 | console.error("Gemini client initialized"); 273 | } else { 274 | console.error("Gemini selected as provider, but configuration not found in providers.json"); 275 | } 276 | } 277 | 278 | if (REASONING_PROVIDER === "vertex" || CODING_PROVIDER === "vertex") { 279 | if (providers.vertex) { 280 | console.error('Initializing Vertex client'); 281 | this.clients.vertex = new VertexAI({project: process.env.VERTEX_PROJECT_ID, location: process.env.VERTEX_REGION}); 282 | console.error("Vertex client initialized"); 283 | } else { 284 | console.error("Vertex selected as provider, but configuration not found in providers.json"); 285 | } 286 | } 287 | 288 | 289 | // TODO: Add clients for other providers as needed 290 | 291 | // Initialize MCP server 292 | this.server = new Server( 293 | { 294 | name: 'deepseek-thinking-claude-mcp', 295 | version: '0.1.0', 296 | }, 297 | { 298 | capabilities: { 299 | tools: {}, 300 | }, 301 | } 302 | ); 303 | 304 | this.setupToolHandlers(); 305 | 306 | // Error handling 307 | this.server.onerror = (error) => console.error('[MCP Error]', error); 308 | process.on('SIGINT', async () => { 309 | await this.server.close(); 310 | process.exit(0); 311 | }); 312 | } 313 | 314 | private addToContext(entry: ConversationEntry) { 315 | this.context.entries.push(entry); 316 | if (this.context.entries.length > this.context.maxEntries) { 317 | this.context.entries.shift(); // Remove oldest 318 | } 319 | } 320 | 321 | private formatContextForPrompt(): string { 322 | return this.context.entries 323 | .map(entry => `Question: ${entry.prompt}\nReasoning: ${entry.reasoning}\nAnswer: ${entry.response}`) 324 | .join('\n\n'); 325 | } 326 | 327 | private setupToolHandlers() { 328 | this.server.setRequestHandler(ListToolsRequestSchema, async () => ({ 329 | tools: [ 330 | { 331 | name: 'generate_response', 332 | description: 'Generate a response using DeepSeek\'s reasoning and Claude\'s response generation through OpenRouter.', 333 | inputSchema: { 334 | type: 
'object', 335 | properties: { 336 | prompt: { 337 | type: 'string', 338 | description: 'The user\'s input prompt' 339 | }, 340 | showReasoning: { 341 | type: 'boolean', 342 | description: 'Whether to include reasoning in response', 343 | default: false 344 | }, 345 | clearContext: { 346 | type: 'boolean', 347 | description: 'Clear conversation history before this request', 348 | default: false 349 | }, 350 | includeHistory: { 351 | type: 'boolean', 352 | description: 'Include Cline conversation history for context', 353 | default: true 354 | } 355 | }, 356 | required: ['prompt'] 357 | } 358 | }, 359 | { 360 | name: 'check_response_status', 361 | description: 'Check the status of a response generation task', 362 | inputSchema: { 363 | type: 'object', 364 | properties: { 365 | taskId: { 366 | type: 'string', 367 | description: 'The task ID returned by generate_response' 368 | } 369 | }, 370 | required: ['taskId'] 371 | } 372 | } 373 | ] 374 | })); 375 | 376 | this.server.setRequestHandler(CallToolRequestSchema, async (request) => { 377 | if (request.params.name === 'generate_response') { 378 | if (!isValidGenerateResponseArgs(request.params.arguments)) { 379 | throw new McpError( 380 | ErrorCode.InvalidParams, 381 | 'Invalid generate_response arguments' 382 | ); 383 | } 384 | 385 | const taskId = uuidv4(); 386 | const { prompt, showReasoning, clearContext, includeHistory } = request.params.arguments; 387 | 388 | // Initialize task status 389 | this.activeTasks.set(taskId, { 390 | status: 'pending', 391 | prompt, 392 | showReasoning, 393 | timestamp: Date.now() 394 | }); 395 | 396 | // Start processing in background 397 | this.processTask(taskId, clearContext, includeHistory).catch(error => { 398 | console.error('Error processing task:', error); 399 | this.activeTasks.set(taskId, { 400 | ...this.activeTasks.get(taskId)!, 401 | status: 'error', 402 | error: error.message 403 | }); 404 | }); 405 | 406 | // Return task ID immediately 407 | return { 408 | content: [ 409 | 
{ 410 | type: 'text', 411 | text: JSON.stringify({ taskId }) 412 | } 413 | ] 414 | }; 415 | } else if (request.params.name === 'check_response_status') { 416 | if (!isValidCheckResponseStatusArgs(request.params.arguments)) { 417 | throw new McpError( 418 | ErrorCode.InvalidParams, 419 | 'Invalid check_response_status arguments' 420 | ); 421 | } 422 | 423 | const taskId = request.params.arguments.taskId; 424 | const task = this.activeTasks.get(taskId); 425 | 426 | if (!task) { 427 | throw new McpError( 428 | ErrorCode.InvalidRequest, 429 | `No task found with ID: ${taskId}` 430 | ); 431 | } 432 | 433 | return { 434 | content: [ 435 | { 436 | type: 'text', 437 | text: JSON.stringify({ 438 | status: task.status, 439 | reasoning: task.showReasoning ? task.reasoning : undefined, 440 | response: task.status === 'complete' ? task.response : undefined, 441 | error: task.error 442 | }) 443 | } 444 | ] 445 | }; 446 | } else { 447 | throw new McpError( 448 | ErrorCode.MethodNotFound, 449 | `Unknown tool: ${request.params.name}` 450 | ); 451 | } 452 | }); 453 | } 454 | 455 | private async processTask(taskId: string, clearContext?: boolean, includeHistory?: boolean): Promise { 456 | const task = this.activeTasks.get(taskId); 457 | if (!task) { 458 | throw new Error(`No task found with ID: ${taskId}`); 459 | } 460 | 461 | try { 462 | if (clearContext) { 463 | this.context.entries = []; 464 | } 465 | 466 | // Update status to reasoning 467 | this.activeTasks.set(taskId, { 468 | ...task, 469 | status: 'reasoning' 470 | }); 471 | 472 | // Get Cline conversation history if requested 473 | let history: ClaudeMessage[] | null = null; 474 | if (includeHistory !== false) { 475 | history = await findActiveConversation(); 476 | } 477 | 478 | // Get reasoning with limited history 479 | const reasoningHistory = history ? formatHistoryForModel(history, true) : ''; 480 | const reasoningPrompt = reasoningHistory 481 | ? 
`${reasoningHistory}\n\nNew question: ${task.prompt}` 482 | : task.prompt; 483 | const reasoning = await this.getReasoning(reasoningPrompt); 484 | 485 | // Update status with reasoning 486 | this.activeTasks.set(taskId, { 487 | ...task, 488 | status: 'responding', 489 | reasoning, 490 | }); 491 | 492 | // Get final response with full history 493 | const responseHistory = history ? formatHistoryForModel(history, false) : ''; 494 | const fullPrompt = responseHistory 495 | ? `${responseHistory}\n\nCurrent task: ${task.prompt}` 496 | : task.prompt; 497 | const response = await this.getFinalResponse(fullPrompt, reasoning); 498 | 499 | // Add to context after successful response 500 | this.addToContext({ 501 | timestamp: Date.now(), 502 | prompt: task.prompt, 503 | reasoning, 504 | response, 505 | model: CODING_MODEL, // Use CODING_MODEL here 506 | }); 507 | 508 | // Update status to complete 509 | this.activeTasks.set(taskId, { 510 | ...task, 511 | status: "complete", 512 | reasoning, 513 | response, 514 | timestamp: Date.now(), 515 | }); 516 | } catch (error) { 517 | // Update status to error 518 | this.activeTasks.set(taskId, { 519 | ...task, 520 | status: "error", 521 | error: error instanceof Error ? error.message : "Unknown error", 522 | timestamp: Date.now(), 523 | }); 524 | throw error; 525 | } 526 | } 527 | 528 | private async getReasoningDeepseek(prompt: string): Promise { 529 | const modelInfo = providers.deepseek[REASONING_MODEL]; 530 | if (!modelInfo) { 531 | throw new Error(`Model ${REASONING_MODEL} for provider deepseek not found in providers.json`); 532 | } 533 | if (!this.clients.deepseek) { 534 | throw new Error(`Client not initialized for provider: deepseek`); 535 | } 536 | 537 | const response = await this.clients.deepseek.chat.completions.create({ 538 | model: REASONING_MODEL, 539 | messages: [{ role: "user", content: prompt }], 540 | temperature: modelInfo.temperature ?? 0.7, 541 | top_p: modelInfo.top_p ?? 
1, 542 | ...(modelInfo.extra_params || {}) 543 | } as any); 544 | 545 | if (!response.choices?.[0]?.message?.content) { 546 | throw new Error("No reasoning received from DeepSeek"); 547 | } 548 | return response.choices[0].message.content; 549 | } 550 | 551 | private async getFinalResponseDeepseek(prompt: string, reasoning: string): Promise { 552 | const modelInfo = providers.deepseek[CODING_MODEL]; 553 | if (!modelInfo) { 554 | throw new Error(`Model ${CODING_MODEL} for provider deepseek not found in providers.json`); 555 | } 556 | if (!this.clients.deepseek) { 557 | throw new Error(`Client not initialized for provider: deepseek`); 558 | } 559 | const messages = [ 560 | { 561 | role: "user" as const, 562 | content: prompt 563 | }, 564 | { 565 | role: "assistant" as const, 566 | content: `${reasoning}` 567 | } 568 | ]; 569 | const response = await this.clients.deepseek.chat.completions.create({ 570 | model: CODING_MODEL, 571 | messages, 572 | temperature: modelInfo.temperature ?? 0.7, 573 | top_p: modelInfo.top_p ?? 1, 574 | repetition_penalty: modelInfo.repetition_penalty ?? 
1, 575 | ...(modelInfo.extra_params || {}) 576 | } as any); 577 | 578 | return response.choices[0].message.content || "Error: No response content"; 579 | } 580 | 581 | private async getReasoningGemini(prompt: string): Promise { 582 | const modelInfo = providers.gemini[REASONING_MODEL]; 583 | if (!modelInfo) { 584 | throw new Error(`Model ${REASONING_MODEL} for provider gemini not found in providers.json`); 585 | } 586 | if (!this.clients.gemini) { 587 | throw new Error(`Client not initialized for provider: gemini`); 588 | } 589 | const geminiPrompt = [{ role: "user", parts: [{ text: prompt }] }]; 590 | const result = await this.clients.gemini.getGenerativeModel({ model: REASONING_MODEL }).generateContentStream(geminiPrompt); 591 | let response = ""; 592 | for await (const chunk of result.stream) { 593 | response += chunk.text(); 594 | } 595 | if (!response) { 596 | throw new Error("No reasoning received from Gemini"); 597 | } 598 | return response; 599 | } 600 | 601 | private async getFinalResponseGemini(prompt: string, reasoning: string): Promise { 602 | const modelInfo = providers.gemini[CODING_MODEL]; 603 | if (!modelInfo) { 604 | throw new Error(`Model ${CODING_MODEL} for provider gemini not found in providers.json`); 605 | } 606 | if (!this.clients.gemini) { 607 | throw new Error(`Client not initialized for provider: gemini`); 608 | } 609 | const geminiPrompt = [{ role: "user", parts: [{ text: prompt }] }, { role: 'model', parts: [{ text: reasoning }] }]; 610 | const result = await this.clients.gemini.getGenerativeModel({ model: CODING_MODEL }).generateContentStream(geminiPrompt); 611 | let response = ""; 612 | for await (const chunk of result.stream) { 613 | response += chunk.text(); 614 | } 615 | if (!response) { 616 | throw new Error("No reasoning received from Gemini"); 617 | } 618 | return response; 619 | } 620 | 621 | private async getReasoningAnthropic(prompt: string): Promise { 622 | const modelInfo = providers.anthropic[REASONING_MODEL]; 623 | if 
(!modelInfo) { 624 | throw new Error(`Model ${REASONING_MODEL} for provider anthropic not found in providers.json`); 625 | } 626 | if (!this.clients.anthropic) { 627 | throw new Error(`Client not initialized for provider: anthropic`); 628 | } 629 | 630 | const response = await this.clients.anthropic.chat.completions.create({ 631 | model: REASONING_MODEL, 632 | messages: [{ role: "user", content: prompt }], 633 | temperature: modelInfo.temperature ?? 0.7, 634 | top_p: modelInfo.top_p ?? 1, 635 | ...(modelInfo.extra_params || {}) 636 | } as any); 637 | 638 | if (!response.choices?.[0]?.message?.content) { 639 | throw new Error("No reasoning received from Anthropic"); 640 | } 641 | return response.choices[0].message.content; 642 | } 643 | 644 | private async getFinalResponseAnthropic(prompt: string, reasoning: string): Promise { 645 | const modelInfo = providers.anthropic[CODING_MODEL]; 646 | if (!modelInfo) { 647 | throw new Error(`Model ${CODING_MODEL} for provider anthropic not found in providers.json`); 648 | } 649 | if (!this.clients.anthropic) { 650 | throw new Error(`Client not initialized for provider: anthropic`); 651 | } 652 | const messages = [ 653 | { 654 | role: "user" as const, 655 | content: prompt 656 | }, 657 | { 658 | role: "assistant" as const, 659 | content: `${reasoning}` 660 | } 661 | ]; 662 | const response = await this.clients.anthropic.chat.completions.create({ 663 | model: CODING_MODEL, 664 | messages, 665 | temperature: modelInfo.temperature ?? 0.7, 666 | top_p: modelInfo.top_p ?? 1, 667 | repetition_penalty: modelInfo.repetition_penalty ?? 
1, 668 | ...(modelInfo.extra_params || {}) 669 | } as any); 670 | 671 | return response.choices[0].message.content || "Error: No response content"; 672 | } 673 | 674 | private async getReasoningVertex(prompt: string): Promise { 675 | const modelInfo = providers.vertex[REASONING_MODEL]; 676 | if (!modelInfo) { 677 | throw new Error(`Model ${REASONING_MODEL} for provider vertex not found in providers.json`); 678 | } 679 | if (!this.clients.vertex) { 680 | throw new Error(`Client not initialized for provider: vertex`); 681 | } 682 | const vertexModel = this.clients.vertex.getGenerativeModel({ model: REASONING_MODEL }); 683 | const vertexPrompt = [{ role: "user", parts: [{ text: prompt }] }]; 684 | const result = await vertexModel.generateContentStream(vertexPrompt); 685 | let response = ""; 686 | for await (const chunk of result.stream) { 687 | response += chunk.text(); 688 | } 689 | if (!response) { 690 | throw new Error("No reasoning received from Vertex"); 691 | } 692 | return response; 693 | } 694 | 695 | private async getFinalResponseVertex(prompt: string, reasoning: string): Promise { 696 | const modelInfo = providers.vertex[CODING_MODEL]; 697 | if (!modelInfo) { 698 | throw new Error(`Model ${CODING_MODEL} for provider vertex not found in providers.json`); 699 | } 700 | if (!this.clients.vertex) { 701 | throw new Error(`Client not initialized for provider: vertex`); 702 | } 703 | const vertexModel = this.clients.vertex.getGenerativeModel({ model: CODING_MODEL }); 704 | const vertexPrompt = [ 705 | { role: "user", parts: [{ text: prompt }] }, 706 | { role: "model", parts: [{ text: reasoning }] }, 707 | ]; 708 | const result = await vertexModel.generateContentStream(vertexPrompt); 709 | let response = ""; 710 | for await (const chunk of result.stream) { 711 | response += chunk.text(); 712 | } 713 | if (!response) { 714 | throw new Error("No reasoning received from Vertex"); 715 | } 716 | return response; 717 | } 718 | 719 | private async 
getReasoningOpenAI(prompt: string): Promise { 720 | const modelInfo = providers.openai[REASONING_MODEL]; 721 | 722 | if (!modelInfo) { 723 | throw new Error(`Model ${REASONING_MODEL} for provider openai not found in providers.json`); 724 | } 725 | 726 | if (!this.clients.openai) { 727 | throw new Error(`Client not initialized for provider: openai`); 728 | } 729 | 730 | const response = await this.clients.openai.chat.completions.create({ 731 | model: REASONING_MODEL, 732 | messages: [ 733 | { 734 | role: "user", 735 | content: prompt, 736 | }, 737 | ], 738 | temperature: modelInfo.temperature ?? 0.7, 739 | top_p: modelInfo.top_p ?? 1, 740 | ...(modelInfo.extra_params || {}) 741 | } as any); 742 | 743 | if (!response.choices?.[0]?.message?.content) { 744 | throw new Error("No reasoning received from OpenAI"); 745 | } 746 | return response.choices[0].message.content; 747 | } 748 | 749 | private async getFinalResponseOpenAI(prompt: string, reasoning: string): Promise { 750 | const modelInfo = providers.openai[CODING_MODEL]; 751 | if (!modelInfo) { 752 | throw new Error(`Model ${CODING_MODEL} for provider openai not found in providers.json`); 753 | } 754 | if (!this.clients.openai) { 755 | throw new Error(`Client not initialized for provider: openai`); 756 | } 757 | const messages = [ 758 | { 759 | role: "user" as const, 760 | content: prompt 761 | }, 762 | { 763 | role: "assistant" as const, 764 | content: `${reasoning}` 765 | } 766 | ]; 767 | const response = await this.clients.openai.chat.completions.create({ 768 | model: CODING_MODEL, 769 | messages, 770 | temperature: modelInfo.temperature ?? 0.7, 771 | top_p: modelInfo.top_p ?? 1, 772 | repetition_penalty: modelInfo.repetition_penalty ?? 
1, 773 | ...(modelInfo.extra_params || {}) 774 | } as any); 775 | 776 | return response.choices[0].message.content || "Error: No response content"; 777 | } 778 | 779 | private async getReasoningOpenRouter(prompt: string): Promise { 780 | const modelInfo = providers.openrouter[REASONING_MODEL]; 781 | 782 | if (!modelInfo) { 783 | throw new Error(`Model ${REASONING_MODEL} for provider openrouter not found in providers.json`); 784 | } 785 | 786 | if (!this.clients.openrouter) { 787 | throw new Error(`Client not initialized for provider: openrouter`); 788 | } 789 | 790 | const response = await this.clients.openrouter.chat.completions.create({ 791 | model: REASONING_MODEL, 792 | messages: [ 793 | { 794 | role: "user", 795 | content: prompt, 796 | }, 797 | ], 798 | temperature: modelInfo.temperature ?? 0.7, 799 | top_p: modelInfo.top_p ?? 1, 800 | ...(modelInfo.extra_params || {}) 801 | } as any); 802 | 803 | if (!response.choices?.[0]?.message?.content) { 804 | throw new Error("No reasoning received from OpenRouter"); 805 | } 806 | return response.choices[0].message.content; 807 | } 808 | 809 | private async getFinalResponseOpenRouter(prompt: string, reasoning: string): Promise { 810 | const modelInfo = providers.openrouter[CODING_MODEL]; 811 | if (!modelInfo) { 812 | throw new Error(`Model ${CODING_MODEL} for provider openrouter not found in providers.json`); 813 | } 814 | if (!this.clients.openrouter) { 815 | throw new Error(`Client not initialized for provider: openrouter`); 816 | } 817 | const messages = [ 818 | { 819 | role: "user" as const, 820 | content: prompt 821 | }, 822 | { 823 | role: "assistant" as const, 824 | content: `${reasoning}` 825 | } 826 | ]; 827 | const response = await this.clients.openrouter.chat.completions.create({ 828 | model: CODING_MODEL, 829 | messages, 830 | temperature: modelInfo.temperature ?? 0.7, 831 | top_p: modelInfo.top_p ?? 1, 832 | repetition_penalty: modelInfo.repetition_penalty ?? 
1, 833 | ...(modelInfo.extra_params || {}) 834 | } as any); 835 | 836 | return response.choices[0].message.content || "Error: No response content"; 837 | } 838 | 839 | /* Dispatches the reasoning request to the provider named by REASONING_PROVIDER; when stored context entries exist, they are prepended via formatContextForPrompt() as "Previous conversation". Provider errors are logged and rethrown unchanged. */ private async getReasoning(prompt: string): Promise { 840 | const contextPrompt = 841 | this.context.entries.length > 0 842 | ? `Previous conversation:\n${this.formatContextForPrompt()}\n\nNew question: ${prompt}` 843 | : prompt; 844 | 845 | try { 846 | switch (REASONING_PROVIDER) { 847 | case "anthropic": 848 | return this.getReasoningAnthropic(contextPrompt); 849 | case "deepseek": 850 | return this.getReasoningDeepseek(contextPrompt); 851 | case "gemini": 852 | return this.getReasoningGemini(contextPrompt); 853 | case "vertex": 854 | return this.getReasoningVertex(contextPrompt); 855 | case "openai": 856 | return this.getReasoningOpenAI(contextPrompt); 857 | case "openrouter": 858 | return this.getReasoningOpenRouter(contextPrompt); 859 | default: 860 | throw new Error(`Unsupported reasoning provider: ${REASONING_PROVIDER}`); 861 | } 862 | } 863 | catch (error) { 864 | console.error("Error in getReasoning:", error); 865 | throw error; 866 | } 867 | } 868 | 869 | /* Dispatches the final-response request to the provider named by CODING_PROVIDER, forwarding the prompt plus the reasoning produced earlier; note the prompt is NOT context-prefixed here (the caller builds the history prompt). Provider errors are logged and rethrown unchanged. */ private async getFinalResponse(prompt: string, reasoning: string): Promise { 870 | try { 871 | switch (CODING_PROVIDER) { 872 | case "anthropic": 873 | return this.getFinalResponseAnthropic(prompt, reasoning); 874 | case "deepseek": 875 | return this.getFinalResponseDeepseek(prompt, reasoning); 876 | case "gemini": 877 | return this.getFinalResponseGemini(prompt, reasoning); 878 | case "vertex": 879 | return this.getFinalResponseVertex(prompt, reasoning); 880 | case "openai": 881 | return this.getFinalResponseOpenAI(prompt, reasoning); 882 | case "openrouter": 883 | return this.getFinalResponseOpenRouter(prompt, reasoning); 884 | default: 885 | throw new Error(`Unsupported coding provider: ${CODING_PROVIDER}`); 886 | } 887 | } catch (error) { 888 | console.error('Error in getFinalResponse:', error); 889 | throw error; 890 | } 891 | } 892 | 
893 | /** Looks up a model entry in providers.json; throws when the provider or model key is absent. Returns the model id together with its metadata record. */ getModel(provider: string, model: string): {id: string, info: any} { 894 | const providerInfo = providers[provider]; 895 | if (!providerInfo) { 896 | throw new Error(`Provider not found: ${provider}`); 897 | } 898 | const modelInfo = providerInfo[model]; 899 | 900 | if (!modelInfo) { 901 | throw new Error(`Model not found: ${provider}/${model}`); 902 | } 903 | return {id: model, info: modelInfo} 904 | } 905 | 906 | /** Connects the MCP server to a stdio transport; logs to stderr so stdout stays reserved for the protocol stream. */ async run(): Promise<void> { 907 | const transport = new StdioServerTransport(); 908 | await this.server.connect(transport); 909 | console.error('Reasoning-Coding MCP server running on stdio'); 910 | } 911 | } 912 | 913 | const server = new ReasoningCodingServer(); 914 | /* fix: handle startup failure instead of leaving a floating promise (unhandled rejection) */ server.run().catch((err) => { console.error('Fatal error starting server:', err); process.exit(1); }); 915 | -------------------------------------------------------------------------------- /src/providers.json: -------------------------------------------------------------------------------- 1 | { 2 | "anthropic": { 3 | "claude-3-5-sonnet-20241022": { 4 | "maxTokens": 8192, 5 | "contextWindow": 200000, 6 | "supportsImages": true, 7 | "supportsComputerUse": true, 8 | "supportsPromptCache": true, 9 | "inputPrice": 3.0, 10 | "outputPrice": 15.0, 11 | "cacheWritesPrice": 3.75, 12 | "cacheReadsPrice": 0.3 13 | }, 14 | "claude-3-5-haiku-20241022": { 15 | "maxTokens": 8192, 16 | "contextWindow": 200000, 17 | "supportsImages": false, 18 | "supportsPromptCache": true, 19 | "inputPrice": 0.8, 20 | "outputPrice": 4.0, 21 | "cacheWritesPrice": 1.0, 22 | "cacheReadsPrice": 0.08 23 | }, 24 | "claude-3-opus-20240229": { 25 | "maxTokens": 4096, 26 | "contextWindow": 200000, 27 | "supportsImages": true, 28 | "supportsPromptCache": true, 29 | "inputPrice": 15.0, 30 | "outputPrice": 75.0, 31 | "cacheWritesPrice": 18.75, 32 | "cacheReadsPrice": 1.5 33 | }, 34 | "claude-3-haiku-20240307": { 35 | "maxTokens": 4096, 36 | "contextWindow": 200000, 37 | "supportsImages": true, 38 | "supportsPromptCache": true, 39 | "inputPrice": 0.25, 40 | "outputPrice": 1.25, 41 | "cacheWritesPrice": 0.3, 42 | 
"cacheReadsPrice": 0.03 43 | } 44 | }, 45 | "bedrock": { 46 | "anthropic.claude-3-5-sonnet-20241022-v2:0": { 47 | "maxTokens": 8192, 48 | "contextWindow": 200000, 49 | "supportsImages": true, 50 | "supportsComputerUse": true, 51 | "supportsPromptCache": false, 52 | "inputPrice": 3.0, 53 | "outputPrice": 15.0 54 | }, 55 | "anthropic.claude-3-5-haiku-20241022-v1:0": { 56 | "maxTokens": 8192, 57 | "contextWindow": 200000, 58 | "supportsImages": false, 59 | "supportsPromptCache": false, 60 | "inputPrice": 1.0, 61 | "outputPrice": 5.0 62 | }, 63 | "anthropic.claude-3-5-sonnet-20240620-v1:0": { 64 | "maxTokens": 8192, 65 | "contextWindow": 200000, 66 | "supportsImages": true, 67 | "supportsPromptCache": false, 68 | "inputPrice": 3.0, 69 | "outputPrice": 15.0 70 | }, 71 | "anthropic.claude-3-opus-20240229-v1:0": { 72 | "maxTokens": 4096, 73 | "contextWindow": 200000, 74 | "supportsImages": true, 75 | "supportsPromptCache": false, 76 | "inputPrice": 15.0, 77 | "outputPrice": 75.0 78 | }, 79 | "anthropic.claude-3-sonnet-20240229-v1:0": { 80 | "maxTokens": 4096, 81 | "contextWindow": 200000, 82 | "supportsImages": true, 83 | "supportsPromptCache": false, 84 | "inputPrice": 3.0, 85 | "outputPrice": 15.0 86 | }, 87 | "anthropic.claude-3-haiku-20240307-v1:0": { 88 | "maxTokens": 4096, 89 | "contextWindow": 200000, 90 | "supportsImages": true, 91 | "supportsPromptCache": false, 92 | "inputPrice": 0.25, 93 | "outputPrice": 1.25 94 | } 95 | }, 96 | "openrouter": { 97 | "openRouterDefaultModelId": "anthropic/claude-3.5-sonnet", 98 | "openRouterDefaultModelInfo": { 99 | "maxTokens": 8192, 100 | "contextWindow": 200000, 101 | "supportsImages": true, 102 | "supportsComputerUse": true, 103 | "supportsPromptCache": true, 104 | "inputPrice": 3.0, 105 | "outputPrice": 15.0, 106 | "cacheWritesPrice": 3.75, 107 | "cacheReadsPrice": 0.3, 108 | "description": "The new Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. 
Sonnet is particularly good at:\\n\\n- Coding: New Sonnet scores ~49% on SWE-Bench Verified, higher than the last best score, and without any fancy prompt scaffolding\\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)\\n\\n#multimodal" 109 | } 110 | }, 111 | "vertex": { 112 | "claude-3-5-sonnet-v2@20241022": { 113 | "maxTokens": 8192, 114 | "contextWindow": 200000, 115 | "supportsImages": true, 116 | "supportsComputerUse": true, 117 | "supportsPromptCache": false, 118 | "inputPrice": 3.0, 119 | "outputPrice": 15.0 120 | }, 121 | "claude-3-5-sonnet@20240620": { 122 | "maxTokens": 8192, 123 | "contextWindow": 200000, 124 | "supportsImages": true, 125 | "supportsPromptCache": false, 126 | "inputPrice": 3.0, 127 | "outputPrice": 15.0 128 | }, 129 | "claude-3-5-haiku@20241022": { 130 | "maxTokens": 8192, 131 | "contextWindow": 200000, 132 | "supportsImages": false, 133 | "supportsPromptCache": false, 134 | "inputPrice": 1.0, 135 | "outputPrice": 5.0 136 | }, 137 | "claude-3-opus@20240229": { 138 | "maxTokens": 4096, 139 | "contextWindow": 200000, 140 | "supportsImages": true, 141 | "supportsPromptCache": false, 142 | "inputPrice": 15.0, 143 | "outputPrice": 75.0 144 | }, 145 | "claude-3-haiku@20240307": { 146 | "maxTokens": 4096, 147 | "contextWindow": 200000, 148 | "supportsImages": true, 149 | "supportsPromptCache": false, 150 | "inputPrice": 0.25, 151 | "outputPrice": 1.25 152 | } 153 | }, 154 | "openai": { 155 | "openAiModelInfoSaneDefaults": { 156 | "maxTokens": -1, 157 | "contextWindow": 128000, 158 | "supportsImages": true, 159 | "supportsPromptCache": false, 160 | "inputPrice": 
0, 161 | "outputPrice": 0 162 | } 163 | }, 164 | "gemini": { 165 | "gemini-2.0-flash-001": { 166 | "maxTokens": 8192, 167 | "contextWindow": 1048576, 168 | "supportsImages": true, 169 | "supportsPromptCache": false, 170 | "inputPrice": 0, 171 | "outputPrice": 0 172 | }, 173 | "gemini-2.0-flash-lite-preview-02-05": { 174 | "maxTokens": 8192, 175 | "contextWindow": 1048576, 176 | "supportsImages": true, 177 | "supportsPromptCache": false, 178 | "inputPrice": 0, 179 | "outputPrice": 0 180 | }, 181 | "gemini-2.0-pro-exp-02-05": { 182 | "maxTokens": 8192, 183 | "contextWindow": 2097152, 184 | "supportsImages": true, 185 | "supportsPromptCache": false, 186 | "inputPrice": 0, 187 | "outputPrice": 0 188 | }, 189 | "gemini-2.0-flash-thinking-exp-01-21": { 190 | "maxTokens": 65536, 191 | "contextWindow": 1048576, 192 | "supportsImages": true, 193 | "supportsPromptCache": false, 194 | "inputPrice": 0, 195 | "outputPrice": 0 196 | }, 197 | "gemini-2.0-flash-thinking-exp-1219": { 198 | "maxTokens": 8192, 199 | "contextWindow": 32767, 200 | "supportsImages": true, 201 | "supportsPromptCache": false, 202 | "inputPrice": 0, 203 | "outputPrice": 0 204 | }, 205 | "gemini-2.0-flash-exp": { 206 | "maxTokens": 8192, 207 | "contextWindow": 1048576, 208 | "supportsImages": true, 209 | "supportsPromptCache": false, 210 | "inputPrice": 0, 211 | "outputPrice": 0 212 | }, 213 | "gemini-1.5-flash-002": { 214 | "maxTokens": 8192, 215 | "contextWindow": 1048576, 216 | "supportsImages": true, 217 | "supportsPromptCache": false, 218 | "inputPrice": 0, 219 | "outputPrice": 0 220 | }, 221 | "gemini-1.5-flash-exp-0827": { 222 | "maxTokens": 8192, 223 | "contextWindow": 1048576, 224 | "supportsImages": true, 225 | "supportsPromptCache": false, 226 | "inputPrice": 0, 227 | "outputPrice": 0 228 | }, 229 | "gemini-1.5-flash-8b-exp-0827": { 230 | "maxTokens": 8192, 231 | "contextWindow": 1048576, 232 | "supportsImages": true, 233 | "supportsPromptCache": false, 234 | "inputPrice": 0, 235 | "outputPrice": 
0 236 | }, 237 | "gemini-1.5-pro-002": { 238 | "maxTokens": 8192, 239 | "contextWindow": 2097152, 240 | "supportsImages": true, 241 | "supportsPromptCache": false, 242 | "inputPrice": 0, 243 | "outputPrice": 0 244 | }, 245 | "gemini-1.5-pro-exp-0827": { 246 | "maxTokens": 8192, 247 | "contextWindow": 2097152, 248 | "supportsImages": true, 249 | "supportsPromptCache": false, 250 | "inputPrice": 0, 251 | "outputPrice": 0 252 | }, 253 | "gemini-exp-1206": { 254 | "maxTokens": 8192, 255 | "contextWindow": 2097152, 256 | "supportsImages": true, 257 | "supportsPromptCache": false, 258 | "inputPrice": 0, 259 | "outputPrice": 0 260 | } 261 | }, 262 | "openai-native": { 263 | "o3-mini": { 264 | "maxTokens": 100000, 265 | "contextWindow": 200000, 266 | "supportsImages": false, 267 | "supportsPromptCache": false, 268 | "inputPrice": 1.1, 269 | "outputPrice": 4.4 270 | }, 271 | "o1": { 272 | "maxTokens": 100000, 273 | "contextWindow": 200000, 274 | "supportsImages": true, 275 | "supportsPromptCache": false, 276 | "inputPrice": 15, 277 | "outputPrice": 60 278 | }, 279 | "o1-preview": { 280 | "maxTokens": 32768, 281 | "contextWindow": 128000, 282 | "supportsImages": true, 283 | "supportsPromptCache": false, 284 | "inputPrice": 15, 285 | "outputPrice": 60 286 | }, 287 | "o1-mini": { 288 | "maxTokens": 65536, 289 | "contextWindow": 128000, 290 | "supportsImages": true, 291 | "supportsPromptCache": false, 292 | "inputPrice": 1.1, 293 | "outputPrice": 4.4 294 | }, 295 | "gpt-4o": { 296 | "maxTokens": 4096, 297 | "contextWindow": 128000, 298 | "supportsImages": true, 299 | "supportsPromptCache": false, 300 | "inputPrice": 2.5, 301 | "outputPrice": 10 302 | }, 303 | "gpt-4o-mini": { 304 | "maxTokens": 16384, 305 | "contextWindow": 128000, 306 | "supportsImages": true, 307 | "supportsPromptCache": false, 308 | "inputPrice": 0.15, 309 | "outputPrice": 0.6 310 | } 311 | }, 312 | "deepseek": { 313 | "deepseek-chat": { 314 | "maxTokens": 8000, 315 | "contextWindow": 64000, 316 | 
"supportsImages": false, 317 | "supportsPromptCache": true, 318 | "inputPrice": 0, 319 | "outputPrice": 0.28, 320 | "cacheWritesPrice": 0.14, 321 | "cacheReadsPrice": 0.014 322 | }, 323 | "deepseek-reasoner": { 324 | "maxTokens": 8000, 325 | "contextWindow": 64000, 326 | "supportsImages": false, 327 | "supportsPromptCache": true, 328 | "inputPrice": 0, 329 | "outputPrice": 2.19, 330 | "cacheWritesPrice": 0.55, 331 | "cacheReadsPrice": 0.14 332 | } 333 | }, 334 | "qwen": { 335 | "qwen-coder-plus-latest": { 336 | "maxTokens": 129024, 337 | "contextWindow": 131072, 338 | "supportsImages": false, 339 | "supportsPromptCache": false, 340 | "inputPrice": 0.0035, 341 | "outputPrice": 0.007, 342 | "cacheWritesPrice": 0.0035, 343 | "cacheReadsPrice": 0.007 344 | }, 345 | "qwen-plus-latest": { 346 | "maxTokens": 129024, 347 | "contextWindow": 131072, 348 | "supportsImages": false, 349 | "supportsPromptCache": false, 350 | "inputPrice": 0.0008, 351 | "outputPrice": 0.002, 352 | "cacheWritesPrice": 0.0004, 353 | "cacheReadsPrice": 0.001 354 | }, 355 | "qwen-turbo-latest": { 356 | "maxTokens": 1000000, 357 | "contextWindow": 1000000, 358 | "supportsImages": false, 359 | "supportsPromptCache": false, 360 | "inputPrice": 0.0003, 361 | "outputPrice": 0.0006, 362 | "cacheWritesPrice": 0.00015, 363 | "cacheReadsPrice": 0.0003 364 | }, 365 | "qwen-max-latest": { 366 | "maxTokens": 30720, 367 | "contextWindow": 32768, 368 | "supportsImages": false, 369 | "supportsPromptCache": false, 370 | "inputPrice": 0.0112, 371 | "outputPrice": 0.0448, 372 | "cacheWritesPrice": 0.0056, 373 | "cacheReadsPrice": 0.0224 374 | }, 375 | "qwen-coder-plus": { 376 | "maxTokens": 129024, 377 | "contextWindow": 131072, 378 | "supportsImages": false, 379 | "supportsPromptCache": false, 380 | "inputPrice": 0.0035, 381 | "outputPrice": 0.007, 382 | "cacheWritesPrice": 0.0035, 383 | "cacheReadsPrice": 0.007 384 | }, 385 | "qwen-plus": { 386 | "maxTokens": 129024, 387 | "contextWindow": 131072, 388 | 
"supportsImages": false, 389 | "supportsPromptCache": false, 390 | "inputPrice": 0.0008, 391 | "outputPrice": 0.002, 392 | "cacheWritesPrice": 0.0004, 393 | "cacheReadsPrice": 0.001 394 | }, 395 | "qwen-turbo": { 396 | "maxTokens": 1000000, 397 | "contextWindow": 1000000, 398 | "supportsImages": false, 399 | "supportsPromptCache": false, 400 | "inputPrice": 0.0003, 401 | "outputPrice": 0.0006, 402 | "cacheWritesPrice": 0.00015, 403 | "cacheReadsPrice": 0.0003 404 | }, 405 | "qwen-max": { 406 | "maxTokens": 30720, 407 | "contextWindow": 32768, 408 | "supportsImages": false, 409 | "supportsPromptCache": false, 410 | "inputPrice": 0.0112, 411 | "outputPrice": 0.0448, 412 | "cacheWritesPrice": 0.0056, 413 | "cacheReadsPrice": 0.0224 414 | } 415 | }, 416 | "mistral": { 417 | "mistral-large-2411": { 418 | "maxTokens": 131000, 419 | "contextWindow": 131000, 420 | "supportsImages": false, 421 | "supportsPromptCache": false, 422 | "inputPrice": 2.0, 423 | "outputPrice": 6.0 424 | }, 425 | "pixtral-large-2411": { 426 | "maxTokens": 131000, 427 | "contextWindow": 131000, 428 | "supportsImages": true, 429 | "supportsPromptCache": false, 430 | "inputPrice": 2.0, 431 | "outputPrice": 6.0 432 | }, 433 | "ministral-3b-2410": { 434 | "maxTokens": 131000, 435 | "contextWindow": 131000, 436 | "supportsImages": false, 437 | "supportsPromptCache": false, 438 | "inputPrice": 0.04, 439 | "outputPrice": 0.04 440 | }, 441 | "ministral-8b-2410": { 442 | "maxTokens": 131000, 443 | "contextWindow": 131000, 444 | "supportsImages": false, 445 | "supportsPromptCache": false, 446 | "inputPrice": 0.1, 447 | "outputPrice": 0.1 448 | }, 449 | "mistral-small-2501": { 450 | "maxTokens": 32000, 451 | "contextWindow": 32000, 452 | "supportsImages": false, 453 | "supportsPromptCache": false, 454 | "inputPrice": 0.1, 455 | "outputPrice": 0.3 456 | }, 457 | "pixtral-12b-2409": { 458 | "maxTokens": 131000, 459 | "contextWindow": 131000, 460 | "supportsImages": true, 461 | "supportsPromptCache": false, 462 | 
"inputPrice": 0.15, 463 | "outputPrice": 0.15 464 | }, 465 | "open-mistral-nemo-2407": { 466 | "maxTokens": 131000, 467 | "contextWindow": 131000, 468 | "supportsImages": false, 469 | "supportsPromptCache": false, 470 | "inputPrice": 0.15, 471 | "outputPrice": 0.15 472 | }, 473 | "open-codestral-mamba": { 474 | "maxTokens": 256000, 475 | "contextWindow": 256000, 476 | "supportsImages": false, 477 | "supportsPromptCache": false, 478 | "inputPrice": 0.15, 479 | "outputPrice": 0.15 480 | }, 481 | "codestral-2501": { 482 | "maxTokens": 256000, 483 | "contextWindow": 256000, 484 | "supportsImages": false, 485 | "supportsPromptCache": false, 486 | "inputPrice": 0.3, 487 | "outputPrice": 0.9 488 | } 489 | }, 490 | "litellm": { 491 | "liteLlmDefaultModelId": "gpt-3.5-turbo", 492 | "liteLlmModelInfoSaneDefaults": { 493 | "maxTokens": 4096, 494 | "contextWindow": 8192, 495 | "supportsImages": false, 496 | "supportsPromptCache": false, 497 | "inputPrice": 0, 498 | "outputPrice": 0 499 | } 500 | }, 501 | "ollama": {}, 502 | "lmstudio": {}, 503 | "requesty": {}, 504 | "together": {}, 505 | "vscode-lm": {} 506 | } -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2022", 4 | "module": "Node16", 5 | "moduleResolution": "Node16", 6 | "outDir": "./build", 7 | "rootDir": "./src", 8 | "strict": true, 9 | "esModuleInterop": true, 10 | "skipLibCheck": true, 11 | "forceConsistentCasingInFileNames": true 12 | }, 13 | "include": ["src/**/*"], 14 | "exclude": ["node_modules"] 15 | } 16 | --------------------------------------------------------------------------------