├── .eslintrc.json ├── .github ├── ISSUE_TEMPLATE │ ├── 1.bug_report.yml │ ├── 2.feature_request.yml │ └── config.yml ├── dependabot.yml └── workflows │ ├── build.yml │ └── release.yml ├── .gitignore ├── .npmrc ├── .vscode ├── extensions.json ├── launch.json ├── settings.json └── tasks.json ├── .vscodeignore ├── .yarnclean ├── .yarnrc ├── CHANGELOG.md ├── LICENSE ├── README.md ├── images ├── ai-logo-small.png └── ai-logo.png ├── media ├── main.css ├── main.js ├── mcp-servers.css ├── mcp-servers.js ├── mcp.svg ├── prompt-manager.css ├── prompt-manager.js ├── reasoning.js ├── tool-call.css ├── tool-call.js └── vendor │ ├── highlight.min.css │ ├── highlight.min.js │ ├── jquery-3.5.1.min.js │ ├── jquery-ui.css │ ├── jquery-ui.min.js │ ├── marked.min.js │ ├── tailwindcss.3.2.4.min.js │ └── turndown.js ├── package.json ├── src ├── chatgpt-view-provider.ts ├── deepclaude.ts ├── extension.ts ├── github-copilot.ts ├── lib.dom.d.ts ├── llms.ts ├── logger.ts ├── mcp-server-provider.ts ├── mcp.ts ├── model-config.ts ├── openai-legacy.ts ├── openai.ts ├── prompt-based-chat.ts ├── prompt-based-tools.ts ├── prompt-manager-provider.ts ├── tool-call-parser.ts ├── types.ts └── utils.ts ├── tsconfig.json ├── vsc-extension-quickstart.md └── yarn.lock /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "root": true, 3 | "parser": "@typescript-eslint/parser", 4 | "parserOptions": { 5 | "ecmaVersion": 6, 6 | "sourceType": "module" 7 | }, 8 | "plugins": [ 9 | "@typescript-eslint" 10 | ], 11 | "rules": { 12 | "@typescript-eslint/naming-convention": "warn", 13 | "@typescript-eslint/semi": "warn", 14 | "curly": "warn", 15 | "eqeqeq": "warn", 16 | "no-throw-literal": "warn", 17 | "semi": "off" 18 | }, 19 | "ignorePatterns": [ 20 | "out", 21 | "dist", 22 | "**/*.d.ts" 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/1.bug_report.yml: -------------------------------------------------------------------------------- 1 | name: 🐞 Bug Report 2 | description: Create a bug report 3 | labels: ["bug"] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for using the extension and taking the time to fill out this bug report! 9 | - type: textarea 10 | attributes: 11 | label: Describe the Bug 12 | description: A clear description of what the bug is. Please make sure to list steps to reproduce your issue. Please share your OS, VS Code details as well. You could details of your VS Code via (Help->About) 13 | placeholder: | 14 | - Steps to reproduce the bug 15 | - ... 16 | - OS and version: [i.e. macOS Ventura (version 13)] 17 | - VS Code details: [i.e. 1.76.0] 18 | validations: 19 | required: true 20 | - type: textarea 21 | attributes: 22 | label: "Please tell us if you have customized any of the extension settings or whether you are using the defaults." 23 | description: Please list whether you use `Browser Auto-login` or `OpenAI API Key` method. Which model you are using i.e. `gpt-4o` and the parameters you may have customized in your settings. You could find all of the customized settings in your `Settings.json` 24 | validations: 25 | required: true 26 | - type: textarea 27 | attributes: 28 | label: Additional context 29 | description: Add any other context about the problem here. Please provide screenshots or screen recordings if possible. 
30 | validations: 31 | required: false 32 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/2.feature_request.yml: -------------------------------------------------------------------------------- 1 | name: 💡 Feature Request 2 | description: Suggest an idea 3 | labels: ["enhancement"] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for using the extension and considering suggesting an idea 9 | - type: textarea 10 | attributes: 11 | label: Describe the feature 12 | description: What would you like to see added / supported? 13 | validations: 14 | required: true 15 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: 💭 Join the Discord 4 | url: https://discord.gg/GuEdNDHQaM 5 | about: Ask questions and discuss with other community members 6 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "npm" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "daily" 12 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | push: 7 | branches: 8 | - main 9 | 10 | jobs: 11 | build: 12 | permissions: 13 | contents: read 14 | strategy: 15 | matrix: 16 | os: [macos-latest, ubuntu-latest, windows-latest] 17 | runs-on: ${{ matrix.os }} 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v3 21 | - name: Install Node.js 22 | uses: actions/setup-node@v3 23 | with: 24 | node-version: latest 25 | - run: yarn install 26 | - run: yarn run build 27 | - run: yarn run package 28 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*.*.*" 7 | 8 | jobs: 9 | release: 10 | permissions: 11 | contents: read 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v3 16 | - name: Install Node.js 17 | uses: actions/setup-node@v3 18 | with: 19 | node-version: latest 20 | - run: yarn install 21 | - run: yarn run package 22 | - run: yarn run publish 23 | env: 24 | VSCE_PAT: ${{ secrets.VSCE_PAT }} 25 | - run: npx ovsx publish -p ${OPEN_VSX_TOKEN} 26 | env: 27 | OPEN_VSX_TOKEN: ${{ secrets.OPEN_VSX_TOKEN }} 28 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | out 2 | dist 3 | node_modules 4 | package-lock.json 5 | .vscode-test/ 6 | *.vsix 7 | .DS_Store 8 | 
!node_modules/chatgpt/build/index.js 9 | 10 | .yarn/* 11 | !.yarn/releases 12 | !.yarn/plugins 13 | !.yarn/sdks 14 | !.yarn/versions 15 | .pnp.* 16 | -------------------------------------------------------------------------------- /.npmrc: -------------------------------------------------------------------------------- 1 | registry=https://registry.npmjs.org/ 2 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | // See http://go.microsoft.com/fwlink/?LinkId=827846 3 | // for the documentation about the extensions.json format 4 | "recommendations": [ 5 | "dbaeumer.vscode-eslint", 6 | "esbenp.prettier-vscode", 7 | "connor4312.esbuild-problem-matchers" 8 | ] 9 | } -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | // A launch configuration that compiles the extension and then opens it inside a new window 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | { 6 | "version": "0.2.0", 7 | "configurations": [ 8 | { 9 | "name": "Run Extension", 10 | "type": "extensionHost", 11 | "request": "launch", 12 | "args": [ 13 | "--extensionDevelopmentPath=${workspaceFolder}" 14 | ], 15 | "outFiles": [ 16 | "${workspaceFolder}/out/**/*.js" 17 | ], 18 | "preLaunchTask": "npm: watch" 19 | }, 20 | { 21 | "name": "Extension Tests", 22 | "type": "extensionHost", 23 | "request": "launch", 24 | "args": [ 25 | "--extensionDevelopmentPath=${workspaceFolder}", 26 | "--extensionTestsPath=${workspaceFolder}/out/test/suite/index" 27 | ], 28 | "outFiles": [ 29 | "${workspaceFolder}/out/test/**/*.js" 30 | ], 31 | "preLaunchTask": "${defaultBuildTask}" 32 | } 33 | ] 34 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.exclude": { 3 | "out": false 4 | }, 5 | "search.exclude": { 6 | "out": true 7 | }, 8 | "typescript.tsc.autoDetect": "off", 9 | "editor.formatOnSave": true, 10 | "editor.formatOnPaste": true, 11 | "editor.tabSize": 4, 12 | "editor.insertSpaces": true, 13 | "editor.codeActionsOnSave": { 14 | "source.fixAll": "explicit", 15 | "source.organizeImports": "explicit" 16 | }, 17 | "javascript.format.semicolons": "insert", 18 | "typescript.format.semicolons": "insert", 19 | "javascript.preferences.quoteStyle": "double", 20 | "[typescript]": { 21 | "editor.defaultFormatter": "vscode.typescript-language-features", 22 | "typescript.preferences.quoteStyle": "double", 23 | }, 24 | "[javascript]": { 25 | "editor.defaultFormatter": "vscode.typescript-language-features" 26 | }, 27 | "[json]": { 28 | "editor.defaultFormatter": "vscode.json-language-features" 29 | }, 30 | "[jsonc]": { 31 | "editor.defaultFormatter": "vscode.json-language-features" 32 | }, 33 | "[css]": { 34 | "editor.defaultFormatter": "vscode.css-language-features" 35 | }, 36 | } -------------------------------------------------------------------------------- /.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0.0", 3 | "tasks": [ 4 | { 5 | "type": "npm", 6 | "script": "watch", 7 | "group": "build", 8 | 
"problemMatcher": "$esbuild-watch", 9 | "isBackground": true, 10 | "label": "npm: watch", 11 | }, 12 | { 13 | "type": "npm", 14 | "script": "build", 15 | "group": "build", 16 | "problemMatcher": "$esbuild", 17 | "label": "npm: build", 18 | } 19 | ] 20 | } -------------------------------------------------------------------------------- /.vscodeignore: -------------------------------------------------------------------------------- 1 | .vscode/** 2 | .vscode-test/** 3 | node_modules/** 4 | src/** 5 | .github 6 | .gitignore 7 | .yarnrc 8 | vsc-extension-quickstart.md 9 | **/tsconfig.json 10 | **/.eslintrc.json 11 | **/*.map 12 | **/*.ts 13 | images/** 14 | temp/** 15 | .vsix 16 | !images/*.png 17 | !images/*.svg 18 | -------------------------------------------------------------------------------- /.yarnclean: -------------------------------------------------------------------------------- 1 | # test directories 2 | __tests__ 3 | test 4 | tests 5 | powered-test 6 | 7 | # asset directories 8 | docs 9 | website 10 | images 11 | assets 12 | 13 | # examples 14 | example 15 | examples 16 | 17 | # code coverage directories 18 | coverage 19 | .nyc_output 20 | 21 | # build scripts 22 | Makefile 23 | Gulpfile.js 24 | Gruntfile.js 25 | 26 | # configs 27 | appveyor.yml 28 | circle.yml 29 | codeship-services.yml 30 | codeship-steps.yml 31 | wercker.yml 32 | .tern-project 33 | .gitattributes 34 | .editorconfig 35 | .*ignore 36 | .eslintrc 37 | .jshintrc 38 | .flowconfig 39 | .documentup.json 40 | .yarn-metadata.json 41 | .travis.yml 42 | 43 | # misc 44 | *.md 45 | -------------------------------------------------------------------------------- /.yarnrc: -------------------------------------------------------------------------------- 1 | --ignore-engines true -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## v4.9.2 4 | 5 | * Add prompt-based tool calling 6 | * Enable tool calling for Github Copilot models 7 | 8 | ## v4.9.1 9 | 10 | * fix: terminate conversation gracefully on error 11 | 12 | ## v4.9.0 13 | 14 | * Refine tool call UX for MCP 15 | * Add new claude 4 and Gemini 2.5 Pro models 16 | * Fix tool call schema issues for OpenAI/AzureOpenAI o-series models 17 | 18 | ## v4.8.3 19 | 20 | * Add request headers 21 | 22 | ## v4.8.2 23 | 24 | * CVE fixes for various packages 25 | 26 | ## v4.8.1 27 | 28 | * feat: add env config options for mcp servers 29 | 30 | ## v4.8.0 31 | 32 | * Update LOGO and fix logger 33 | 34 | ## v4.7.3 35 | 36 | * Fix reasoning model for deepclaude mode 37 | 38 | ## v4.7.2 39 | 40 | * feat: add agent max steps when enabling MCP servers 41 | 42 | ## v4.7.0 43 | 44 | * Added Model Context Protocol (MCP) integration. 45 | 46 | ## v4.6.9 47 | 48 | * Add support for Github Copilot models. 49 | 50 | ## v4.6.8 51 | 52 | * Remove the default system prompt. 53 | * Fix the reasoning model bug. 54 | * Add support for Azure AI inference endpoint (baseURL: ). 55 | 56 | ## v4.6.7 57 | 58 | * Add new reasoning model configurations and added support of DeepClaude mode (DeepSeek + Claude). 
59 | 60 | ## v4.6.6 61 | 62 | * Add support for more AI/LLM providers 63 | * Show reasoning response for deepseek/o1/o3 64 | 65 | ## v4.6.5 66 | 67 | * feat: add support for reasoning models (DeepSeek R1 and o3-mini) 68 | 69 | ## v4.6.4 70 | 71 | * doc: refine startup doc 72 | 73 | ## v4.6.3 74 | 75 | * feat: chat with files (including text files and images) 76 | 77 | ## v4.6.2 78 | 79 | * feat: set selected prompt as system prompt 80 | * fix markdown rendering issue 81 | * bump dependencies for security fixes 82 | 83 | ## v4.6.1 84 | 85 | * Add a set of new models from OpenAI, Gemini and Claude, including o1, claude 3.5 and gemini-2.0-flash-thinking models. 86 | * Bump default model to gpt-4o from gpt-3.5-turbo. 87 | 88 | ## v4.6.0 89 | 90 | * Add support of prompt manager and chat with your own prompts (use # to search) 91 | 92 | ## v4.5.1 93 | 94 | * Fix the version compatibility issue 95 | 96 | ## v4.5.0 97 | 98 | * Add support of Google Generative AI models 99 | * Refine the project and reduce extension size (web searching is temporally removed and will be added back in the future) 100 | 101 | ## v4.4.4 102 | 103 | * Add support for Anthropic Claude 3.5 104 | 105 | ## v4.4.3 106 | 107 | * Support GPT-4o 108 | 109 | ## v4.4.2 110 | 111 | * Support custom model names for local or self-hosted LLMs 112 | 113 | ## v4.4.1 114 | 115 | * Add support of Serper and Bing search 116 | * Add searching support for Claude models 117 | 118 | ## v4.4.0 119 | 120 | * Add support for Anthropic Claude 3 121 | 122 | ## v4.3.1 123 | 124 | * Add support for Google Custom Search 125 | * Set "chatgpt.gpt3.googleCSEApiKey" and "chatgpt.gpt3.googleCSEId" to enable this feature 126 | 127 | ## v4.3.0 128 | 129 | * Add support of customized baseURL 130 | * Bump depdencies and fix CVEs 131 | * Add support of latest OpenAI models (e.g. GPT-4 Turbo and so on) 132 | * Add support of reading OpenAI Key from environment variable "OPENAI_API_KEY" when it is not set in vscode configure file 133 | * Add a few samples for typical configurations 134 | 135 | ## v4.2.0 136 | 137 | * Switch to a new LOGO 138 | * Added streaming output support 139 | * Bump dependencies to fix the potential security issues 140 | 141 | ## v4.1.3 142 | 143 | * Bump dependencies to fix the potential security issues 144 | 145 | ## v4.1.2 146 | 147 | * Bump dependencies to fix the potential security issues 148 | 149 | ## v4.1.1 150 | 151 | * Fix the default base URL for OpenAI 152 | 153 | ## v4.1.0 154 | 155 | * Cleanup the unused parameters and functions 156 | * Fix the conversation stuck issues 157 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | ISC License 2 | 3 | Copyright (c) 2022, Ali Gençay 4 | Copyright (c) 2023-Present, Pengfei Ni 5 | 6 | Permission to use, copy, modify, and/or distribute this software for any 7 | purpose with or without fee is hereby granted, provided that the above 8 | copyright notice and this permission notice appear in all copies. 9 | 10 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 | MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |


A VS Code ChatGPT Copilot Extension

2 | 3 |

4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 |

14 | 15 | ## The Most Loved Open-Source ChatGPT Extension for VS Code 16 | 17 | ChatGPT Copilot is a powerful and telemetry-free extension for Visual Studio Code, bringing the capabilities of ChatGPT directly into your coding environment. 18 | 19 | ## Features 20 | 21 | - 🤖 Supports GPT-4, o1, Claude, Gemini, Ollama, Github and other OpenAI-compatible local models with your API key from OpenAI, Azure OpenAI Service, Google, Anthropic or other providers. 22 | - 💥 Model Context Protocol (MCP) to bring your own tools and DeepClaude (DeepSeek R1 + Claude) mode for best AI responses. 23 | - 📂 Chat with your Files: Add multiple files and images to your chat using `@` for seamless collaboration. 24 | - 📃 Streaming Answers: Receive real-time responses to your prompts in the sidebar conversation window. 25 | - 📖 Prompt Manager: Chat with your own prompts (use # to search). 26 | - 🔥 Tool calls via prompt parsing for models that don't support native tool calling. 27 | - 📝 Code Assistance: Create files or fix your code with one click or keyboard shortcuts. 28 | - ➡️ Export Conversations: Export all your conversation history at once in Markdown format. 29 | - 📰 Custom Prompt Prefixes: Customize what you are asking ChatGPT with ad-hoc prompt prefixes. 30 | - 💻 Seamless Code Integration: Copy, insert, or create new files directly from ChatGPT's code suggestions. 31 | - ➕ Editable Prompts: Edit and resend previous prompts. 32 | - 🛡️ Telemetry Free: No usage data is collected. 33 | 34 | ## Recent Release Highlights 35 | 36 | * **v4.9**: Add prompt based tool calls for models that don't support native tool calling. 37 | * **v4.8**: New LOGO and new models. 38 | * **v4.7**: Added Model Context Protocol (MCP) integration. 39 | * **v4.6**: Added prompt manager, DeepClaude mode (DeepSeek + Claude) mode, Github Copilot provider and chat with files. 40 | 41 | ## Installation 42 | 43 | - Install the extension from the [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=feiskyer.chatgpt-copilot) or search `ChatGPT Copilot` in VScode Extensions and click install. 44 | - Reload Visual Studio Code after installation. 
45 | 46 | ## Supported Models & Providers 47 | 48 | ### **AI Providers** 49 | 50 | The extension supports major AI providers with hundreds of models: 51 | 52 | | Provider | Models | Special Features | 53 | | -------- | ------ | ---------------- | 54 | | **OpenAI** | GPT-4o, GPT-4, GPT-3.5-turbo, o1, o3, o4-mini | Reasoning models, function calling | 55 | | **Anthropic** | Claude Sonnet 4, Claude 3.5 Sonnet, Claude Opus 4 | Advanced reasoning, large context | 56 | | **Google** | Gemini 2.5 Pro, Gemini 2.0 Flash, Gemini Pro | Search grounding, multimodal | 57 | | **GitHub Copilot** | GPT-4o, Claude Sonnet 4, o3-mini, Gemini 2.5 Pro | Built-in VS Code authentication | 58 | | **DeepSeek** | DeepSeek R1, DeepSeek Reasoner | Advanced reasoning capabilities | 59 | | **Azure OpenAI** | GPT-4o, GPT-4, o1 | Enterprise-grade security | 60 | | **Azure AI** | Various non-OpenAI models | Microsoft's AI model hub | 61 | | **Ollama** | Llama, Qwen, CodeLlama, Mistral | Local model execution | 62 | | **Groq** | Llama, Mixtral, Gemma | Ultra-fast inference | 63 | | **Perplexity** | Llama, Mistral models | Web-enhanced responses | 64 | | **xAI** | Grok models | Real-time information | 65 | | **Mistral** | Mistral Large, Codestral | Code-specialized models | 66 | | **Together** | Various open-source models | Community models | 67 | | **OpenRouter** | 200+ models | Access to multiple providers | 68 | 69 | ## AI Services 70 | 71 | Configure the extension by setting your API keys and preferences in the settings. 72 | 73 | | Configuration | Description | 74 | | ------------- | ----------- | 75 | | API Key | Required, get from [OpenAI](https://platform.openai.com/account/api-keys), [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service), [Anthropic](https://console.anthropic.com/settings/keys) or other AI services | 76 | | API Base URL | Optional, default to "" | 77 | | Model | Optional, default to "gpt-4o" | 78 | 79 | Refer to the following sections for more details on configuring various AI services. 80 | 81 |
82 | 83 | OpenAI 84 | 85 | > **Special notes for ChatGPT users**: 86 | > The OpenAI API is billed separately from the ChatGPT app. You need to add credits to your OpenAI account for API usage [here](https://platform.openai.com/settings/organization/billing/overview). Once you have added credits, create a new API key and it should work. 87 | 88 | | Configuration | Example | 89 | | ------------- | ----------- | 90 | | API Key | your-api-key | 91 | | Model | gpt-4o | 92 | | API Base URL | (Optional) | 93 | 94 |
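The table above maps onto the extension's `chatgpt.gpt3.*` settings (listed in full under Configurations). As a minimal sketch, assuming that mapping and using a placeholder key, the OpenAI setup in `settings.json` looks roughly like this:

```jsonc
{
  "chatgpt.gpt3.apiKey": "sk-your-api-key",              // placeholder: your own OpenAI API key
  "chatgpt.gpt3.model": "gpt-4o",
  "chatgpt.gpt3.apiBaseUrl": "https://api.openai.com/v1" // optional: this is the documented default
}
```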
95 | 96 |
97 | Ollama 98 | 99 | Pull your model first from the Ollama [library](https://ollama.com/library) and then set up the base URL and custom model. 100 | 101 | | Configuration | Example | 102 | | ------------- | ----------- | 103 | | API Key | ollama (Optional) | 104 | | Model | custom | 105 | | Custom Model | qwen2.5 | 106 | | API Base URL | | 107 | 108 |
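A comparable `settings.json` sketch for Ollama; the base URL below is an assumption (point it at your own Ollama endpoint, which is elided in the table above):

```jsonc
{
  "chatgpt.gpt3.apiKey": "ollama",                       // optional for Ollama
  "chatgpt.gpt3.model": "custom",
  "chatgpt.gpt3.customModel": "qwen2.5",                 // the model pulled from the Ollama library
  "chatgpt.gpt3.apiBaseUrl": "http://localhost:11434/v1" // assumption: a typical local Ollama endpoint
}
```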
109 | 110 |
111 | DeepSeek 112 | 113 | Ollama provider: 114 | 115 | | Configuration | Example | 116 | | ------------- | ---------------------------- | 117 | | API Key | ollama (Optional) | 118 | | Model | custom | 119 | | Custom Model | deepseek-r1 | 120 | | API Base URL | | 121 | 122 | DeepSeek provider: 123 | 124 | | Configuration | Example | 125 | | ------------- | -------------------------- | 126 | | API Key | your-deepseek-key | 127 | | Model | deepseek-reasoner | 128 | | API Base URL | | 129 | 130 | SiliconFlow (SiliconCloud) provider: 131 | 132 | | Configuration | Example | 133 | | ------------- | ----------------------------- | 134 | | API Key | your-siliconflow-key | 135 | | Model | custom | 136 | | Custom Model | deepseek-ai/DeepSeek-R1 | 137 | | API Base URL | | 138 | 139 | Azure AI Foundry provider: 140 | 141 | | Configuration | Example | 142 | | ------------- | ---------------------------------------------------- | 143 | | API Key | your-azure-ai-key | 144 | | Model | DeepSeek-R1 | 145 | | API Base URL | https://[endpoint-name].[region].models.ai.azure.com | 146 | 147 |
148 | 149 |
150 | Anthropic Claude 151 | 152 | | Configuration | Example | 153 | | ------------- | ----------- | 154 | | API Key | your-api-key | 155 | | Model | claude-3-sonnet-20240229 | 156 | | API Base URL | (Optional) | 157 | 158 |
159 | 160 |
161 | Google Gemini 162 | 163 | | Configuration | Example | 164 | | ------------- | ----------- | 165 | | API Key | your-api-key | 166 | | Model | gemini-2.0-flash-thinking-exp-1219 | 167 | | API Base URL | (Optional) | 168 | 169 |
170 | 171 |
172 | Azure OpenAI 173 | 174 | For Azure OpenAI Service, apiBaseUrl should be set to the format `https://[YOUR-ENDPOINT-NAME].openai.azure.com/openai/deployments/[YOUR-DEPLOYMENT-NAME]`. 175 | 176 | | Configuration | Example | 177 | | ------------- | ----------- | 178 | | API Key | your-api-key | 179 | | Model | gpt-4o | 180 | | API Base URL | | 181 | 182 |
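A hedged `settings.json` sketch for Azure OpenAI, keeping the endpoint and deployment names as placeholders in the documented URL format:

```jsonc
{
  "chatgpt.gpt3.apiKey": "your-azure-openai-key",  // placeholder
  "chatgpt.gpt3.model": "gpt-4o",
  // Replace [YOUR-ENDPOINT-NAME] and [YOUR-DEPLOYMENT-NAME] with your own values
  "chatgpt.gpt3.apiBaseUrl": "https://[YOUR-ENDPOINT-NAME].openai.azure.com/openai/deployments/[YOUR-DEPLOYMENT-NAME]"
}
```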
183 | 184 |
185 | GitHub Copilot 186 | 187 | [GitHub Copilot](https://github.com/features/copilot) is supported with built-in authentication (a popup will ask for your permission when using GitHub Copilot models). 188 | 189 | **Supported Models:** 190 | - **OpenAI Models**: `gpt-3.5-turbo`, `gpt-4`, `gpt-4-turbo`, `gpt-4o`, `gpt-4o-mini`, `gpt-4.1`, `gpt-4.5` 191 | - **Reasoning Models**: `o1-ga`, `o3-mini`, `o3`, `o4-mini` 192 | - **Claude Models**: `claude-3.5-sonnet`, `claude-3.7-sonnet`, `claude-3.7-sonnet-thought`, `claude-sonnet-4`, `claude-opus-4` 193 | - **Gemini Models**: `gemini-2.0-flash`, `gemini-2.5-pro` 194 | 195 | | Configuration | Example | 196 | | ------------- | ----------- | 197 | | Provider | GitHubCopilot | 198 | | API Key | github | 199 | | Model | custom | 200 | | Custom Model | claude-sonnet-4 | 201 | 202 |
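Assuming the table above maps onto the provider settings documented under Configurations, a GitHub Copilot sketch for `settings.json` would be:

```jsonc
{
  "chatgpt.gpt3.provider": "GitHubCopilot",
  "chatgpt.gpt3.apiKey": "github",               // literal value from the table above
  "chatgpt.gpt3.model": "custom",
  "chatgpt.gpt3.customModel": "claude-sonnet-4"  // any supported model listed above
}
```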
203 | 204 |
205 | Github Models 206 | 207 | For [Github Models](https://github.com/marketplace/models), get your Github token from [here](https://github.com/settings/tokens). 208 | 209 | | Configuration | Example | 210 | | ------------- | ----------- | 211 | | API Key | your-github-token | 212 | | Model | o1 | 213 | | API Base URL | | 214 | 215 |
216 | 217 |
218 | OpenAI compatible Models 219 | 220 | To use OpenAI-compatible APIs, set Model to `"custom"` and then specify your model name in the Custom Model setting. 221 | 222 | Example for [groq](https://console.groq.com/): 223 | 224 | | Configuration | Example | 225 | | ------------- | ----------- | 226 | | API Key | your-groq-key | 227 | | Model | custom | 228 | | Custom Model | mixtral-8x7b-32768 | 229 | | API Base URL | | 230 | 231 |
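A sketch for an OpenAI-compatible provider such as Groq; the base URL (elided in the table above) is shown as a placeholder:

```jsonc
{
  "chatgpt.gpt3.apiKey": "your-groq-key",                       // placeholder
  "chatgpt.gpt3.model": "custom",
  "chatgpt.gpt3.customModel": "mixtral-8x7b-32768",
  "chatgpt.gpt3.apiBaseUrl": "https://your-provider-endpoint/v1" // placeholder: your provider's OpenAI-compatible endpoint
}
```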
232 | 233 |
234 | DeepClaude (DeepSeek + Claude) 235 | 236 | | Configuration | Example | 237 | | ------------- | ----------- | 238 | | API Key | your-api-key | 239 | | Model | claude-3-sonnet-20240229 | 240 | | API Base URL | (Optional) | 241 | | Reasoning API Key | your-deepseek-api-key | 242 | | Reasoning Model | deepseek-reasoner (or deepseek-r1, depending on your provider) | 243 | | Reasoning API Base URL | (or your own base URL) | 244 | 245 |
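DeepClaude combines the regular chat settings with the `chatgpt.gpt3.reasoning.*` settings documented under Configurations. A sketch with placeholder keys and base URLs:

```jsonc
{
  // Chat model (Claude)
  "chatgpt.gpt3.apiKey": "your-anthropic-key",                             // placeholder
  "chatgpt.gpt3.model": "claude-3-sonnet-20240229",
  // Reasoning model (DeepSeek)
  "chatgpt.gpt3.reasoning.apiKey": "your-deepseek-key",                    // placeholder
  "chatgpt.gpt3.reasoning.model": "deepseek-reasoner",
  "chatgpt.gpt3.reasoning.apiBaseUrl": "https://your-deepseek-endpoint/v1" // placeholder: depends on your provider
}
```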
246 | 247 | ## Commands & Keyboard Shortcuts 248 | 249 | The extension provides various commands accessible through the Command Palette (`Ctrl+Shift+P` / `Cmd+Shift+P`) and keyboard shortcuts. 250 | 251 |
252 | 253 | Context Menu Commands 254 | 255 | ### **Context Menu Commands** (Right-click on selected code) 256 | | Command | Keyboard Shortcut | Description | 257 | | ------- | ----------------- | ----------- | 258 | | **Generate Code** | `Ctrl+Shift+A` / `Cmd+Shift+A` | Generate code based on comments or requirements | 259 | | **Add Tests** | `Ctrl+K Ctrl+Shift+1` / `Cmd+K Cmd+Shift+1` | Generate unit tests for selected code | 260 | | **Find Problems** | `Ctrl+K Ctrl+Shift+2` / `Cmd+K Cmd+Shift+2` | Analyze code for bugs and issues | 261 | | **Optimize** | `Ctrl+K Ctrl+Shift+3` / `Cmd+K Cmd+Shift+3` | Optimize and improve selected code | 262 | | **Explain** | `Ctrl+K Ctrl+Shift+4` / `Cmd+K Cmd+Shift+4` | Explain how the selected code works | 263 | | **Add Comments** | `Ctrl+K Ctrl+Shift+5` / `Cmd+K Cmd+Shift+5` | Add documentation comments to code | 264 | | **Complete Code** | `Ctrl+K Ctrl+Shift+6` / `Cmd+K Cmd+Shift+6` | Complete partial or incomplete code | 265 | | **Ad-hoc Prompt** | `Ctrl+K Ctrl+Shift+7` / `Cmd+K Cmd+Shift+7` | Use custom prompt with selected code | 266 | | **Custom Prompt 1** | `Ctrl+K Ctrl+Shift+8` / `Cmd+K Cmd+Shift+8` | Apply your first custom prompt | 267 | | **Custom Prompt 2** | `Ctrl+K Ctrl+Shift+9` / `Cmd+K Cmd+Shift+9` | Apply your second custom prompt | 268 | 269 |
270 | 271 | 272 |
273 | General Commands 274 | 275 | ### **General Commands** 276 | | Command | Description | 277 | | ------- | ----------- | 278 | | `ChatGPT: Ask anything` | Open input box to ask any question | 279 | | `ChatGPT: Reset session` | Clear current conversation and start fresh | 280 | | `ChatGPT: Clear conversation` | Clear the conversation history | 281 | | `ChatGPT: Export conversation` | Export chat history to Markdown file | 282 | | `ChatGPT: Manage Prompts` | Open prompt management interface | 283 | | `ChatGPT: Toggle Prompt Manager` | Show/hide the prompt manager panel | 284 | | `Add Current File to Chat Context` | Add the currently open file to chat context | 285 | | `ChatGPT: Open MCP Servers` | Manage Model Context Protocol servers | 286 | 287 | 288 |
289 | 290 |
291 | 292 | Prompt Management 293 | 294 | ### **Prompt Management** 295 | - Use `#` followed by prompt name to search and apply saved prompts 296 | - Use `@` to add files to your conversation context 297 | - Access the Prompt Manager through the sidebar for full prompt management 298 | 299 |
300 | 301 | ## Model Context Protocol (MCP) 302 | 303 | The extension supports the **Model Context Protocol (MCP)**, allowing you to extend AI capabilities with custom tools and integrations. 304 | 305 |
306 | 307 | What is MCP? 308 | 309 | ### **What is MCP?** 310 | 311 | MCP enables AI models to securely connect to external data sources and tools, providing: 312 | - **Custom Tools**: Integrate your own tools and APIs 313 | - **Data Sources**: Connect to databases, file systems, APIs, and more 314 | - **Secure Execution**: Sandboxed tool execution environment 315 | - **Multi-Step Workflows**: Agent-like behavior with tool chaining 316 | 317 | ### **MCP Server Types** 318 | The extension supports three types of MCP servers: 319 | 320 | | Type | Description | Use Case | 321 | | ---- | ----------- | -------- | 322 | | **stdio** | Standard input/output communication | Local command-line tools and scripts | 323 | | **sse** | Server-Sent Events over HTTP | Web-based tools and APIs | 324 | | **streamable-http** | HTTP streaming communication | Real-time data sources | 325 | 326 | 327 |
328 | 329 |
330 | 331 | How to configure MCP? 332 | 333 | ### **MCP Configuration** 334 | 1. **Access MCP Manager**: Use `ChatGPT: Open MCP Servers` command or click the MCP icon in the sidebar 335 | 2. **Add MCP Server**: Configure your MCP servers with: 336 | - **Name**: Unique identifier for the server 337 | - **Type**: Choose from stdio, sse, or streamable-http 338 | - **Command/URL**: Executable path or HTTP endpoint 339 | - **Arguments**: Command-line arguments (for stdio) 340 | - **Environment Variables**: Custom environment settings 341 | - **Headers**: HTTP headers (for sse/streamable-http) 342 | 343 | ### **Example MCP Configurations** 344 | 345 | **File System Access (stdio):** 346 | ```json 347 | { 348 | "name": "filesystem", 349 | "type": "stdio", 350 | "command": "npx", 351 | "args": ["-y", "@modelcontextprotocol/server-filesystem", "/path/to/directory"], 352 | "isEnabled": true 353 | } 354 | ``` 355 | 356 | **Web Search (sse):** 357 | ```json 358 | { 359 | "name": "web-search", 360 | "type": "sse", 361 | "url": "https://api.example.com/mcp/search", 362 | "headers": {"Authorization": "Bearer your-token"}, 363 | "isEnabled": true 364 | } 365 | ``` 366 | 367 |
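**Real-time Data Source (streamable-http):** a hypothetical entry, assuming it takes the same fields as the `sse` example above (the name and URL are placeholders):

```json
{
  "name": "realtime-data",
  "type": "streamable-http",
  "url": "https://api.example.com/mcp/stream",
  "headers": {"Authorization": "Bearer your-token"},
  "isEnabled": true
}
```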
368 | 369 |
370 | Agent Mode 371 | 372 | ### **Agent Mode** 373 | 374 | When MCP servers are enabled, the extension operates in **Agent Mode**: 375 | - **Max Steps**: Configure up to 15 tool execution steps 376 | - **Tool Chaining**: Automatic multi-step workflows 377 | - **Error Handling**: Robust error recovery and retry logic 378 | - **Progress Tracking**: Real-time tool execution feedback 379 | 380 |
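The step limit maps to the `chatgpt.gpt3.maxSteps` setting described under Configurations, for example:

```jsonc
{
  "chatgpt.gpt3.maxSteps": 15  // default: maximum tool-execution steps in agent mode
}
```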
381 | 382 | ## Configurations 383 | 384 |
385 | 386 | Full list of configuration options 387 | 388 | ### **Core Configuration** 389 | | Setting | Default | Description | 390 | | ------- | ------- | ----------- | 391 | | `chatgpt.gpt3.provider` | `Auto` | AI Provider: Auto, OpenAI, Azure, AzureAI, Anthropic, GitHubCopilot, Google, Mistral, xAI, Together, DeepSeek, Groq, Perplexity, OpenRouter, Ollama | 392 | | `chatgpt.gpt3.apiKey` | | API key for your chosen provider | 393 | | `chatgpt.gpt3.apiBaseUrl` | `https://api.openai.com/v1` | API base URL for your provider | 394 | | `chatgpt.gpt3.model` | `gpt-4o` | Model to use for conversations | 395 | | `chatgpt.gpt3.customModel` | | Custom model name when using `custom` model option | 396 | | `chatgpt.gpt3.organization` | | Organization ID (OpenAI only) | 397 | 398 | ### **Model Parameters** 399 | | Setting | Default | Description | 400 | | ------- | ------- | ----------- | 401 | | `chatgpt.gpt3.maxTokens` | `0` (unlimited) | Maximum tokens to generate in completion | 402 | | `chatgpt.gpt3.temperature` | `1` | Sampling temperature (0-2). Higher = more creative | 403 | | `chatgpt.gpt3.top_p` | `1` | Nucleus sampling parameter (0-1) | 404 | | `chatgpt.systemPrompt` | | System prompt for the AI assistant | 405 | 406 | ### **DeepClaude (Reasoning + Chat) Configuration** 407 | | Setting | Default | Description | 408 | | ------- | ------- | ----------- | 409 | | `chatgpt.gpt3.reasoning.provider` | `Auto` | Provider for reasoning model (Auto, OpenAI, Azure, AzureAI, Google, DeepSeek, Groq, OpenRouter, Ollama) | 410 | | `chatgpt.gpt3.reasoning.apiKey` | | API key for reasoning model | 411 | | `chatgpt.gpt3.reasoning.apiBaseUrl` | `https://api.openai.com/v1` | API base URL for reasoning model | 412 | | `chatgpt.gpt3.reasoning.model` | | Model to use for reasoning (e.g., deepseek-reasoner, o1) | 413 | | `chatgpt.gpt3.reasoning.organization` | | Organization ID for reasoning model (OpenAI only) | 414 | 415 | ### **Agent & MCP Configuration** 416 | | Setting | Default | Description | 417 | | ------- | ------- | ----------- | 418 | | `chatgpt.gpt3.maxSteps` | `15` | Maximum steps for agent mode when using MCP servers | 419 | 420 | ### **Feature Toggles** 421 | | Setting | Default | Description | 422 | | ------- | ------- | ----------- | 423 | | `chatgpt.gpt3.generateCode-enabled` | `true` | Enable code generation context menu | 424 | | `chatgpt.gpt3.searchGrounding.enabled` | `false` | Enable search grounding (Gemini models only) | 425 | 426 | ### **Prompt Prefixes & Context Menu** 427 | | Setting | Default | Description | 428 | | ------- | ------- | ----------- | 429 | | `chatgpt.promptPrefix.addTests` | `Implement tests for the following code` | Prompt for generating unit tests | 430 | | `chatgpt.promptPrefix.addTests-enabled` | `true` | Enable "Add Tests" context menu item | 431 | | `chatgpt.promptPrefix.findProblems` | `Find problems with the following code` | Prompt for finding bugs and issues | 432 | | `chatgpt.promptPrefix.findProblems-enabled` | `true` | Enable "Find Problems" context menu item | 433 | | `chatgpt.promptPrefix.optimize` | `Optimize the following code` | Prompt for code optimization | 434 | | `chatgpt.promptPrefix.optimize-enabled` | `true` | Enable "Optimize" context menu item | 435 | | `chatgpt.promptPrefix.explain` | `Explain the following code` | Prompt for code explanation | 436 | | `chatgpt.promptPrefix.explain-enabled` | `true` | Enable "Explain" context menu item | 437 | | `chatgpt.promptPrefix.addComments` | `Add comments for the following code` | Prompt for adding 
documentation | 438 | | `chatgpt.promptPrefix.addComments-enabled` | `true` | Enable "Add Comments" context menu item | 439 | | `chatgpt.promptPrefix.completeCode` | `Complete the following code` | Prompt for code completion | 440 | | `chatgpt.promptPrefix.completeCode-enabled` | `true` | Enable "Complete Code" context menu item | 441 | | `chatgpt.promptPrefix.adhoc-enabled` | `true` | Enable "Ad-hoc Prompt" context menu item | 442 | | `chatgpt.promptPrefix.customPrompt1` | | Your first custom prompt template | 443 | | `chatgpt.promptPrefix.customPrompt1-enabled` | `false` | Enable first custom prompt in context menu | 444 | | `chatgpt.promptPrefix.customPrompt2` | | Your second custom prompt template | 445 | | `chatgpt.promptPrefix.customPrompt2-enabled` | `false` | Enable second custom prompt in context menu | 446 | 447 | ### **User Interface** 448 | | Setting | Default | Description | 449 | | ------- | ------- | ----------- | 450 | | `chatgpt.response.showNotification` | `false` | Show notification when AI responds | 451 | | `chatgpt.response.autoScroll` | `true` | Auto-scroll to bottom when new content is added | 452 | 453 |
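As a worked example, a `settings.json` fragment combining a few of the options above (the prompt texts are purely illustrative):

```jsonc
{
  "chatgpt.systemPrompt": "You are a concise senior engineer.",                                 // illustrative
  "chatgpt.gpt3.temperature": 0.2,
  "chatgpt.promptPrefix.customPrompt1": "Rewrite the following code in idiomatic TypeScript: ", // illustrative
  "chatgpt.promptPrefix.customPrompt1-enabled": true,
  "chatgpt.response.autoScroll": true
}
```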
454 | 455 | ## How to install locally 456 | 457 |
458 | 459 | Build and install locally 460 | 461 | We highly recommend installing the extension directly from the VS Code Marketplace for the easiest setup and automatic updates. However, for advanced users, building and installing locally is also an option. 462 | 463 | - Install `vsce` if you don't have it on your machine (The Visual Studio Code Extension Manager) 464 | - `npm install --global vsce` 465 | - Run `vsce package` 466 | - Follow the instructions and install manually. 467 | 468 | ```sh 469 | npm run build 470 | npm run package 471 | code --uninstall-extension feiskyer.chatgpt-copilot 472 | code --install-extension chatgpt-copilot-*.vsix 473 | ``` 474 | 475 |
476 | 477 | ## Acknowledgement 478 | 479 |
480 | 481 | Acknowledgements 482 | 483 | Inspired by the [gencay/vscode-chatgpt](https://github.com/gencay/vscode-chatgpt) project and made effortlessly accessible thanks to the intuitive client provided by the [Vercel AI Toolkit](https://sdk.vercel.ai), this extension continues the open-source legacy, bringing seamless and robust AI functionality directly into the editor while staying telemetry-free. 484 | 485 |
486 | 487 | ## License 488 | 489 | This project is released under ISC License - See [LICENSE](LICENSE) for details. Copyright notice and the respective permission notices must appear in all copies. 490 | -------------------------------------------------------------------------------- /images/ai-logo-small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/feiskyer/chatgpt-copilot/17cab1d3df7794c60e86e5e6e2cfdd2e6e7a0fc7/images/ai-logo-small.png -------------------------------------------------------------------------------- /images/ai-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/feiskyer/chatgpt-copilot/17cab1d3df7794c60e86e5e6e2cfdd2e6e7a0fc7/images/ai-logo.png -------------------------------------------------------------------------------- /media/mcp-servers.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --primary-color: var(--vscode-button-background); 3 | --secondary-color: var(--vscode-button-secondaryBackground); 4 | --accent-color: var(--vscode-activityBarBadge-background); 5 | --text-color: var(--vscode-foreground); 6 | --bg-color: var(--vscode-editor-background); 7 | --bg-light-color: var(--vscode-input-background); 8 | --border-color: var(--vscode-panel-border); 9 | --hover-color: var(--vscode-list-hoverBackground); 10 | --shadow-color: rgba(0, 0, 0, 0.2); 11 | } 12 | 13 | body { 14 | padding: 0; 15 | color: var(--text-color); 16 | font-family: var(--vscode-font-family); 17 | font-size: var(--vscode-font-size); 18 | background-color: var(--bg-color); 19 | } 20 | 21 | /* MCP Servers container */ 22 | .mcp-servers-container { 23 | display: flex; 24 | flex-direction: column; 25 | height: 100%; 26 | width: 100%; 27 | } 28 | 29 | /* Header section */ 30 | .view-header { 31 | display: flex; 32 | align-items: center; 33 | justify-content: space-between; 34 | padding: 10px 15px; 35 | border-bottom: 1px solid var(--border-color); 36 | background-color: var(--bg-color); 37 | } 38 | 39 | .title-container { 40 | display: flex; 41 | align-items: center; 42 | gap: 8px; 43 | } 44 | 45 | .title-icon { 46 | width: 20px; 47 | height: 20px; 48 | } 49 | 50 | .view-header h2 { 51 | margin: 0; 52 | font-size: 14px; 53 | font-weight: 600; 54 | } 55 | 56 | .action-button { 57 | display: flex; 58 | align-items: center; 59 | gap: 6px; 60 | background-color: var(--primary-color); 61 | color: var(--vscode-button-foreground); 62 | border: none; 63 | border-radius: 4px; 64 | padding: 4px 10px; 65 | font-size: 12px; 66 | cursor: pointer; 67 | transition: background-color 0.2s; 68 | } 69 | 70 | .action-button:hover { 71 | background-color: var(--vscode-button-hoverBackground); 72 | } 73 | 74 | .info-panel { 75 | display: flex; 76 | align-items: center; 77 | gap: 10px; 78 | margin: 10px 15px; 79 | padding: 8px 12px; 80 | background-color: var(--bg-light-color); 81 | border-radius: 4px; 82 | border-left: 3px solid var(--accent-color); 83 | } 84 | 85 | .info-icon { 86 | font-size: 16px; 87 | } 88 | 89 | .info-panel p { 90 | margin: 0; 91 | font-size: 12px; 92 | line-height: 1.4; 93 | } 94 | 95 | .learn-more-link { 96 | color: var(--primary-color); 97 | text-decoration: none; 98 | margin-left: 5px; 99 | } 100 | 101 | .learn-more-link:hover { 102 | text-decoration: underline; 103 | } 104 | 105 | .content-area { 106 | flex: 1; 107 | padding: 10px 15px; 108 | overflow: auto; 109 | } 110 | 111 | /* Server list */ 112 | 
.server-list { 113 | display: flex; 114 | flex-direction: column; 115 | gap: 10px; 116 | } 117 | 118 | .server-item { 119 | display: flex; 120 | flex-direction: column; 121 | padding: 12px; 122 | background-color: var(--bg-light-color); 123 | border-radius: 6px; 124 | transition: box-shadow 0.2s, transform 0.2s; 125 | } 126 | 127 | .server-item:hover { 128 | transform: translateY(-2px); 129 | box-shadow: 0 4px 6px var(--shadow-color); 130 | } 131 | 132 | .server-header { 133 | display: flex; 134 | justify-content: space-between; 135 | align-items: center; 136 | margin-bottom: 8px; 137 | } 138 | 139 | .server-title { 140 | font-weight: 600; 141 | font-size: 14px; 142 | display: flex; 143 | align-items: center; 144 | gap: 8px; 145 | } 146 | 147 | .server-type-badge { 148 | font-size: 10px; 149 | padding: 2px 6px; 150 | border-radius: 10px; 151 | background-color: var(--secondary-color); 152 | color: var(--vscode-button-secondaryForeground); 153 | } 154 | 155 | .server-actions { 156 | display: flex; 157 | gap: 4px; 158 | } 159 | 160 | .icon-button { 161 | background: transparent; 162 | border: none; 163 | cursor: pointer; 164 | padding: 4px; 165 | border-radius: 3px; 166 | color: var(--text-color); 167 | opacity: 0.7; 168 | } 169 | 170 | .icon-button:hover { 171 | background-color: var(--hover-color); 172 | opacity: 1; 173 | } 174 | 175 | .server-details { 176 | font-size: 12px; 177 | line-height: 1.4; 178 | color: var(--vscode-descriptionForeground); 179 | } 180 | 181 | .server-tools { 182 | display: flex; 183 | gap: 6px; 184 | margin-top: 8px; 185 | flex-wrap: wrap; 186 | } 187 | 188 | .tool-tag { 189 | font-size: 10px; 190 | padding: 2px 6px; 191 | border-radius: 10px; 192 | background-color: var(--bg-color); 193 | border: 1px solid var(--border-color); 194 | } 195 | 196 | .server-item.disabled { 197 | opacity: 0.6; 198 | } 199 | 200 | .toggle-switch { 201 | position: relative; 202 | display: inline-block; 203 | width: 36px; 204 | height: 18px; 205 | } 206 | 207 | .toggle-switch input { 208 | opacity: 0; 209 | width: 0; 210 | height: 0; 211 | } 212 | 213 | .toggle-slider { 214 | position: absolute; 215 | cursor: pointer; 216 | top: 0; 217 | left: 0; 218 | right: 0; 219 | bottom: 0; 220 | background-color: var(--border-color); 221 | transition: .4s; 222 | border-radius: 18px; 223 | } 224 | 225 | .toggle-slider:before { 226 | position: absolute; 227 | content: ""; 228 | height: 14px; 229 | width: 14px; 230 | left: 2px; 231 | bottom: 2px; 232 | background-color: white; 233 | transition: .4s; 234 | border-radius: 50%; 235 | } 236 | 237 | input:checked+.toggle-slider { 238 | background-color: var(--primary-color); 239 | } 240 | 241 | input:checked+.toggle-slider:before { 242 | transform: translateX(18px); 243 | } 244 | 245 | .empty-state { 246 | display: flex; 247 | flex-direction: column; 248 | align-items: center; 249 | justify-content: center; 250 | padding: 30px; 251 | text-align: center; 252 | height: 100%; 253 | } 254 | 255 | .empty-icon { 256 | margin-bottom: 15px; 257 | color: var(--vscode-descriptionForeground); 258 | } 259 | 260 | .empty-state p { 261 | margin: 0 0 20px 0; 262 | color: var(--vscode-descriptionForeground); 263 | } 264 | 265 | .primary-action-button { 266 | background-color: var(--primary-color); 267 | color: var(--vscode-button-foreground); 268 | border: none; 269 | border-radius: 4px; 270 | padding: 8px 16px; 271 | font-size: 13px; 272 | cursor: pointer; 273 | transition: background-color 0.2s; 274 | } 275 | 276 | .primary-action-button:hover { 277 | background-color: 
var(--vscode-button-hoverBackground); 278 | } 279 | 280 | .hidden { 281 | display: none; 282 | } 283 | 284 | /* Dialog/popup styles */ 285 | .dialog-overlay { 286 | position: fixed; 287 | top: 0; 288 | left: 0; 289 | right: 0; 290 | bottom: 0; 291 | background-color: rgba(0, 0, 0, 0.5); 292 | display: flex; 293 | justify-content: center; 294 | align-items: center; 295 | z-index: 100; 296 | } 297 | 298 | .dialog { 299 | background-color: var(--bg-color); 300 | border-radius: 6px; 301 | box-shadow: 0 4px 12px var(--shadow-color); 302 | width: 90%; 303 | max-width: 450px; 304 | max-height: 90vh; 305 | overflow-y: auto; 306 | } 307 | 308 | .dialog-header { 309 | padding: 15px; 310 | border-bottom: 1px solid var(--border-color); 311 | display: flex; 312 | justify-content: space-between; 313 | align-items: center; 314 | } 315 | 316 | .dialog-title { 317 | margin: 0; 318 | font-size: 16px; 319 | font-weight: 600; 320 | } 321 | 322 | .dialog-content { 323 | padding: 15px; 324 | } 325 | 326 | .form-group { 327 | margin-bottom: 15px; 328 | } 329 | 330 | .form-label { 331 | display: block; 332 | margin-bottom: 5px; 333 | font-weight: 500; 334 | } 335 | 336 | .form-input { 337 | width: 100%; 338 | padding: 6px 8px; 339 | background-color: var(--vscode-input-background); 340 | color: var(--vscode-input-foreground); 341 | border: 1px solid var(--vscode-input-border); 342 | border-radius: 4px; 343 | font-size: 13px; 344 | } 345 | 346 | .form-input:focus { 347 | border-color: var(--primary-color); 348 | outline: none; 349 | } 350 | 351 | .dialog-footer { 352 | padding: 10px 15px 15px; 353 | display: flex; 354 | justify-content: flex-end; 355 | gap: 10px; 356 | } 357 | 358 | .dialog-btn { 359 | padding: 6px 12px; 360 | border-radius: 4px; 361 | font-size: 13px; 362 | cursor: pointer; 363 | border: none; 364 | } 365 | 366 | .dialog-btn-secondary { 367 | background-color: transparent; 368 | color: var(--text-color); 369 | border: 1px solid var(--border-color); 370 | } 371 | 372 | .dialog-btn-primary { 373 | background-color: var(--primary-color); 374 | color: var(--vscode-button-foreground); 375 | } -------------------------------------------------------------------------------- /media/mcp.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /media/prompt-manager.css: -------------------------------------------------------------------------------- 1 | /** 2 | * 3 | * @license 4 | * Copyright (c) 2023 - Present, Pengfei Ni 5 | * 6 | * All rights reserved. Code licensed under the ISC license 7 | * 8 | * The above copyright notice and this permission notice shall be included in all 9 | * copies or substantial portions of the Software. 
10 | */ 11 | .container { 12 | padding: 20px; 13 | } 14 | 15 | .header { 16 | display: flex; 17 | justify-content: space-between; 18 | align-items: center; 19 | margin-bottom: 20px; 20 | } 21 | 22 | .prompt-item { 23 | background: var(--vscode-editor-background); 24 | border: 1px solid var(--vscode-widget-border); 25 | border-radius: 0.5rem; 26 | padding: 1rem; 27 | transition: all 0.2s; 28 | margin-bottom: 1rem; 29 | } 30 | 31 | .prompt-item:hover { 32 | border-color: var(--vscode-focusBorder); 33 | } 34 | 35 | .prompt-header { 36 | margin-bottom: 0.75rem; 37 | } 38 | 39 | .prompt-title { 40 | font-size: 1rem; 41 | font-weight: 700; 42 | color: var(--vscode-foreground); 43 | } 44 | 45 | .prompt-actions { 46 | display: flex; 47 | gap: 0.5rem; 48 | } 49 | 50 | .prompt-content { 51 | font-size: 0.875rem; 52 | margin-bottom: 0.75rem; 53 | color: var(--vscode-foreground); 54 | opacity: 0.8; 55 | display: -webkit-box; 56 | display: -moz-box; 57 | display: box; 58 | -webkit-line-clamp: 2; 59 | -moz-line-clamp: 2; 60 | line-clamp: 2; 61 | -webkit-box-orient: vertical; 62 | -moz-box-orient: vertical; 63 | box-orient: vertical; 64 | overflow: hidden; 65 | text-overflow: ellipsis; 66 | line-height: 1.5; 67 | max-height: 3em; 68 | } 69 | 70 | .prompt-dialog { 71 | position: fixed; 72 | inset: 0; 73 | display: flex; 74 | align-items: center; 75 | justify-content: center; 76 | z-index: 50; 77 | background: rgba(0, 0, 0, 0.5); 78 | } 79 | 80 | .prompt-dialog-content { 81 | border-radius: 0.5rem; 82 | padding: 1.5rem; 83 | max-width: 42rem; 84 | width: 100%; 85 | margin: 0 1rem; 86 | background: var(--vscode-editor-background); 87 | } 88 | 89 | .prompt-dialog-content h3 { 90 | font-size: 1.125rem; 91 | font-weight: 600; 92 | margin-bottom: 1rem; 93 | color: var(--vscode-foreground); 94 | } 95 | 96 | .form-group { 97 | margin-bottom: 1rem; 98 | } 99 | 100 | .form-group label { 101 | display: block; 102 | margin-bottom: 0.5rem; 103 | font-size: 0.875rem; 104 | font-weight: 500; 105 | color: var(--vscode-foreground); 106 | } 107 | 108 | .form-group input, 109 | .form-group textarea { 110 | width: 100%; 111 | padding: 0.75rem; 112 | border-radius: 0.375rem; 113 | font-size: 0.875rem; 114 | background: var(--vscode-input-background); 115 | color: var(--vscode-input-foreground); 116 | border: 1px solid var(--vscode-input-border); 117 | } 118 | 119 | .form-group textarea { 120 | resize: vertical; 121 | min-height: 120px; 122 | } 123 | 124 | .dialog-buttons { 125 | display: flex; 126 | justify-content: flex-end; 127 | gap: 0.75rem; 128 | margin-top: 1.5rem; 129 | } 130 | 131 | button { 132 | padding: 0.5rem 0.75rem; 133 | border-radius: 0.375rem; 134 | font-size: 0.875rem; 135 | font-weight: 500; 136 | transition: all 0.2s; 137 | display: flex; 138 | align-items: center; 139 | gap: 0.5rem; 140 | } 141 | 142 | button.primary { 143 | background: var(--vscode-button-background); 144 | color: var(--vscode-button-foreground); 145 | } 146 | 147 | button.primary:hover { 148 | background: var(--vscode-button-hoverBackground); 149 | } 150 | 151 | button.secondary { 152 | background: var(--vscode-button-secondaryBackground); 153 | color: var(--vscode-button-secondaryForeground); 154 | } 155 | 156 | button.secondary:hover { 157 | background: var(--vscode-button-secondaryHoverBackground); 158 | } 159 | 160 | .edit-prompt, 161 | .delete-prompt { 162 | font-size: 0.75rem; 163 | padding: 0.375rem; 164 | display: flex; 165 | align-items: center; 166 | justify-content: center; 167 | min-width: 28px; 168 | height: 28px; 169 | } 
170 | 171 | .edit-prompt:hover, 172 | .delete-prompt:hover { 173 | opacity: 0.8; 174 | } 175 | 176 | .edit-prompt svg, 177 | .delete-prompt svg { 178 | flex-shrink: 0; 179 | } 180 | 181 | .line-clamp-2 { 182 | display: -webkit-box; 183 | display: -moz-box; 184 | display: box; 185 | -webkit-line-clamp: 2; 186 | -moz-line-clamp: 2; 187 | line-clamp: 2; 188 | -webkit-box-orient: vertical; 189 | -moz-box-orient: vertical; 190 | box-orient: vertical; 191 | overflow: hidden; 192 | text-overflow: ellipsis; 193 | } 194 | 195 | /* 添加 flex 相关样式 */ 196 | .flex { 197 | display: flex; 198 | } 199 | 200 | .flex-col { 201 | flex-direction: column; 202 | } 203 | 204 | .items-center { 205 | align-items: center; 206 | } 207 | 208 | .justify-between { 209 | justify-content: space-between; 210 | } 211 | 212 | .gap-2 { 213 | gap: 0.5rem; 214 | } 215 | 216 | .w-full { 217 | width: 100%; 218 | } 219 | 220 | .h-screen { 221 | height: 100vh; 222 | } 223 | 224 | .overflow-hidden { 225 | overflow: hidden; 226 | } 227 | 228 | .overflow-y-auto { 229 | overflow-y: auto; 230 | } 231 | 232 | .space-y-4>*+* { 233 | margin-top: 1rem; 234 | } 235 | 236 | .p-4 { 237 | padding: 1rem; 238 | } 239 | 240 | .mb-6 { 241 | margin-bottom: 1.5rem; 242 | } -------------------------------------------------------------------------------- /media/prompt-manager.js: -------------------------------------------------------------------------------- 1 | /** 2 | * 3 | * @license 4 | * Copyright (c) 2023 - Present, Pengfei Ni 5 | * 6 | * All rights reserved. Code licensed under the ISC license 7 | * 8 | * The above copyright notice and this permission notice shall be included in all 9 | * copies or substantial portions of the Software. 10 | */ 11 | (function () { 12 | const vscode = acquireVsCodeApi(); 13 | let prompts = []; 14 | 15 | window.addEventListener('message', event => { 16 | const message = event.data; 17 | switch (message.type) { 18 | case 'updatePrompts': 19 | prompts = message.prompts; 20 | renderPrompts(); 21 | break; 22 | } 23 | }); 24 | 25 | function showPromptDialog(existingPrompt) { 26 | const dialog = document.createElement('div'); 27 | dialog.className = 'prompt-dialog'; 28 | dialog.innerHTML = ` 29 |
30 |

${existingPrompt ? 'Edit Prompt' : 'Add New Prompt'}

31 |
32 | 33 | 34 |
35 |
36 | 37 | 38 |
39 |
40 | 41 | 42 |
43 |
44 | `; 45 | document.body.appendChild(dialog); 46 | 47 | const saveButton = dialog.querySelector('#savePrompt'); 48 | const cancelButton = dialog.querySelector('#cancelPrompt'); 49 | const nameInput = /** @type {HTMLInputElement} */ (dialog.querySelector('#promptName')); 50 | const contentInput = /** @type {HTMLTextAreaElement} */ (dialog.querySelector('#promptContent')); 51 | 52 | if (saveButton && nameInput && contentInput) { 53 | saveButton.addEventListener('click', () => { 54 | const name = nameInput.value; 55 | const content = contentInput.value; 56 | 57 | if (!name || !content) { 58 | vscode.postMessage({ type: 'showError', message: 'Name and content are required' }); 59 | return; 60 | } 61 | 62 | if (existingPrompt) { 63 | vscode.postMessage({ 64 | type: 'updatePrompt', 65 | prompt: { 66 | id: existingPrompt.id, 67 | name, 68 | content, 69 | createdAt: existingPrompt.createdAt, 70 | updatedAt: Date.now() 71 | } 72 | }); 73 | } else { 74 | vscode.postMessage({ 75 | type: 'addPrompt', 76 | prompt: { name, content } 77 | }); 78 | } 79 | dialog.remove(); 80 | }); 81 | } 82 | 83 | if (cancelButton) { 84 | cancelButton.addEventListener('click', () => { 85 | dialog.remove(); 86 | }); 87 | } 88 | } 89 | 90 | function renderPrompts() { 91 | const list = document.getElementById('promptList'); 92 | if (!list) return; 93 | 94 | list.innerHTML = prompts.map(prompt => ` 95 |
96 |
97 |
98 |

99 | 100 | 101 | 102 | ${prompt.name} 103 |

104 |
105 | 110 | 115 |
116 |
117 |
118 |
${prompt.content}
119 |
120 | Created: ${new Date(prompt.createdAt).toLocaleString()} 121 |
122 |
123 | `).join(''); 124 | 125 | list.addEventListener('click', (e) => { 126 | if (!e.target) return; 127 | 128 | const element = e.target instanceof Element ? e.target : null; 129 | if (!element) return; 130 | 131 | const button = element.closest('button'); 132 | if (!button) return; 133 | 134 | const promptId = button.getAttribute('data-id'); 135 | if (!promptId) return; 136 | 137 | if (button.classList.contains('edit-prompt')) { 138 | const prompt = prompts.find(p => p.id === promptId); 139 | if (prompt) { 140 | showPromptDialog(prompt); 141 | } 142 | } 143 | 144 | if (button.classList.contains('delete-prompt')) { 145 | vscode.postMessage({ 146 | type: 'deletePrompt', 147 | id: promptId 148 | }); 149 | } 150 | }); 151 | } 152 | 153 | function initialize() { 154 | const addButton = document.getElementById('addPrompt'); 155 | if (addButton) { 156 | addButton.addEventListener('click', () => { 157 | showPromptDialog(); 158 | }); 159 | } 160 | 161 | vscode.postMessage({ type: 'getPrompts' }); 162 | } 163 | 164 | initialize(); 165 | })(); -------------------------------------------------------------------------------- /media/reasoning.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Enhanced Reasoning Block Functionality 3 | * Similar to tool-call.js but for reasoning blocks 4 | */ 5 | 6 | // Global state for reasoning blocks 7 | window.reasoningStates = window.reasoningStates || new Map(); 8 | window.reasoningCounter = window.reasoningCounter || 0; 9 | 10 | /** 11 | * Toggle reasoning block visibility with smooth animation 12 | */ 13 | window.toggleReasoning = function (id) { 14 | const reasoningBlock = document.getElementById(id); 15 | if (!reasoningBlock) { 16 | console.error('Reasoning block not found:', id); 17 | return; 18 | } 19 | 20 | const content = reasoningBlock.querySelector('.reasoning-content'); 21 | const header = reasoningBlock.querySelector('.reasoning-header'); 22 | const chevron = header.querySelector('.reasoning-chevron'); 23 | 24 | if (!content || !chevron) { 25 | console.error('Required reasoning elements not found', { content, chevron }); 26 | return; 27 | } 28 | 29 | // Ensure reasoningStates exists 30 | if (typeof reasoningStates === 'undefined') { 31 | window.reasoningStates = new Map(); 32 | } 33 | 34 | const isCollapsed = content.classList.contains('collapsed'); 35 | 36 | if (isCollapsed) { 37 | // Expand 38 | content.classList.remove('collapsed'); 39 | chevron.style.transform = 'rotate(90deg)'; 40 | reasoningStates.set(id, 'expanded'); 41 | 42 | // Auto-scroll to bottom when expanding 43 | setTimeout(() => { 44 | content.scrollTop = content.scrollHeight; 45 | }, 300); // Wait for expand animation to complete 46 | } else { 47 | // Collapse 48 | content.classList.add('collapsed'); 49 | chevron.style.transform = 'rotate(0deg)'; 50 | reasoningStates.set(id, 'collapsed'); 51 | } 52 | }; 53 | 54 | /** 55 | * Create HTML for reasoning block 56 | */ 57 | window.createReasoningHtml = function (reasoningText, messageId) { 58 | const reasoningId = `${messageId}-reasoning`; 59 | 60 | return ` 61 |
62 |
63 | 64 | 65 | 66 |
67 | 68 | 69 | 70 | Reasoning 71 |
72 |
73 | 76 |
77 | `; 78 | }; 79 | 80 | /** 81 | * Update existing reasoning content and auto-scroll to bottom 82 | */ 83 | window.updateReasoningContent = function (reasoningId, content) { 84 | const contentElement = document.getElementById(`${reasoningId}-content`); 85 | if (contentElement) { 86 | contentElement.innerHTML = content; 87 | 88 | // Auto-scroll to bottom if content is expanded and overflowing 89 | if (!contentElement.classList.contains('collapsed')) { 90 | // Use setTimeout to ensure DOM is updated before scrolling 91 | setTimeout(() => { 92 | contentElement.scrollTop = contentElement.scrollHeight; 93 | }, 0); 94 | } 95 | } 96 | }; 97 | 98 | /** 99 | * Toggle all reasoning blocks (expand/collapse all) 100 | */ 101 | window.toggleAllReasoning = function (expand = null) { 102 | const reasoningBlocks = document.querySelectorAll('.reasoning-block'); 103 | 104 | reasoningBlocks.forEach(block => { 105 | const content = block.querySelector('.reasoning-content'); 106 | const chevron = block.querySelector('.reasoning-chevron'); 107 | 108 | if (expand === null) { 109 | // Auto-detect: if any are expanded, collapse all; otherwise expand all 110 | const hasExpanded = Array.from(reasoningBlocks).some(b => 111 | !b.querySelector('.reasoning-content').classList.contains('collapsed') 112 | ); 113 | expand = !hasExpanded; 114 | } 115 | 116 | if (expand && content.classList.contains('collapsed')) { 117 | content.classList.remove('collapsed'); 118 | chevron.style.transform = 'rotate(90deg)'; 119 | reasoningStates.set(block.id, 'expanded'); 120 | } else if (!expand && !content.classList.contains('collapsed')) { 121 | content.classList.add('collapsed'); 122 | chevron.style.transform = 'rotate(0deg)'; 123 | reasoningStates.set(block.id, 'collapsed'); 124 | } 125 | }); 126 | }; 127 | 128 | /** 129 | * Initialize reasoning blocks when DOM is ready 130 | */ 131 | function setupReasoningProcessing() { 132 | // Set initial state for existing reasoning blocks 133 | const reasoningBlocks = document.querySelectorAll('.reasoning-block'); 134 | reasoningBlocks.forEach(block => { 135 | const content = block.querySelector('.reasoning-content'); 136 | const chevron = block.querySelector('.reasoning-chevron'); 137 | 138 | if (content && chevron) { 139 | // Default to collapsed state 140 | content.classList.add('collapsed'); 141 | chevron.style.transform = 'rotate(0deg)'; 142 | reasoningStates.set(block.id, 'collapsed'); 143 | } 144 | }); 145 | } 146 | 147 | /** 148 | * Set up event delegation for reasoning clicks 149 | */ 150 | function setupReasoningEventDelegation() { 151 | // Handle clicks on reasoning headers 152 | document.body.addEventListener('click', (event) => { 153 | const header = event.target.closest('.reasoning-header'); 154 | if (header) { 155 | // Check if it has inline onclick 156 | if (header.hasAttribute('onclick')) { 157 | // Let inline onclick handle it 158 | return; 159 | } 160 | 161 | event.preventDefault(); 162 | event.stopPropagation(); 163 | 164 | const reasoningBlock = header.closest('.reasoning-block'); 165 | if (reasoningBlock && reasoningBlock.id) { 166 | window.toggleReasoning(reasoningBlock.id); 167 | } 168 | } 169 | }); 170 | } 171 | 172 | /** 173 | * Handle streaming updates for reasoning 174 | */ 175 | function handleStreamingReasoning(reasoningId) { 176 | const reasoningBlock = document.getElementById(reasoningId); 177 | if (!reasoningBlock) return; 178 | 179 | // Add any streaming-specific styling or animations here 180 | const content = reasoningBlock.querySelector('.reasoning-content'); 
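// Note: the 'reasoning-streaming' class added below is purely presentational; finalizeReasoning() removes it again once the stream completes.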
181 | if (content) { 182 | content.classList.add('reasoning-streaming'); 183 | } 184 | } 185 | 186 | /** 187 | * Finalize reasoning block (remove streaming state) 188 | */ 189 | function finalizeReasoning(reasoningId) { 190 | const reasoningBlock = document.getElementById(reasoningId); 191 | if (!reasoningBlock) return; 192 | 193 | const content = reasoningBlock.querySelector('.reasoning-content'); 194 | if (content) { 195 | content.classList.remove('reasoning-streaming'); 196 | } 197 | } 198 | 199 | // Initialize when DOM is ready 200 | document.addEventListener('DOMContentLoaded', () => { 201 | setupReasoningProcessing(); 202 | setupReasoningEventDelegation(); 203 | }); 204 | 205 | // Export for use in other scripts 206 | window.reasoningEnhanced = { 207 | toggleReasoning, 208 | createReasoningHtml, 209 | updateReasoningContent, 210 | toggleAllReasoning, 211 | handleStreamingReasoning, 212 | finalizeReasoning 213 | }; 214 | -------------------------------------------------------------------------------- /media/tool-call.css: -------------------------------------------------------------------------------- 1 | /* Enhanced Tool Call Styles - Claude-inspired Design */ 2 | 3 | .tool-call-block { 4 | background: var(--vscode-editorWidget-background); 5 | border: 1px solid var(--vscode-editorWidget-border); 6 | border-radius: 8px; 7 | margin: 12px 0; 8 | overflow: hidden; 9 | transition: all 0.2s ease; 10 | } 11 | 12 | .tool-call-block:hover { 13 | border-color: var(--vscode-focusBorder); 14 | } 15 | 16 | .tool-call-header { 17 | padding: 12px 16px; 18 | cursor: pointer; 19 | display: flex; 20 | align-items: center; 21 | gap: 12px; 22 | user-select: none; 23 | background: var(--vscode-editorWidget-background); 24 | border-bottom: 1px solid var(--vscode-editorWidget-border); 25 | transition: background-color 0.2s ease; 26 | } 27 | 28 | .tool-call-header:hover { 29 | background: var(--vscode-list-hoverBackground); 30 | } 31 | 32 | /* Chevron icon for expand/collapse */ 33 | .tool-chevron { 34 | width: 16px; 35 | height: 16px; 36 | transition: transform 0.2s ease; 37 | color: var(--vscode-foreground); 38 | flex-shrink: 0; 39 | } 40 | 41 | /* Tool name and icon container */ 42 | .tool-info { 43 | display: flex; 44 | align-items: center; 45 | gap: 8px; 46 | flex: 1; 47 | } 48 | 49 | .tool-icon { 50 | width: 16px; 51 | height: 16px; 52 | color: var(--vscode-symbolIcon-functionForeground); 53 | flex-shrink: 0; 54 | } 55 | 56 | .tool-name { 57 | font-weight: 500; 58 | color: var(--vscode-foreground); 59 | font-size: 14px; 60 | } 61 | 62 | /* Status badge */ 63 | .tool-status { 64 | padding: 2px 8px; 65 | border-radius: 12px; 66 | font-size: 11px; 67 | font-weight: 500; 68 | text-transform: uppercase; 69 | letter-spacing: 0.5px; 70 | flex-shrink: 0; 71 | } 72 | 73 | .status-running { 74 | background: var(--vscode-editorInfo-background); 75 | color: var(--vscode-editorInfo-foreground); 76 | animation: pulse 1.5s ease-in-out infinite; 77 | } 78 | 79 | .status-success { 80 | background: rgba(34, 197, 94, 0.2); /* Green background */ 81 | color: rgb(34, 197, 94); /* Green text */ 82 | border: 1px solid rgba(34, 197, 94, 0.3); 83 | } 84 | 85 | .status-error { 86 | background: rgba(239, 68, 68, 0.2); /* Red background */ 87 | color: rgb(239, 68, 68); /* Red text */ 88 | border: 1px solid rgba(239, 68, 68, 0.3); 89 | } 90 | 91 | @keyframes pulse { 92 | 93 | 0%, 94 | 100% { 95 | opacity: 0.8; 96 | } 97 | 98 | 50% { 99 | opacity: 1; 100 | } 101 | } 102 | 103 | /* Tool call content area */ 104 | 
.tool-call-content { 105 | padding: 16px; 106 | background: var(--vscode-editor-background); 107 | overflow-y: auto; 108 | transition: max-height 0.3s ease, opacity 0.3s ease, padding 0.3s ease; 109 | max-height: 600px; 110 | opacity: 1; 111 | } 112 | 113 | .tool-call-content.collapsed { 114 | max-height: 0 !important; 115 | opacity: 0; 116 | padding-top: 0; 117 | padding-bottom: 0; 118 | overflow: hidden; 119 | pointer-events: none; 120 | } 121 | 122 | /* Arguments section */ 123 | .tool-call-args { 124 | margin-bottom: 16px; 125 | } 126 | 127 | .args-header { 128 | display: flex; 129 | align-items: center; 130 | justify-content: space-between; 131 | margin-bottom: 8px; 132 | } 133 | 134 | .section-label { 135 | font-size: 12px; 136 | font-weight: 600; 137 | text-transform: uppercase; 138 | letter-spacing: 0.5px; 139 | color: var(--vscode-descriptionForeground); 140 | } 141 | 142 | .copy-button { 143 | padding: 4px 8px; 144 | font-size: 11px; 145 | background: var(--vscode-button-secondaryBackground); 146 | color: var(--vscode-button-secondaryForeground); 147 | border: none; 148 | border-radius: 4px; 149 | cursor: pointer; 150 | transition: all 0.2s ease; 151 | } 152 | 153 | .copy-button:hover { 154 | background: var(--vscode-button-secondaryHoverBackground); 155 | } 156 | 157 | .copy-button.copy-success { 158 | background: var(--vscode-terminal-ansiGreen); 159 | color: white; 160 | } 161 | 162 | /* Code blocks within tool calls */ 163 | .tool-call-content pre { 164 | margin: 8px 0; 165 | background: var(--vscode-textCodeBlock-background); 166 | border: 1px solid var(--vscode-editorWidget-border); 167 | border-radius: 6px; 168 | overflow-x: auto; 169 | } 170 | 171 | .tool-call-content pre code { 172 | display: block; 173 | padding: 12px; 174 | font-size: 13px; 175 | line-height: 1.5; 176 | font-family: var(--vscode-editor-font-family); 177 | } 178 | 179 | /* Result section */ 180 | .tool-call-result { 181 | border-top: 1px solid var(--vscode-editorWidget-border); 182 | padding-top: 16px; 183 | margin-top: 16px; 184 | } 185 | 186 | .tool-call-result:empty { 187 | display: none; 188 | } 189 | 190 | .tool-call-result .section-label { 191 | font-size: 12px; 192 | font-weight: 600; 193 | text-transform: uppercase; 194 | letter-spacing: 0.5px; 195 | color: var(--vscode-descriptionForeground); 196 | margin-bottom: 8px; 197 | } 198 | 199 | /* Empty state for results */ 200 | .result-empty { 201 | color: var(--vscode-descriptionForeground); 202 | font-style: italic; 203 | padding: 8px 0; 204 | } 205 | 206 | /* Scrollbar styling */ 207 | .tool-call-content::-webkit-scrollbar { 208 | width: 8px; 209 | height: 8px; 210 | } 211 | 212 | .tool-call-content::-webkit-scrollbar-track { 213 | background: transparent; 214 | } 215 | 216 | .tool-call-content::-webkit-scrollbar-thumb { 217 | background: var(--vscode-scrollbarSlider-background); 218 | border-radius: 4px; 219 | } 220 | 221 | .tool-call-content::-webkit-scrollbar-thumb:hover { 222 | background: var(--vscode-scrollbarSlider-hoverBackground); 223 | } 224 | 225 | /* Batch controls */ 226 | .tool-call-controls { 227 | display: flex; 228 | gap: 8px; 229 | margin: 8px 0; 230 | justify-content: flex-end; 231 | } 232 | 233 | .tool-call-controls button { 234 | padding: 4px 12px; 235 | font-size: 12px; 236 | background: var(--vscode-button-secondaryBackground); 237 | color: var(--vscode-button-secondaryForeground); 238 | border: none; 239 | border-radius: 4px; 240 | cursor: pointer; 241 | transition: all 0.2s ease; 242 | } 243 | 244 | .tool-call-controls 
button:hover {
245 | background: var(--vscode-button-secondaryHoverBackground);
246 | }
247 |
248 | /* Tool result formatting */
249 | .tool-call-result p {
250 | margin: 8px 0;
251 | line-height: 1.6;
252 | }
253 |
254 | .tool-call-result ul,
255 | .tool-call-result ol {
256 | margin: 8px 0;
257 | padding-left: 24px;
258 | }
259 |
260 | .tool-call-result li {
261 | margin: 4px 0;
262 | }
263 |
264 | .tool-call-result img {
265 | max-width: 100%;
266 | height: auto;
267 | border-radius: 4px;
268 | margin: 8px 0;
269 | }
270 |
271 | .tool-call-result table {
272 | width: 100%;
273 | border-collapse: collapse;
274 | margin: 8px 0;
275 | }
276 |
277 | .tool-call-result th,
278 | .tool-call-result td {
279 | padding: 8px;
280 | text-align: left;
281 | border: 1px solid var(--vscode-editorWidget-border);
282 | }
283 |
284 | .tool-call-result th {
285 | background: var(--vscode-editor-background);
286 | font-weight: 600;
287 | }
288 |
289 | /* Loading state */
290 | .tool-loading {
291 | display: inline-flex;
292 | align-items: center;
293 | gap: 8px;
294 | color: var(--vscode-descriptionForeground);
295 | }
296 |
297 | .tool-loading-spinner {
298 | width: 12px;
299 | height: 12px;
300 | border: 2px solid var(--vscode-progressBar-background);
301 | border-top-color: transparent;
302 | border-radius: 50%;
303 | animation: spin 0.8s linear infinite;
304 | }
305 |
306 | @keyframes spin {
307 | to {
308 | transform: rotate(360deg);
309 | }
310 | }
311 |
312 | /* Streaming result indicator */
313 | .result-streaming::after {
314 | content: "▋";
315 | animation: blink 1s steps(5, start) infinite;
316 | margin-left: 2px;
317 | vertical-align: baseline;
318 | }
319 |
320 | @keyframes blink {
321 | to {
322 | visibility: hidden;
323 | }
324 | }
325 |
326 | /* Dark theme adjustments */
327 | @media (prefers-color-scheme: dark) {
328 | .tool-call-block {
329 | box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2);
330 | }
331 | }
332 |
333 | /* Light theme adjustments */
334 | @media (prefers-color-scheme: light) {
335 | .tool-call-block {
336 | box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
337 | }
338 | }
339 |
340 | /* Responsive design */
341 | @media (max-width: 600px) {
342 | .tool-call-header {
343 | padding: 10px 12px;
344 | }
345 |
346 | .tool-call-content {
347 | padding: 12px;
348 | }
349 |
350 | .tool-name {
351 | font-size: 13px;
352 | }
353 | }
-------------------------------------------------------------------------------- /media/tool-call.js: --------------------------------------------------------------------------------
1 | /**
2 | * Tool call handling functions for collapsible tool call blocks
3 | */
4 |
5 | // Tool call state management
6 | const toolCallStates = new Map();
7 |
8 | // Function to toggle tool call blocks with smooth animation
9 | window.toggleToolCall = function (id) {
10 | const toolCallBlock = document.getElementById(id);
11 | if (!toolCallBlock) {
12 | console.error('Tool call block not found:', id);
13 | return;
14 | }
15 |
16 | const content = toolCallBlock.querySelector('.tool-call-content');
17 | const header = toolCallBlock.querySelector('.tool-call-header');
18 | const chevron = header.querySelector('.tool-chevron');
19 |
20 | if (!content || !chevron) {
21 | console.error('Required elements not found', { content, chevron });
22 | return;
23 | }
24 |
25 | // Ensure toolCallStates exists
26 | if (typeof toolCallStates === 'undefined') {
27 | window.toolCallStates = new Map();
28 | }
29 |
30 | if (content.classList.contains('collapsed')) {
31 | // Expand
32 |
content.classList.remove('collapsed'); 33 | chevron.style.transform = 'rotate(90deg)'; 34 | toolCallStates.set(id, 'expanded'); 35 | 36 | // Highlight code blocks 37 | const codeBlocks = content.querySelectorAll('pre code:not(.hljs)'); 38 | codeBlocks.forEach(block => { 39 | if (typeof hljs !== 'undefined') { 40 | hljs.highlightBlock(block); 41 | } 42 | }); 43 | } else { 44 | // Collapse 45 | content.classList.add('collapsed'); 46 | chevron.style.transform = 'rotate(0deg)'; 47 | toolCallStates.set(id, 'collapsed'); 48 | } 49 | }; 50 | 51 | // Enhanced tool result processing 52 | function setupToolCallProcessing() { 53 | const processToolResults = new MutationObserver((mutations) => { 54 | for (const mutation of mutations) { 55 | if (mutation.type === 'childList') { 56 | // Process new tool results 57 | const toolResults = document.querySelectorAll('tool-result:not(.processed)'); 58 | 59 | toolResults.forEach(toolResult => { 60 | toolResult.classList.add('processed'); 61 | 62 | const toolName = toolResult.getAttribute('data-tool-name'); 63 | const counter = toolResult.getAttribute('data-counter'); 64 | 65 | // Find matching tool call 66 | const targetToolCall = document.querySelector( 67 | `.tool-call-block[data-tool-name="${toolName}"][data-tool-counter="${counter}"]` 68 | ); 69 | 70 | if (targetToolCall) { 71 | processToolResult(targetToolCall, toolResult); 72 | } 73 | 74 | // Remove the tool-result element 75 | toolResult.remove(); 76 | }); 77 | 78 | // Initialize new tool call blocks 79 | initializeNewToolCalls(); 80 | } 81 | } 82 | }); 83 | 84 | processToolResults.observe(document.body, { 85 | childList: true, 86 | subtree: true 87 | }); 88 | } 89 | 90 | function processToolResult(toolCallBlock, toolResult) { 91 | const resultContainer = toolCallBlock.querySelector('.tool-call-result'); 92 | const statusBadge = toolCallBlock.querySelector('.tool-status'); 93 | 94 | if (!resultContainer) return; 95 | 96 | // Extract and format result content 97 | let resultContent = toolResult.textContent.trim(); 98 | let isError = false; 99 | 100 | try { 101 | const jsonContent = JSON.parse(resultContent); 102 | 103 | // Check if it's an error 104 | // Only consider .error and .isError keys as indicators of an error 105 | if (jsonContent.error || jsonContent.isError === true) { 106 | isError = true; 107 | } 108 | 109 | // Always format as JSON for consistency 110 | resultContent = '```json\n' + JSON.stringify(jsonContent, null, 2) + '\n```'; 111 | } catch (e) { 112 | // Not JSON, check if it contains error keywords 113 | if (resultContent.toLowerCase().includes('error') || 114 | resultContent.toLowerCase().includes('failed') || 115 | resultContent.toLowerCase().includes('exception')) { 116 | isError = true; 117 | } 118 | // Keep as is if not JSON 119 | } 120 | 121 | // Update status badge 122 | if (statusBadge) { 123 | statusBadge.textContent = isError ? 'Failed' : 'Done'; 124 | statusBadge.className = `tool-status ${isError ? 'status-error' : 'status-success'}`; 125 | } 126 | 127 | // Clear the placeholder content first 128 | resultContainer.innerHTML = ''; 129 | 130 | // Create the result HTML directly, similar to the Arguments section 131 | if (resultContent.startsWith('```json') && resultContent.endsWith('```')) { 132 | // Extract the JSON content from the code block 133 | const jsonContent = resultContent.slice(7, -3).trim(); 134 | resultContainer.innerHTML = ` 135 | 136 |
${jsonContent}
137 | `; 138 | } else { 139 | // For non-JSON content, use marked.parse 140 | resultContainer.innerHTML = ` 141 | 142 | ${marked.parse(resultContent)} 143 | `; 144 | } 145 | 146 | // Highlight code blocks 147 | const codeBlocks = resultContainer.querySelectorAll('pre code'); 148 | codeBlocks.forEach(block => { 149 | if (typeof hljs !== 'undefined') { 150 | hljs.highlightBlock(block); 151 | } 152 | }); 153 | 154 | // Auto-expand if error 155 | if (isError) { 156 | const content = toolCallBlock.querySelector('.tool-call-content'); 157 | const chevron = toolCallBlock.querySelector('.tool-chevron'); 158 | if (content.classList.contains('collapsed')) { 159 | content.classList.remove('collapsed'); 160 | chevron.style.transform = 'rotate(90deg)'; 161 | toolCallStates.set(toolCallBlock.id, 'expanded'); 162 | } 163 | } 164 | } 165 | 166 | function initializeNewToolCalls() { 167 | const uninitializedBlocks = document.querySelectorAll('.tool-call-block:not(.initialized)'); 168 | 169 | uninitializedBlocks.forEach(block => { 170 | block.classList.add('initialized'); 171 | 172 | // Set initial state 173 | const content = block.querySelector('.tool-call-content'); 174 | const chevron = block.querySelector('.tool-chevron'); 175 | 176 | // Start collapsed 177 | content.classList.add('collapsed'); 178 | chevron.style.transform = 'rotate(0deg)'; 179 | 180 | // Store state 181 | toolCallStates.set(block.id, 'collapsed'); 182 | }); 183 | } 184 | 185 | // Enhanced copy functionality for tool arguments 186 | window.copyToolArgs = function (button, toolId) { 187 | const toolBlock = document.getElementById(toolId); 188 | if (!toolBlock) return; 189 | 190 | const argsCode = toolBlock.querySelector('.tool-call-args pre code'); 191 | if (!argsCode) return; 192 | 193 | const text = argsCode.textContent; 194 | 195 | navigator.clipboard.writeText(text).then(() => { 196 | // Visual feedback 197 | const originalText = button.textContent; 198 | button.textContent = '✓ Copied'; 199 | button.classList.add('copy-success'); 200 | 201 | setTimeout(() => { 202 | button.textContent = originalText; 203 | button.classList.remove('copy-success'); 204 | }, 2000); 205 | }).catch(err => { 206 | console.error('Failed to copy:', err); 207 | }); 208 | }; 209 | 210 | // Batch expand/collapse functionality 211 | window.toggleAllToolCalls = function (expand) { 212 | const toolCallBlocks = document.querySelectorAll('.tool-call-block'); 213 | 214 | toolCallBlocks.forEach(block => { 215 | const content = block.querySelector('.tool-call-content'); 216 | const chevron = block.querySelector('.tool-chevron'); 217 | 218 | if (expand && content.classList.contains('collapsed')) { 219 | content.classList.remove('collapsed'); 220 | chevron.style.transform = 'rotate(90deg)'; 221 | toolCallStates.set(block.id, 'expanded'); 222 | 223 | // Highlight code blocks 224 | const codeBlocks = content.querySelectorAll('pre code:not(.hljs)'); 225 | codeBlocks.forEach(codeBlock => { 226 | hljs.highlightBlock(codeBlock); 227 | }); 228 | } else if (!expand && !content.classList.contains('collapsed')) { 229 | content.classList.add('collapsed'); 230 | chevron.style.transform = 'rotate(0deg)'; 231 | toolCallStates.set(block.id, 'collapsed'); 232 | } 233 | }); 234 | }; 235 | 236 | // Initialize when DOM is ready 237 | document.addEventListener('DOMContentLoaded', () => { 238 | setupToolCallProcessing(); 239 | setupEventDelegation(); 240 | }); 241 | 242 | // Set up event delegation for tool call clicks 243 | function setupEventDelegation() { 244 | // Handle clicks on tool 
call headers
245 | document.body.addEventListener('click', (event) => {
246 | const header = event.target.closest('.tool-call-header');
247 | if (header) {
248 | // Check if it has an inline onclick
249 | if (header.hasAttribute('onclick')) {
250 | // Let the inline onclick handle it; do nothing here
251 | return;
252 | }
253 |
254 | event.preventDefault();
255 | event.stopPropagation();
256 |
257 | const toolCallBlock = header.closest('.tool-call-block');
258 | if (toolCallBlock && toolCallBlock.id) {
259 | window.toggleToolCall(toolCallBlock.id);
260 | }
261 | }
262 | });
263 | }
264 |
265 | // Handle streaming updates
266 | function handleStreamingToolCall(toolCallId) {
267 | const toolBlock = document.getElementById(toolCallId);
268 | if (!toolBlock) return;
269 |
270 | const statusBadge = toolBlock.querySelector('.tool-status');
271 | if (statusBadge && statusBadge.textContent === 'Running') {
272 | // Add pulsing animation for running state
273 | statusBadge.classList.add('status-running');
274 | }
275 | }
276 |
277 | // Export for use in other scripts
278 | window.toolCallEnhanced = {
279 | toggleToolCall,
280 | copyToolArgs,
281 | toggleAllToolCalls,
282 | handleStreamingToolCall
283 | };
284 |
-------------------------------------------------------------------------------- /media/vendor/highlight.min.css: --------------------------------------------------------------------------------
1 | .hljs-comment, .hljs-quote {color: #5c6370;font-style: italic }.hljs-doctag, .hljs-formula, .hljs-keyword {color: #c678dd }.hljs-deletion, .hljs-name, .hljs-section, .hljs-selector-tag, .hljs-subst {color: #e06c75 }.hljs-literal {color: #56b6c2 }.hljs-addition, .hljs-attribute, .hljs-meta .hljs-string, .hljs-regexp, .hljs-string {color: #98c379 }.hljs-attr, .hljs-number, .hljs-selector-attr, .hljs-selector-class, .hljs-selector-pseudo, .hljs-template-variable, .hljs-type, .hljs-variable {color: #d19a66 }.hljs-bullet, .hljs-link, .hljs-meta, .hljs-selector-id, .hljs-symbol, .hljs-title {color: #61aeee }.hljs-built_in, .hljs-class .hljs-title, .hljs-title.class_ {color: #e6c07b }.hljs-emphasis {font-style: italic }.hljs-strong {font-weight: 700 }.hljs-link {text-decoration: underline }
-------------------------------------------------------------------------------- /src/deepclaude.ts: --------------------------------------------------------------------------------
1 | /* eslint-disable eqeqeq */
2 | /* eslint-disable @typescript-eslint/naming-convention */
3 | /**
4 | * @author Pengfei Ni
5 | *
6 | * @license
7 | * Copyright (c) 2024 - Present, Pengfei Ni
8 | *
9 | * All rights reserved. Code licensed under the ISC license
10 | *
11 | * The above copyright notice and this permission notice shall be included in all
12 | * copies or substantial portions of the Software.
13 | */
14 | import { CoreMessage, streamText } from "ai";
15 | import ChatGptViewProvider from "./chatgpt-view-provider";
16 | import { logger } from "./logger";
17 | import { getHeaders } from "./model-config";
18 | import { isOpenAIOModel } from "./types";
19 |
20 | // reasoningChat performs reasoning + chat (e.g. DeepSeek + Claude).
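// Flow:
//   step 1: stream the reasoning model (provider.apiReasoning) over the chat history and
//           capture its output, preferring dedicated "reasoning" stream parts and falling
//           back to plain text deltas when the model emits none.
//   step 2: append the captured reasoning (and any attached images) to the chat history,
//           then stream the chat model (provider.apiChat) with tools enabled to produce
//           the final answer.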
21 | export async function reasoningChat( 22 | provider: ChatGptViewProvider, 23 | question: string, 24 | images: Record, 25 | startResponse: () => void, 26 | updateResponse: (message: string) => void, 27 | updateReasoning: (message: string, roundNumber?: number) => void, 28 | ) { 29 | if (!provider.apiChat) { 30 | throw new Error("apiChat is undefined"); 31 | } 32 | if (!provider.apiReasoning) { 33 | throw new Error("apiReasoning is undefined"); 34 | } 35 | 36 | try { 37 | logger.appendLine( 38 | `INFO: deepclaude.model: ${provider.model}, reasoning.model: ${provider.reasoningModel}, question: ${question}`, 39 | ); 40 | 41 | var chatMessage: CoreMessage = { 42 | role: "user", 43 | content: [ 44 | { 45 | type: "text", 46 | text: question, 47 | }, 48 | ], 49 | }; 50 | 51 | /* placeholder for response */ 52 | startResponse(); 53 | // provider.chatHistory.push({ role: "user", content: provider.modelConfig.systemPrompt }); 54 | provider.chatHistory.push(chatMessage); 55 | 56 | /* step 1: perform reasoning */ 57 | let reasoningResult = ""; 58 | { 59 | const chunks = []; 60 | const reasonChunks = []; 61 | let hasReasoning = false; 62 | let reasoningDone = false; 63 | const result = await streamText({ 64 | model: provider.apiReasoning, 65 | messages: provider.chatHistory, 66 | 67 | abortSignal: provider.abortController?.signal, 68 | tools: provider.toolSet?.tools || undefined, 69 | headers: getHeaders(), 70 | ...(isOpenAIOModel(provider.reasoningModel) && { 71 | providerOptions: { 72 | openai: { 73 | reasoningEffort: provider.reasoningEffort, 74 | ...provider.modelConfig.maxTokens > 0 && { 75 | maxCompletionTokens: provider.modelConfig.maxTokens, 76 | }, 77 | }, 78 | }, 79 | }), 80 | ...(!isOpenAIOModel(provider.reasoningModel) && { 81 | maxTokens: provider.modelConfig.maxTokens > 0 ? provider.modelConfig.maxTokens : undefined, 82 | temperature: provider.modelConfig.temperature, 83 | // topP: provider.modelConfig.topP, 84 | }), 85 | }); 86 | for await (const part of result.fullStream) { 87 | // logger.appendLine(`INFO: deepclaude.reasoning.model: ${provider.reasoningModel} deepclaude.question: ${question} response: ${JSON.stringify(part, null, 2)}`); 88 | if (reasoningDone) { 89 | // no need to process response after reasoning is done. 
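// (reasoningDone is flipped in the text-delta case below once non-empty reasoning has been
// streamed, i.e. the model has moved on to its final answer, so step 1 can stop early.)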
90 | break; 91 | } 92 | 93 | switch (part.type) { 94 | case "text-delta": { 95 | if (hasReasoning) { 96 | // Reasoning may be empty 97 | if (reasonChunks.join("").trim() == "") { 98 | hasReasoning = false; 99 | } else { 100 | reasoningDone = true; 101 | } 102 | } else { 103 | updateReasoning(part.textDelta, 1); // First reasoning phase 104 | chunks.push(part.textDelta); 105 | } 106 | break; 107 | } 108 | case "reasoning": { 109 | hasReasoning = true; 110 | updateReasoning(part.textDelta, 1); // First reasoning phase 111 | reasonChunks.push(part.textDelta); 112 | break; 113 | } 114 | case "error": 115 | provider.sendMessage({ 116 | type: "addError", 117 | value: JSON.stringify(part.error, null, 2), 118 | autoScroll: provider.autoScroll, 119 | }); 120 | break; 121 | 122 | default: { 123 | // logger.appendLine(`INFO: deepclaude.reasoning.model: ${provider.reasoningModel} deepclaude.question: ${question} response: ${JSON.stringify(part, null, 2)}`); 124 | break; 125 | } 126 | } 127 | } 128 | 129 | if (hasReasoning) { 130 | reasoningResult = reasonChunks.join(""); 131 | } else { 132 | reasoningResult = chunks.join(""); 133 | } 134 | } 135 | 136 | logger.appendLine( 137 | `INFO: reasoning.model: ${provider.reasoningModel}, reasoning: ${reasoningResult}`, 138 | ); 139 | 140 | if (reasoningResult.trim() == "") { 141 | provider.sendMessage({ 142 | type: "addError", 143 | value: "Reasoning is empty.", 144 | autoScroll: provider.autoScroll, 145 | }); 146 | return; 147 | } 148 | 149 | /* add reasoning to context */ 150 | provider.chatHistory.push({ 151 | role: "user", 152 | content: `Reasoning: ${reasoningResult}`, 153 | }); 154 | 155 | /* add images after reasoning */ 156 | Object.entries(images).forEach(([title, content]) => { 157 | provider.chatHistory.push({ 158 | role: "user", 159 | content: [ 160 | { 161 | type: "text", 162 | text: `Image: ${title}`, 163 | }, 164 | { 165 | type: "image", 166 | image: content, 167 | }, 168 | ], 169 | }); 170 | }); 171 | 172 | /* step 2: perform chat with reasoning in context */ 173 | const chunks = []; 174 | const reasonChunks = []; 175 | // Add a counter for tool calls to generate unique IDs 176 | let toolCallCounter = 0; 177 | const result = await streamText({ 178 | system: provider.modelConfig.systemPrompt, 179 | model: provider.apiChat, 180 | messages: provider.chatHistory, 181 | abortSignal: provider.abortController?.signal, 182 | tools: provider.toolSet?.tools || undefined, 183 | maxSteps: provider.maxSteps, 184 | headers: getHeaders(), 185 | ...(isOpenAIOModel(provider.model ? provider.model : "") && { 186 | providerOptions: { 187 | openai: { 188 | reasoningEffort: provider.reasoningEffort, 189 | ...provider.modelConfig.maxTokens > 0 && { 190 | maxCompletionTokens: provider.modelConfig.maxTokens, 191 | }, 192 | }, 193 | }, 194 | }), 195 | ...(!isOpenAIOModel(provider.model ? provider.model : "") && { 196 | maxTokens: provider.modelConfig.maxTokens > 0 ? 
provider.modelConfig.maxTokens : undefined, 197 | temperature: provider.modelConfig.temperature, 198 | // topP: provider.modelConfig.topP, 199 | }), 200 | }); 201 | for await (const part of result.fullStream) { 202 | // logger.appendLine(`INFO: deepclaude.model: ${provider.model} deepclaude.question: ${question} response: ${JSON.stringify(part, null, 2)}`); 203 | switch (part.type) { 204 | case "text-delta": { 205 | updateResponse(part.textDelta); 206 | chunks.push(part.textDelta); 207 | break; 208 | } 209 | case "reasoning": { 210 | updateReasoning(part.textDelta, 2); // Second reasoning phase (chat phase) 211 | reasonChunks.push(part.textDelta); 212 | break; 213 | } 214 | case "tool-call": { 215 | let formattedArgs = part.args; 216 | if (typeof formattedArgs === 'string') { 217 | try { 218 | formattedArgs = JSON.parse(formattedArgs); 219 | } catch (e) { 220 | // If parsing fails, use the original string 221 | // @ts-ignore 222 | formattedArgs = part.args; 223 | } 224 | } 225 | 226 | // Generate a unique ID for this tool call 227 | toolCallCounter++; 228 | const toolCallId = `tool-call-${Date.now()}-${toolCallCounter}`; 229 | 230 | // Create tool icon based on the tool name 231 | const toolIcon = ` 232 | 233 | 234 | `; 235 | 236 | // Create an enhanced collapsible HTML structure for the tool call 237 | const toolCallHtml = ` 238 |
239 |
240 | 241 | 242 | 243 |
244 | ${toolIcon} 245 | ${part.toolName} 246 |
247 | Running 248 |
249 | 264 |
`; 265 | 266 | updateResponse(toolCallHtml); 267 | chunks.push(toolCallHtml); 268 | break; 269 | } 270 | 271 | // @ts-ignore 272 | case "tool-result": { 273 | // @ts-ignore 274 | logger.appendLine(`INFO: Tool ${part.toolName} result received: ${JSON.stringify(part.result)}`); 275 | 276 | // @ts-ignore 277 | let formattedResult = part.result; 278 | if (typeof formattedResult === 'string') { 279 | try { 280 | formattedResult = JSON.parse(formattedResult); 281 | } catch (e) { 282 | // If parsing fails, use the original string 283 | // @ts-ignore 284 | formattedResult = part.result; 285 | } 286 | } 287 | 288 | // Create a special marker for tool results that will be processed by tool-call.js 289 | // @ts-ignore 290 | // Store the complete result object with full structure to allow proper extraction in tool-call.js 291 | const toolResultText = ` 292 | ${JSON.stringify(formattedResult)} 293 | `; 294 | 295 | updateResponse(toolResultText); 296 | chunks.push(toolResultText); 297 | break; 298 | } 299 | 300 | case "error": 301 | provider.sendMessage({ 302 | type: "addError", 303 | value: part.error, 304 | autoScroll: provider.autoScroll, 305 | }); 306 | break; 307 | 308 | default: { 309 | logger.appendLine( 310 | `INFO: deepclaude.model: ${provider.model} deepclaude.question: ${question} response: ${JSON.stringify(part, null, 2)}`, 311 | ); 312 | break; 313 | } 314 | } 315 | } 316 | 317 | provider.response = chunks.join(""); 318 | provider.reasoning = reasonChunks.join(""); 319 | 320 | // Save both the text response and tool calls in the chat history 321 | const assistantResponse: any = { 322 | role: "assistant", 323 | content: chunks.join("") 324 | }; 325 | 326 | provider.chatHistory.push(assistantResponse); 327 | logger.appendLine(`INFO: deepclaude.response: ${provider.response}`); 328 | } catch (error) { 329 | logger.appendLine( 330 | `ERROR: deepclaude.model: ${provider.model} failed with error: ${error}, backtrace: ${new Error().stack}`, 331 | ); 332 | throw error; 333 | } 334 | } 335 | -------------------------------------------------------------------------------- /src/extension.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @author Pengfei Ni 3 | * 4 | * @license 5 | * Copyright (c) 2022 - 2023, Ali Gençay 6 | * Copyright (c) 2024 - Present, Pengfei Ni 7 | * 8 | * All rights reserved. Code licensed under the ISC license 9 | * 10 | * The above copyright notice and this permission notice shall be included in all 11 | * copies or substantial portions of the Software. 
12 | */ 13 | 14 | import * as vscode from "vscode"; 15 | import ChatGptViewProvider from "./chatgpt-view-provider"; 16 | // import { registerMCPToolsWithVSCode } from './github-copilot'; 17 | import MCPServerProvider from "./mcp-server-provider"; 18 | import PromptManagerProvider from "./prompt-manager-provider"; 19 | import { PromptStore } from "./types"; 20 | 21 | const menuCommands = [ 22 | "addTests", 23 | "findProblems", 24 | "optimize", 25 | "explain", 26 | "addComments", 27 | "completeCode", 28 | "generateCode", 29 | "customPrompt1", 30 | "customPrompt2", 31 | "adhoc", 32 | ]; 33 | 34 | export async function activate(context: vscode.ExtensionContext) { 35 | let adhocCommandPrefix: string = 36 | context.globalState.get("chatgpt-adhoc-prompt") || ""; 37 | 38 | const provider = new ChatGptViewProvider(context); 39 | 40 | const view = vscode.window.registerWebviewViewProvider( 41 | "chatgpt-copilot.view", 42 | provider, 43 | { 44 | webviewOptions: { 45 | retainContextWhenHidden: true, 46 | }, 47 | }, 48 | ); 49 | 50 | const freeText = vscode.commands.registerCommand( 51 | "chatgpt-copilot.freeText", 52 | async () => { 53 | const value = await vscode.window.showInputBox({ 54 | prompt: "Ask anything...", 55 | }); 56 | 57 | if (value) { 58 | provider?.sendApiRequest(value, { command: "freeText" }); 59 | } 60 | }, 61 | ); 62 | 63 | const resetThread = vscode.commands.registerCommand( 64 | "chatgpt-copilot.clearConversation", 65 | async () => { 66 | provider?.sendMessage({ type: "clearConversation" }, true); 67 | }, 68 | ); 69 | 70 | const exportConversation = vscode.commands.registerCommand( 71 | "chatgpt-copilot.exportConversation", 72 | async () => { 73 | provider?.sendMessage({ type: "exportConversation" }, true); 74 | }, 75 | ); 76 | 77 | const clearSession = vscode.commands.registerCommand( 78 | "chatgpt-copilot.clearSession", 79 | () => { 80 | context.globalState.update("chatgpt-session-token", null); 81 | context.globalState.update("chatgpt-clearance-token", null); 82 | context.globalState.update("chatgpt-user-agent", null); 83 | context.globalState.update("chatgpt-gpt3-apiKey", null); 84 | provider?.clearSession(); 85 | provider?.sendMessage({ type: "clearConversation" }, true); 86 | }, 87 | ); 88 | 89 | const configChanged = vscode.workspace.onDidChangeConfiguration((e) => { 90 | if (e.affectsConfiguration("chatgpt.response.showNotification")) { 91 | provider.subscribeToResponse = 92 | vscode.workspace 93 | .getConfiguration("chatgpt") 94 | .get("response.showNotification") || false; 95 | } 96 | 97 | if (e.affectsConfiguration("chatgpt.response.autoScroll")) { 98 | provider.autoScroll = !!vscode.workspace 99 | .getConfiguration("chatgpt") 100 | .get("response.autoScroll"); 101 | } 102 | 103 | if (e.affectsConfiguration("chatgpt.gpt3.model")) { 104 | provider.model = vscode.workspace 105 | .getConfiguration("chatgpt") 106 | .get("gpt3.model"); 107 | } 108 | 109 | if (e.affectsConfiguration("chatgpt.gpt3.customModel")) { 110 | if (provider.model === "custom") { 111 | provider.model = vscode.workspace 112 | .getConfiguration("chatgpt") 113 | .get("gpt3.customModel"); 114 | } 115 | } 116 | 117 | if ( 118 | e.affectsConfiguration("chatgpt.gpt3.provider") || 119 | e.affectsConfiguration("chatgpt.gpt3.apiBaseUrl") || 120 | e.affectsConfiguration("chatgpt.gpt3.model") || 121 | e.affectsConfiguration("chatgpt.gpt3.apiKey") || 122 | e.affectsConfiguration("chatgpt.gpt3.customModel") || 123 | e.affectsConfiguration("chatgpt.gpt3.organization") || 124 | 
e.affectsConfiguration("chatgpt.gpt3.maxTokens") || 125 | e.affectsConfiguration("chatgpt.gpt3.temperature") || 126 | e.affectsConfiguration("chatgpt.gpt3.reasoning.provider") || 127 | e.affectsConfiguration("chatgpt.gpt3.reasoning.model") || 128 | e.affectsConfiguration("chatgpt.gpt3.reasoning.apiKey") || 129 | e.affectsConfiguration("chatgpt.gpt3.reasoning.apiBaseUrl") || 130 | e.affectsConfiguration("chatgpt.gpt3.reasoning.organization") || 131 | e.affectsConfiguration("chatgpt.systemPrompt") || 132 | e.affectsConfiguration("chatgpt.gpt3.top_p") 133 | ) { 134 | provider.prepareConversation(true); 135 | } 136 | 137 | if ( 138 | e.affectsConfiguration("chatgpt.promptPrefix") || 139 | e.affectsConfiguration("chatgpt.gpt3.generateCode-enabled") || 140 | e.affectsConfiguration("chatgpt.gpt3.model") 141 | ) { 142 | setContext(); 143 | } 144 | }); 145 | 146 | const adhocCommand = vscode.commands.registerCommand( 147 | "chatgpt-copilot.adhoc", 148 | async () => { 149 | const editor = vscode.window.activeTextEditor; 150 | 151 | if (!editor) { 152 | return; 153 | } 154 | 155 | const selection = editor.document.getText(editor.selection); 156 | let dismissed = false; 157 | if (selection) { 158 | await vscode.window 159 | .showInputBox({ 160 | title: "Add prefix to your ad-hoc command", 161 | prompt: 162 | "Prefix your code with your custom prompt. i.e. Explain this", 163 | ignoreFocusOut: true, 164 | placeHolder: "Ask anything...", 165 | value: adhocCommandPrefix, 166 | }) 167 | .then((value) => { 168 | if (!value) { 169 | dismissed = true; 170 | return; 171 | } 172 | 173 | adhocCommandPrefix = value.trim() || ""; 174 | context.globalState.update( 175 | "chatgpt-adhoc-prompt", 176 | adhocCommandPrefix, 177 | ); 178 | }); 179 | 180 | if (!dismissed && adhocCommandPrefix?.length > 0) { 181 | provider?.sendApiRequest(adhocCommandPrefix, { 182 | command: "adhoc", 183 | code: selection, 184 | }); 185 | } 186 | } 187 | }, 188 | ); 189 | 190 | const generateCodeCommand = vscode.commands.registerCommand( 191 | `chatgpt-copilot.generateCode`, 192 | () => { 193 | const editor = vscode.window.activeTextEditor; 194 | 195 | if (!editor) { 196 | return; 197 | } 198 | 199 | const selection = editor.document.getText(editor.selection); 200 | if (selection) { 201 | provider?.sendApiRequest(selection, { 202 | command: "generateCode", 203 | language: editor.document.languageId, 204 | }); 205 | } 206 | }, 207 | ); 208 | 209 | // Skip AdHoc - as it was registered earlier 210 | const registeredCommands = menuCommands 211 | .filter((command) => command !== "adhoc" && command !== "generateCode") 212 | .map((command) => 213 | vscode.commands.registerCommand(`chatgpt-copilot.${command}`, () => { 214 | const prompt = vscode.workspace 215 | .getConfiguration("chatgpt") 216 | .get(`promptPrefix.${command}`); 217 | const editor = vscode.window.activeTextEditor; 218 | 219 | if (!editor) { 220 | return; 221 | } 222 | 223 | const selection = editor.document.getText(editor.selection); 224 | if (selection && prompt) { 225 | provider?.sendApiRequest(prompt, { 226 | command, 227 | code: selection, 228 | language: editor.document.languageId, 229 | }); 230 | } 231 | }), 232 | ); 233 | 234 | const promptManager = new PromptManagerProvider(context); 235 | const promptManagerView = vscode.window.registerWebviewViewProvider( 236 | "chatgpt-copilot.promptManager", 237 | promptManager, 238 | ); 239 | 240 | const managePrompts = vscode.commands.registerCommand( 241 | "chatgpt-copilot.managePrompts", 242 | async () => { 243 | await 
vscode.commands.executeCommand( 244 | "chatgpt-copilot.promptManager.focus", 245 | ); 246 | }, 247 | ); 248 | 249 | const debugPrompts = vscode.commands.registerCommand( 250 | "chatgpt-copilot.debugPrompts", 251 | async () => { 252 | const prompts = context.globalState.get("prompts"); 253 | vscode.window.showInformationMessage( 254 | `Stored prompts: ${JSON.stringify(prompts, null, 2)}`, 255 | ); 256 | }, 257 | ); 258 | 259 | const togglePromptManager = vscode.commands.registerCommand( 260 | "chatgpt-copilot.togglePromptManager", 261 | async () => { 262 | const panel = vscode.window.createWebviewPanel( 263 | "chatgpt-copilot.promptManager", 264 | "ChatGPT: Prompt Manager", 265 | vscode.ViewColumn.Beside, 266 | { 267 | enableScripts: true, 268 | retainContextWhenHidden: true, 269 | localResourceRoots: [context.extensionUri], 270 | }, 271 | ); 272 | 273 | const promptManager = new PromptManagerProvider(context); 274 | promptManager.setPanel(panel); 275 | panel.webview.html = promptManager.getWebviewContent(panel.webview); 276 | 277 | panel.webview.onDidReceiveMessage(async (data) => { 278 | switch (data.type) { 279 | case "addPrompt": 280 | promptManager.addPrompt(data.prompt); 281 | break; 282 | case "updatePrompt": 283 | promptManager.updatePrompt(data.prompt); 284 | break; 285 | case "deletePrompt": 286 | promptManager.deletePrompt(data.id); 287 | break; 288 | case "getPrompts": 289 | panel.webview.postMessage({ 290 | type: "updatePrompts", 291 | prompts: promptManager.getPrompts(), 292 | }); 293 | break; 294 | } 295 | }); 296 | 297 | panel.onDidDispose(() => { 298 | promptManager.setPanel(undefined); 299 | }); 300 | }, 301 | ); 302 | 303 | let addCurrentFileCommand = vscode.commands.registerCommand( 304 | "chatgpt-copilot.addCurrentFile", 305 | () => { 306 | provider.addCurrentFileToContext(); 307 | }, 308 | ); 309 | 310 | const mcpServerProvider = new MCPServerProvider(context); 311 | const mcpServerView = vscode.window.registerWebviewViewProvider( 312 | "chatgpt-copilot.mcpServers", 313 | mcpServerProvider, 314 | ); 315 | 316 | const openMCPServers = vscode.commands.registerCommand( 317 | "chatgpt-copilot.openMCPServers", 318 | () => { 319 | const panel = vscode.window.createWebviewPanel( 320 | "chatgpt-copilot.mcpServers", 321 | "ChatGPT: MCP Servers", 322 | vscode.ViewColumn.One, 323 | { 324 | enableScripts: true, 325 | retainContextWhenHidden: true, 326 | localResourceRoots: [context.extensionUri], 327 | }, 328 | ); 329 | 330 | panel.webview.html = mcpServerProvider.getWebviewContent(panel.webview); 331 | mcpServerProvider.setPanel(panel); 332 | 333 | panel.onDidDispose(() => { 334 | mcpServerProvider.setPanel(undefined); 335 | }); 336 | 337 | panel.webview.onDidReceiveMessage(async (data) => { 338 | switch (data.type) { 339 | case "addServer": 340 | mcpServerProvider.addServer(data.server); 341 | break; 342 | case "updateServer": 343 | mcpServerProvider.updateServer(data.server); 344 | break; 345 | case "deleteServer": 346 | mcpServerProvider.deleteServer(data.id); 347 | break; 348 | case "toggleServerEnabled": 349 | mcpServerProvider.toggleServerEnabled(data.id); 350 | break; 351 | case "getServers": 352 | panel.webview.postMessage({ 353 | type: "updateServers", 354 | servers: mcpServerProvider.getServers(), 355 | }); 356 | break; 357 | } 358 | }); 359 | }, 360 | ); 361 | 362 | context.subscriptions.push( 363 | view, 364 | freeText, 365 | resetThread, 366 | exportConversation, 367 | clearSession, 368 | configChanged, 369 | adhocCommand, 370 | generateCodeCommand, 371 | 
...registeredCommands, 372 | promptManagerView, 373 | managePrompts, 374 | debugPrompts, 375 | togglePromptManager, 376 | addCurrentFileCommand, 377 | mcpServerView, 378 | openMCPServers, 379 | ); 380 | 381 | const setContext = () => { 382 | menuCommands.forEach((command) => { 383 | if (command === "generateCode") { 384 | let generateCodeEnabled = !!vscode.workspace 385 | .getConfiguration("chatgpt") 386 | .get("gpt3.generateCode-enabled"); 387 | const modelName = vscode.workspace 388 | .getConfiguration("chatgpt") 389 | .get("gpt3.model") as string; 390 | generateCodeEnabled = 391 | generateCodeEnabled && modelName.startsWith("code-"); 392 | vscode.commands.executeCommand( 393 | "setContext", 394 | "generateCode-enabled", 395 | generateCodeEnabled, 396 | ); 397 | } else { 398 | const enabled = !!vscode.workspace 399 | .getConfiguration("chatgpt.promptPrefix") 400 | .get(`${command}-enabled`); 401 | vscode.commands.executeCommand( 402 | "setContext", 403 | `${command}-enabled`, 404 | enabled, 405 | ); 406 | } 407 | }); 408 | }; 409 | 410 | setContext(); 411 | } 412 | 413 | export function deactivate() { } 414 | -------------------------------------------------------------------------------- /src/github-copilot.ts: -------------------------------------------------------------------------------- 1 | import { CoreMessage } from "ai"; 2 | import * as vscode from "vscode"; 3 | import ChatGptViewProvider from "./chatgpt-view-provider"; 4 | import { logger } from "./logger"; 5 | import { 6 | executePromptToolCall, 7 | generateToolDescriptions 8 | } from "./prompt-based-tools"; 9 | import { ToolCallParser } from "./tool-call-parser"; 10 | import { PromptBasedToolConfig } from "./types"; 11 | 12 | /** 13 | * Get prompt-based tool configuration from VSCode settings 14 | */ 15 | function getPromptBasedToolConfig(): PromptBasedToolConfig { 16 | const configuration = vscode.workspace.getConfiguration("chatgpt"); 17 | 18 | return { 19 | enabled: true, // Always true for GitHub Copilot 20 | toolCallPattern: "", 21 | maxToolCalls: configuration.get("gpt3.maxSteps") || 15, 22 | }; 23 | } 24 | 25 | export async function chatCopilot( 26 | provider: ChatGptViewProvider, 27 | question: string, 28 | images: Record, 29 | startResponse: () => void, 30 | updateResponse: (message: string) => void, 31 | updateReasoning?: (message: string, roundNumber?: number) => void, 32 | ) { 33 | logger.appendLine( 34 | `INFO: chatgpt.model: ${provider.model} chatgpt.question: ${question.trim()}`, 35 | ); 36 | 37 | const promptToolConfig = getPromptBasedToolConfig(); 38 | logger.appendLine(`INFO: Using prompt-based tools: ${promptToolConfig.enabled}, model: ${provider.model}`); 39 | 40 | const models = await vscode.lm.selectChatModels({ 41 | vendor: "copilot", 42 | }); 43 | logger.appendLine(`INFO: available Github copilot models: ${models.map(m => m.family).join(', ')}`); 44 | if (models.length === 0) { 45 | provider.sendMessage({ 46 | type: "addError", 47 | value: `No supported models found from Github Copilot, have you logged in?`, 48 | autoScroll: provider.autoScroll, 49 | }); 50 | return; 51 | } 52 | 53 | let model: vscode.LanguageModelChat | undefined; 54 | try { 55 | [model] = await vscode.lm.selectChatModels({ 56 | vendor: "copilot", 57 | family: provider.model, 58 | }); 59 | } catch (err) { 60 | provider.sendMessage({ 61 | type: "addError", 62 | value: JSON.stringify(err, null, 2), 63 | autoScroll: provider.autoScroll, 64 | }); 65 | logger.appendLine(`ERROR: ${err}`); 66 | return; 67 | } 68 | 69 | if (!model) { 70 | 
provider.sendMessage({ 71 | type: "addError", 72 | value: `Model ${provider.model} not supported.`, 73 | autoScroll: provider.autoScroll, 74 | }); 75 | logger.appendLine(`ERROR: Model ${provider.model} not supported.`); 76 | return; 77 | } 78 | 79 | var chatMessage: CoreMessage = { 80 | role: "user", 81 | content: question, 82 | }; 83 | 84 | /* placeholder for response */ 85 | startResponse(); 86 | 87 | const chunks: string[] = []; 88 | const reasonChunks: string[] = []; 89 | let toolCallCounter = 0; 90 | provider.chatHistory.push(chatMessage); 91 | 92 | // Prepare system prompt with tool descriptions if using prompt-based tools 93 | let systemPrompt = provider.modelConfig.systemPrompt; 94 | if (provider.toolSet) { 95 | const toolDescriptions = generateToolDescriptions(provider.toolSet); 96 | if (toolDescriptions) { 97 | systemPrompt = systemPrompt ? `${systemPrompt}\n\n${toolDescriptions}` : toolDescriptions; 98 | logger.appendLine(`INFO: Added tool descriptions to system prompt`); 99 | } 100 | } 101 | 102 | // Implement tool call loop for prompt-based tools (like AI SDK does automatically) 103 | if (provider.toolSet) { 104 | toolCallCounter = await executeGitHubCopilotToolLoop( 105 | provider, 106 | model, 107 | systemPrompt, 108 | chunks, 109 | reasonChunks, 110 | toolCallCounter, 111 | updateResponse, 112 | updateReasoning 113 | ); 114 | } else { 115 | // Use standard GitHub Copilot without tools 116 | toolCallCounter = await executeStandardGitHubCopilotChat( 117 | provider, 118 | model, 119 | systemPrompt, 120 | chunks, 121 | reasonChunks, 122 | toolCallCounter, 123 | updateResponse, 124 | updateReasoning 125 | ); 126 | } 127 | 128 | provider.response = chunks.join(""); 129 | if (reasonChunks.join("") != "") { 130 | provider.reasoning = reasonChunks.join(""); 131 | } 132 | 133 | logger.appendLine( 134 | `INFO: chatgpt.model: ${provider.model}, chatgpt.question: ${question.trim()}, final response: ${provider.response}`, 135 | ); 136 | } 137 | 138 | /** 139 | * Execute GitHub Copilot tool loop (mimics AI SDK's automatic tool calling) 140 | */ 141 | async function executeGitHubCopilotToolLoop( 142 | provider: ChatGptViewProvider, 143 | model: vscode.LanguageModelChat, 144 | systemPrompt: string, 145 | chunks: string[], 146 | reasonChunks: string[], 147 | toolCallCounter: number, 148 | updateResponse: (message: string) => void, 149 | updateReasoning?: (message: string, roundNumber?: number) => void 150 | ): Promise { 151 | const maxSteps = provider.maxSteps || 15; 152 | let currentStep = 0; 153 | let conversationHistory = [...provider.chatHistory]; 154 | 155 | while (currentStep < maxSteps) { 156 | currentStep++; 157 | logger.appendLine(`INFO: GitHub Copilot tool loop step ${currentStep}/${maxSteps}`); 158 | 159 | // Prepare messages with system prompt 160 | const messagesWithSystem = [ 161 | { role: "system" as const, content: systemPrompt }, 162 | ...conversationHistory 163 | ]; 164 | const messages = convertToLMChatMessages(messagesWithSystem); 165 | 166 | // Make API call 167 | let chatResponse: vscode.LanguageModelChatResponse; 168 | try { 169 | const cancellationTokenSource = new vscode.CancellationTokenSource(); 170 | // If we have an abort controller, listen to it and cancel the token 171 | if (provider.abortController) { 172 | provider.abortController.signal.addEventListener('abort', () => { 173 | cancellationTokenSource.cancel(); 174 | }); 175 | } 176 | 177 | chatResponse = await model.sendRequest( 178 | messages, 179 | {}, 180 | cancellationTokenSource.token, 181 | ); 182 | } 
catch (err) { 183 | provider.sendMessage({ 184 | type: "addError", 185 | value: JSON.stringify(err, null, 2), 186 | autoScroll: provider.autoScroll, 187 | }); 188 | logger.appendLine(`ERROR: ${err}`); 189 | throw err; 190 | } 191 | 192 | let accumulatedText = ""; 193 | let stepChunks: string[] = []; 194 | 195 | // Process streaming response - collect all text first 196 | try { 197 | for await (const fragment of chatResponse.text) { 198 | accumulatedText += fragment; 199 | stepChunks.push(fragment); 200 | } 201 | } catch (err) { 202 | provider.sendMessage({ 203 | type: "addError", 204 | value: JSON.stringify(err, null, 2), 205 | autoScroll: provider.autoScroll, 206 | }); 207 | logger.appendLine(`ERROR: ${err}`); 208 | throw err; 209 | } 210 | 211 | // Check for tool calls in the accumulated text 212 | const toolCalls = ToolCallParser.parseToolCalls(accumulatedText); 213 | 214 | // If there are tool calls, only output text that comes before the first tool call 215 | if (toolCalls.length > 0) { 216 | const firstToolCallIndex = accumulatedText.indexOf(''); 217 | if (firstToolCallIndex > 0) { 218 | const textBeforeToolCalls = accumulatedText.substring(0, firstToolCallIndex).trim(); 219 | if (textBeforeToolCalls) { 220 | updateResponse(textBeforeToolCalls); 221 | chunks.push(textBeforeToolCalls); 222 | } 223 | } 224 | } else { 225 | // No tool calls, output the full response 226 | updateResponse(accumulatedText); 227 | chunks.push(accumulatedText); 228 | } 229 | 230 | if (toolCalls.length === 0) { 231 | // No tool calls found, conversation is complete 232 | logger.appendLine(`INFO: No tool calls found in step ${currentStep}, ending loop`); 233 | break; 234 | } 235 | 236 | // Execute tool calls and add results to conversation 237 | const toolResults: any[] = []; 238 | for (const toolCall of toolCalls) { 239 | toolCallCounter++; // Increment counter for each tool call 240 | 241 | // Create tool call UI (exactly like native tool calls) 242 | const toolCallHtml = createGitHubCopilotToolCallHtml(toolCall, toolCallCounter); 243 | updateResponse(toolCallHtml); 244 | chunks.push(toolCallHtml); 245 | 246 | // Execute tool 247 | const result = await executePromptToolCall(toolCall, provider.toolSet!); 248 | toolResults.push(result); 249 | 250 | // Create tool result UI (exactly like native tool calls) 251 | const toolResultHtml = createGitHubCopilotToolResultHtml(result, toolCallCounter); 252 | updateResponse(toolResultHtml); 253 | chunks.push(toolResultHtml); 254 | 255 | logger.appendLine(`INFO: Tool ${toolCall.toolName} executed with result: ${JSON.stringify(result.result)}`); 256 | } 257 | 258 | // Add assistant response with tool calls to conversation history 259 | // Only include the text before tool calls, not the tool call syntax itself 260 | const firstToolCallIndex = accumulatedText.indexOf(''); 261 | if (firstToolCallIndex > 0) { 262 | const textBeforeToolCalls = accumulatedText.substring(0, firstToolCallIndex).trim(); 263 | if (textBeforeToolCalls) { 264 | const assistantMessage: CoreMessage = { 265 | role: "assistant", 266 | content: textBeforeToolCalls, 267 | }; 268 | conversationHistory.push(assistantMessage); 269 | } 270 | } 271 | 272 | // Add tool results as user messages (this is how AI SDK does it) 273 | for (const result of toolResults) { 274 | const toolResultMessage: CoreMessage = { 275 | role: "user", 276 | content: `Tool ${result.toolName} result: ${JSON.stringify(result.result)}` 277 | }; 278 | conversationHistory.push(toolResultMessage); 279 | } 280 | 281 | // Continue the loop for 
the next step 282 | } 283 | 284 | // Update provider's chat history with the final conversation 285 | provider.chatHistory = conversationHistory; 286 | return toolCallCounter; 287 | } 288 | 289 | /** 290 | * Execute standard GitHub Copilot chat without tools 291 | */ 292 | async function executeStandardGitHubCopilotChat( 293 | provider: ChatGptViewProvider, 294 | model: vscode.LanguageModelChat, 295 | systemPrompt: string, 296 | chunks: string[], 297 | reasonChunks: string[], 298 | toolCallCounter: number, 299 | updateResponse: (message: string) => void, 300 | updateReasoning?: (message: string, roundNumber?: number) => void 301 | ): Promise { 302 | // Prepare messages with system prompt 303 | const messagesWithSystem = [ 304 | { role: "system" as const, content: systemPrompt }, 305 | ...provider.chatHistory 306 | ]; 307 | const messages = convertToLMChatMessages(messagesWithSystem); 308 | 309 | let chatResponse: vscode.LanguageModelChatResponse; 310 | try { 311 | const cancellationTokenSource = new vscode.CancellationTokenSource(); 312 | // If we have an abort controller, listen to it and cancel the token 313 | if (provider.abortController) { 314 | provider.abortController.signal.addEventListener('abort', () => { 315 | cancellationTokenSource.cancel(); 316 | }); 317 | } 318 | 319 | chatResponse = await model.sendRequest( 320 | messages, 321 | {}, 322 | cancellationTokenSource.token, 323 | ); 324 | } catch (err) { 325 | provider.sendMessage({ 326 | type: "addError", 327 | value: JSON.stringify(err, null, 2), 328 | autoScroll: provider.autoScroll, 329 | }); 330 | logger.appendLine(`ERROR: ${err}`); 331 | throw err; 332 | } 333 | 334 | try { 335 | for await (const fragment of chatResponse.text) { 336 | updateResponse(fragment); 337 | chunks.push(fragment); 338 | } 339 | } catch (err) { 340 | provider.sendMessage({ 341 | type: "addError", 342 | value: JSON.stringify(err, null, 2), 343 | autoScroll: provider.autoScroll, 344 | }); 345 | logger.appendLine(`ERROR: ${err}`); 346 | throw err; 347 | } 348 | 349 | // Add final assistant response to chat history 350 | const assistantResponse: CoreMessage = { 351 | role: "assistant", 352 | content: chunks.join("") 353 | }; 354 | provider.chatHistory.push(assistantResponse); 355 | 356 | return toolCallCounter; 357 | } 358 | 359 | function convertToLMChatMessages( 360 | messages: CoreMessage[], 361 | ): vscode.LanguageModelChatMessage[] { 362 | return messages.map((message) => { 363 | switch (message.role) { 364 | case "user": 365 | return vscode.LanguageModelChatMessage.User(message.content as string); 366 | case "assistant": 367 | return vscode.LanguageModelChatMessage.Assistant( 368 | message.content as string, 369 | ); 370 | case "system": 371 | return vscode.LanguageModelChatMessage.User(message.content as string); 372 | case "tool": 373 | return vscode.LanguageModelChatMessage.User( 374 | JSON.stringify(message.content), 375 | ); 376 | default: 377 | throw new Error(`Unknown role for ${JSON.stringify(message)}`); 378 | } 379 | }); 380 | } 381 | 382 | /** 383 | * Create HTML for GitHub Copilot tool calls 384 | */ 385 | function createGitHubCopilotToolCallHtml(toolCall: any, toolCallCounter: number): string { 386 | const toolCallId = `github-copilot-tool-call-${Date.now()}-${toolCallCounter}`; 387 | const toolIcon = ` 388 | 389 | 390 | `; 391 | 392 | return ` 393 |
394 |
395 | 396 | 397 | 398 |
399 | ${toolIcon} 400 | ${toolCall.toolName} (GitHub Copilot) 401 |
402 | Running 403 |
404 | 419 |
`; 420 | } 421 | 422 | /** 423 | * Create HTML for GitHub Copilot tool results 424 | */ 425 | function createGitHubCopilotToolResultHtml(result: any, toolCallCounter: number): string { 426 | return ` 427 | ${JSON.stringify(result.result)} 428 | `; 429 | } 430 | -------------------------------------------------------------------------------- /src/llms.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable eqeqeq */ 2 | /* eslint-disable @typescript-eslint/naming-convention */ 3 | /** 4 | * @author Pengfei Ni 5 | * 6 | * @license 7 | * Copyright (c) 2024 - Present, Pengfei Ni 8 | * 9 | * All rights reserved. Code licensed under the ISC license 10 | * 11 | * The above copyright notice and this permission notice shall be included in all 12 | * copies or substantial portions of the Software. 13 | */ 14 | import { createAnthropic } from "@ai-sdk/anthropic"; 15 | import { createDeepSeek } from "@ai-sdk/deepseek"; 16 | import { createGoogleGenerativeAI } from "@ai-sdk/google"; 17 | import { createGroq } from "@ai-sdk/groq"; 18 | import { createMistral } from "@ai-sdk/mistral"; 19 | import { createPerplexity } from "@ai-sdk/perplexity"; 20 | import { createReplicate } from '@ai-sdk/replicate'; 21 | import { createTogetherAI } from "@ai-sdk/togetherai"; 22 | import { createXai } from "@ai-sdk/xai"; 23 | import { createOpenRouter } from "@openrouter/ai-sdk-provider"; 24 | import { createAzure } from "@quail-ai/azure-ai-provider"; 25 | import { extractReasoningMiddleware, wrapLanguageModel } from "ai"; 26 | import { createOllama } from "ollama-ai-provider"; 27 | import ChatGptViewProvider from "./chatgpt-view-provider"; 28 | import { ModelConfig } from "./model-config"; 29 | import { isReasoningModel } from "./types"; 30 | 31 | // initClaudeModel initializes the Claude model with the given parameters. 32 | export async function initClaudeModel( 33 | viewProvider: ChatGptViewProvider, 34 | config: ModelConfig, 35 | ) { 36 | let apiBaseUrl = config.apiBaseUrl; 37 | if (!apiBaseUrl || apiBaseUrl == "https://api.openai.com/v1") { 38 | apiBaseUrl = "https://api.anthropic.com/v1"; 39 | } 40 | 41 | const ai = createAnthropic({ 42 | baseURL: apiBaseUrl, 43 | apiKey: config.apiKey, 44 | }); 45 | if (config.isReasoning) { 46 | viewProvider.apiReasoning = wrapLanguageModel({ 47 | model: ai.languageModel( 48 | viewProvider.reasoningModel 49 | ? viewProvider.reasoningModel 50 | : "claude-3-5-sonnet-20240620", 51 | ), 52 | middleware: extractReasoningMiddleware({ tagName: "think" }), 53 | }); 54 | } else { 55 | viewProvider.apiChat = ai.languageModel( 56 | viewProvider.model ? viewProvider.model : "claude-3-5-sonnet-20240620", 57 | ); 58 | } 59 | } 60 | 61 | // initGeminiModel initializes the Gemini model with the given parameters. 62 | export async function initGeminiModel( 63 | viewProvider: ChatGptViewProvider, 64 | config: ModelConfig, 65 | ) { 66 | let apiBaseUrl = config.apiBaseUrl; 67 | if (!apiBaseUrl || apiBaseUrl == "https://api.openai.com/v1") { 68 | apiBaseUrl = "https://generativelanguage.googleapis.com/v1beta"; 69 | } 70 | 71 | let ai = createGoogleGenerativeAI({ 72 | baseURL: apiBaseUrl, 73 | apiKey: config.apiKey, 74 | }); 75 | 76 | if (config.isReasoning) { 77 | const model = viewProvider.reasoningModel 78 | ? 
viewProvider.reasoningModel 79 | : "gemini-1.5-flash-latest"; 80 | viewProvider.apiReasoning = wrapLanguageModel({ 81 | model: ai(model), 82 | middleware: extractReasoningMiddleware({ tagName: "think" }), 83 | }); 84 | 85 | if (config.searchGrounding) { 86 | viewProvider.apiReasoning = wrapLanguageModel({ 87 | model: ai(model, { 88 | useSearchGrounding: config.searchGrounding, 89 | }), 90 | middleware: extractReasoningMiddleware({ tagName: "think" }), 91 | }); 92 | } 93 | } else { 94 | const model = viewProvider.model 95 | ? viewProvider.model 96 | : "gemini-1.5-flash-latest"; 97 | viewProvider.apiChat = ai(model); 98 | if (config.searchGrounding) { 99 | viewProvider.apiChat = ai(model, { 100 | useSearchGrounding: config.searchGrounding, 101 | }); 102 | } 103 | } 104 | } 105 | 106 | export async function initOllamaModel( 107 | viewProvider: ChatGptViewProvider, 108 | config: ModelConfig, 109 | ) { 110 | let apiBaseUrl = config.apiBaseUrl; 111 | if (!apiBaseUrl || apiBaseUrl == "https://api.openai.com/v1") { 112 | apiBaseUrl = "http://localhost:11434/api"; 113 | } 114 | 115 | const ai = createOllama({ 116 | baseURL: apiBaseUrl, 117 | }); 118 | 119 | if (config.isReasoning) { 120 | const model = viewProvider.reasoningModel 121 | ? viewProvider.reasoningModel 122 | : "deepseek-r1"; 123 | viewProvider.apiReasoning = wrapLanguageModel({ 124 | model: ai.languageModel(model), 125 | middleware: extractReasoningMiddleware({ tagName: "think" }), 126 | }); 127 | } else { 128 | const model = viewProvider.model ? viewProvider.model : "deepseek-r1"; 129 | if (isReasoningModel(model)) { 130 | viewProvider.apiChat = wrapLanguageModel({ 131 | model: ai.languageModel(model), 132 | middleware: extractReasoningMiddleware({ tagName: "think" }), 133 | }); 134 | } else { 135 | viewProvider.apiChat = ai.languageModel(model); 136 | } 137 | } 138 | } 139 | 140 | export async function initMistralModel( 141 | viewProvider: ChatGptViewProvider, 142 | config: ModelConfig, 143 | ) { 144 | let apiBaseUrl = config.apiBaseUrl; 145 | if (!apiBaseUrl || apiBaseUrl == "https://api.openai.com/v1") { 146 | apiBaseUrl = "https://api.mistral.ai/v1"; 147 | } 148 | 149 | const ai = createMistral({ 150 | baseURL: apiBaseUrl, 151 | apiKey: config.apiKey, 152 | }); 153 | 154 | if (config.isReasoning) { 155 | viewProvider.apiReasoning = wrapLanguageModel({ 156 | model: ai.languageModel( 157 | viewProvider.reasoningModel 158 | ? viewProvider.reasoningModel 159 | : "deepseek-r1", 160 | ), 161 | middleware: extractReasoningMiddleware({ tagName: "think" }), 162 | }); 163 | } else { 164 | viewProvider.apiChat = ai.languageModel( 165 | viewProvider.model ? viewProvider.model : "deepseek-r1", 166 | ); 167 | } 168 | } 169 | 170 | export async function initXAIModel( 171 | viewProvider: ChatGptViewProvider, 172 | config: ModelConfig, 173 | ) { 174 | let apiBaseUrl = config.apiBaseUrl; 175 | if (!apiBaseUrl || apiBaseUrl == "https://api.openai.com/v1") { 176 | apiBaseUrl = "https://api.x.ai/v1"; 177 | } 178 | 179 | const ai = createXai({ 180 | baseURL: apiBaseUrl, 181 | apiKey: config.apiKey, 182 | }); 183 | if (config.isReasoning) { 184 | viewProvider.apiReasoning = wrapLanguageModel({ 185 | model: ai.languageModel( 186 | viewProvider.reasoningModel ? viewProvider.reasoningModel : "grok-beta", 187 | ), 188 | middleware: extractReasoningMiddleware({ tagName: "think" }), 189 | }); 190 | } else { 191 | viewProvider.apiChat = ai.languageModel( 192 | viewProvider.model ? 
viewProvider.model : "grok-beta", 193 | ); 194 | } 195 | } 196 | 197 | export async function initTogetherModel( 198 | viewProvider: ChatGptViewProvider, 199 | config: ModelConfig, 200 | ) { 201 | let apiBaseUrl = config.apiBaseUrl; 202 | if (!apiBaseUrl || apiBaseUrl == "https://api.openai.com/v1") { 203 | apiBaseUrl = "https://api.together.xyz/v1"; 204 | } 205 | 206 | const ai = createTogetherAI({ 207 | apiKey: config.apiKey, 208 | baseURL: apiBaseUrl, 209 | }); 210 | 211 | if (config.isReasoning) { 212 | const model = viewProvider.reasoningModel 213 | ? viewProvider.reasoningModel 214 | : "deepseek-ai/DeepSeek-R1"; 215 | 216 | viewProvider.apiReasoning = wrapLanguageModel({ 217 | model: ai.languageModel(model), 218 | middleware: extractReasoningMiddleware({ tagName: "think" }), 219 | }); 220 | } else { 221 | const model = viewProvider.model 222 | ? viewProvider.model 223 | : "deepseek-ai/DeepSeek-R1"; 224 | 225 | if (isReasoningModel(model)) { 226 | viewProvider.apiChat = wrapLanguageModel({ 227 | model: ai.languageModel(model), 228 | middleware: extractReasoningMiddleware({ tagName: "think" }), 229 | }); 230 | } else { 231 | viewProvider.apiChat = ai.languageModel(model); 232 | } 233 | } 234 | } 235 | 236 | export async function initDeepSeekModel( 237 | viewProvider: ChatGptViewProvider, 238 | config: ModelConfig, 239 | ) { 240 | let apiBaseUrl = config.apiBaseUrl; 241 | if (!apiBaseUrl || apiBaseUrl == "https://api.openai.com/v1") { 242 | apiBaseUrl = "https://api.deepseek.com/v1"; 243 | } 244 | 245 | const ai = createDeepSeek({ 246 | apiKey: config.apiKey, 247 | baseURL: apiBaseUrl, 248 | }); 249 | 250 | if (config.isReasoning) { 251 | const model = viewProvider.reasoningModel 252 | ? viewProvider.reasoningModel 253 | : "deepseek-chat"; 254 | 255 | viewProvider.apiReasoning = wrapLanguageModel({ 256 | model: ai.languageModel(model), 257 | middleware: extractReasoningMiddleware({ tagName: "think" }), 258 | }); 259 | } else { 260 | const model = viewProvider.model ? viewProvider.model : "deepseek-chat"; 261 | 262 | if (isReasoningModel(model)) { 263 | viewProvider.apiChat = wrapLanguageModel({ 264 | model: ai.languageModel(model), 265 | middleware: extractReasoningMiddleware({ tagName: "think" }), 266 | }); 267 | } else { 268 | viewProvider.apiChat = ai.languageModel(model); 269 | } 270 | } 271 | } 272 | 273 | export async function initGroqModel( 274 | viewProvider: ChatGptViewProvider, 275 | config: ModelConfig, 276 | ) { 277 | let apiBaseUrl = config.apiBaseUrl; 278 | if (!apiBaseUrl || apiBaseUrl == "https://api.openai.com/v1") { 279 | apiBaseUrl = "https://api.groq.com/openai/v1"; 280 | } 281 | 282 | const ai = createGroq({ 283 | apiKey: config.apiKey, 284 | baseURL: apiBaseUrl, 285 | }); 286 | 287 | if (config.isReasoning) { 288 | const model = viewProvider.reasoningModel 289 | ? viewProvider.reasoningModel 290 | : "gemma2-9b-it"; 291 | 292 | viewProvider.apiReasoning = wrapLanguageModel({ 293 | model: ai.languageModel(model), 294 | middleware: extractReasoningMiddleware({ tagName: "think" }), 295 | }); 296 | } else { 297 | const model = viewProvider.model ? 
viewProvider.model : "gemma2-9b-it"; 298 | if (isReasoningModel(model)) { 299 | viewProvider.apiChat = wrapLanguageModel({ 300 | model: ai.languageModel(model), 301 | middleware: extractReasoningMiddleware({ tagName: "think" }), 302 | }); 303 | } else { 304 | viewProvider.apiChat = ai.languageModel(model); 305 | } 306 | } 307 | } 308 | 309 | export async function initPerplexityModel( 310 | viewProvider: ChatGptViewProvider, 311 | config: ModelConfig, 312 | ) { 313 | let apiBaseUrl = config.apiBaseUrl; 314 | if (!apiBaseUrl || apiBaseUrl == "https://api.openai.com/v1") { 315 | apiBaseUrl = "https://api.perplexity.ai"; 316 | } 317 | 318 | const ai = createPerplexity({ 319 | apiKey: config.apiKey, 320 | baseURL: apiBaseUrl, 321 | }); 322 | 323 | viewProvider.apiChat = ai.languageModel( 324 | viewProvider.model ? viewProvider.model : "sonar-pro", 325 | ); 326 | } 327 | 328 | export async function initOpenRouterModel( 329 | viewProvider: ChatGptViewProvider, 330 | config: ModelConfig, 331 | ) { 332 | const ai = createOpenRouter({ 333 | apiKey: config.apiKey, 334 | }); 335 | 336 | if (config.isReasoning) { 337 | const model = viewProvider.reasoningModel 338 | ? viewProvider.reasoningModel 339 | : "anthropic/claude-3.5-sonnet"; 340 | 341 | viewProvider.apiReasoning = wrapLanguageModel({ 342 | model: ai.languageModel(model), 343 | middleware: extractReasoningMiddleware({ tagName: "think" }), 344 | }); 345 | } else { 346 | const model = viewProvider.model 347 | ? viewProvider.model 348 | : "anthropic/claude-3.5-sonnet"; 349 | 350 | if (isReasoningModel(model)) { 351 | viewProvider.apiChat = wrapLanguageModel({ 352 | model: ai.languageModel(model), 353 | middleware: extractReasoningMiddleware({ tagName: "think" }), 354 | }); 355 | } else { 356 | viewProvider.apiChat = ai.languageModel(model); 357 | } 358 | } 359 | } 360 | 361 | export async function initAzureAIModel( 362 | viewProvider: ChatGptViewProvider, 363 | config: ModelConfig, 364 | ) { 365 | const azureAPIVersion = "2025-02-01-preview"; 366 | let apiBaseUrl = config.apiBaseUrl; 367 | 368 | const ai = createAzure({ 369 | apiKey: config.apiKey, 370 | endpoint: apiBaseUrl, 371 | apiVersion: azureAPIVersion, 372 | }); 373 | 374 | if (config.isReasoning) { 375 | const model = viewProvider.reasoningModel 376 | ? viewProvider.reasoningModel 377 | : "DeepSeek-R1"; 378 | 379 | viewProvider.apiReasoning = wrapLanguageModel({ 380 | model: ai.languageModel(model), 381 | middleware: extractReasoningMiddleware({ tagName: "think" }), 382 | }); 383 | } else { 384 | const model = viewProvider.model ? viewProvider.model : "DeepSeek-R1"; 385 | if (isReasoningModel(model)) { 386 | viewProvider.apiChat = wrapLanguageModel({ 387 | model: ai.languageModel(model), 388 | middleware: extractReasoningMiddleware({ tagName: "think" }), 389 | }); 390 | } else { 391 | viewProvider.apiChat = ai.languageModel(model); 392 | } 393 | } 394 | } 395 | 396 | // TODO: pending https://github.com/vercel/ai/issues/4918 to support language model. 397 | export async function initReplicateModel( 398 | viewProvider: ChatGptViewProvider, 399 | config: ModelConfig, 400 | ) { 401 | let apiBaseUrl = config.apiBaseUrl; 402 | if (!apiBaseUrl || apiBaseUrl == "https://api.openai.com/v1") { 403 | apiBaseUrl = "https://api.replicate.com/v1"; 404 | } 405 | 406 | const ai = createReplicate({ 407 | apiToken: config.apiKey, 408 | baseURL: apiBaseUrl, 409 | }); 410 | 411 | if (config.isReasoning) { 412 | const model = viewProvider.reasoningModel 413 | ? 
viewProvider.reasoningModel 414 | : "deepseek-ai/deepseek-r1"; 415 | 416 | viewProvider.apiReasoning = wrapLanguageModel({ 417 | model: ai.languageModel(model), 418 | middleware: extractReasoningMiddleware({ tagName: "think" }), 419 | }); 420 | } else { 421 | const model = viewProvider.model ? viewProvider.model : "deepseek-ai/deepseek-r1"; 422 | if (isReasoningModel(model)) { 423 | viewProvider.apiChat = wrapLanguageModel({ 424 | model: ai.languageModel(model), 425 | middleware: extractReasoningMiddleware({ tagName: "think" }), 426 | }); 427 | } else { 428 | viewProvider.apiChat = ai.languageModel(model); 429 | } 430 | } 431 | } -------------------------------------------------------------------------------- /src/logger.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @author Pengfei Ni 3 | * 4 | * @license 5 | * Copyright (c) 2024 - Present, Pengfei Ni 6 | * 7 | * All rights reserved. Code licensed under the ISC license 8 | * 9 | * The above copyright notice and this permission notice shall be included in all 10 | * copies or substantial portions of the Software. 11 | */ 12 | import * as vscode from "vscode"; 13 | 14 | export const logger = vscode.window.createOutputChannel("ChatGPT Copilot"); 15 | -------------------------------------------------------------------------------- /src/mcp-server-provider.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * MCP Server Manager for VS Code Extension 3 | */ 4 | import { v4 as uuidv4 } from "uuid"; 5 | import * as vscode from "vscode"; 6 | 7 | export interface MCPServer { 8 | id: string; 9 | name: string; 10 | type: string; // "sse", "stdio", or "streamable-http" 11 | isEnabled: boolean; 12 | command?: string; 13 | url?: string; 14 | arguments?: string[]; 15 | env?: Record; 16 | tools?: string[]; 17 | headers?: Record; // Added headers for HTTP/SSE requests 18 | } 19 | 20 | export interface MCPServerStore { 21 | servers: MCPServer[]; 22 | addServer: (server: MCPServer) => void; 23 | removeServer: (id: string) => void; 24 | updateServer: (id: string, updates: Partial) => void; 25 | toggleServerEnabled: (id: string) => void; 26 | } 27 | 28 | export default class MCPServerProvider implements vscode.WebviewViewProvider { 29 | private webView?: vscode.WebviewView; 30 | private store: { servers: MCPServer[]; } = { servers: [] }; 31 | private _panel?: vscode.WebviewPanel; 32 | 33 | constructor(private context: vscode.ExtensionContext) { 34 | this.loadServers(); 35 | } 36 | 37 | private loadServers() { 38 | this.store = this.context.globalState.get<{ servers: MCPServer[]; }>( 39 | "mcpServers", 40 | { servers: [] }, 41 | ); 42 | } 43 | 44 | private saveServers() { 45 | this.context.globalState.update("mcpServers", this.store); 46 | } 47 | 48 | public resolveWebviewView( 49 | webviewView: vscode.WebviewView, 50 | _context: vscode.WebviewViewResolveContext, 51 | _token: vscode.CancellationToken, 52 | ) { 53 | this.webView = webviewView; 54 | webviewView.webview.options = { 55 | enableScripts: true, 56 | localResourceRoots: [this.context.extensionUri], 57 | }; 58 | 59 | webviewView.webview.html = this.getWebviewContent(webviewView.webview); 60 | 61 | webviewView.webview.onDidReceiveMessage(async (data) => { 62 | switch (data.type) { 63 | case "addServer": 64 | this.addServer(data.server); 65 | break; 66 | case "updateServer": 67 | this.updateServer(data.server); 68 | break; 69 | case "deleteServer": 70 | this.deleteServer(data.id); 71 | break; 72 | case 
"toggleServerEnabled": 73 | this.toggleServerEnabled(data.id); 74 | break; 75 | case "getServers": 76 | this.handleGetServers(webviewView.webview); 77 | break; 78 | } 79 | }); 80 | } 81 | 82 | public addServer(server: Omit) { 83 | const newServer: MCPServer = { 84 | id: uuidv4(), 85 | ...server, 86 | }; 87 | this.store.servers.push(newServer); 88 | this.saveServers(); 89 | this.sendServersToAll(this.store.servers); 90 | } 91 | 92 | public updateServer(server: MCPServer) { 93 | const index = this.store.servers.findIndex((s) => s.id === server.id); 94 | if (index !== -1) { 95 | this.store.servers[index] = { 96 | ...this.store.servers[index], 97 | ...server, 98 | }; 99 | this.saveServers(); 100 | this.sendServersToAll(this.store.servers); 101 | } else { 102 | console.error(`Server with id ${server.id} not found for update`); 103 | } 104 | } 105 | 106 | public deleteServer(id: string) { 107 | const initialLength = this.store.servers.length; 108 | this.store.servers = this.store.servers.filter((s) => s.id !== id); 109 | 110 | if (this.store.servers.length === initialLength) { 111 | console.error(`Server with id ${id} not found for deletion`); 112 | return; 113 | } 114 | 115 | this.saveServers(); 116 | this.sendServersToAll(this.store.servers); 117 | } 118 | 119 | public toggleServerEnabled(id: string) { 120 | const index = this.store.servers.findIndex((s) => s.id === id); 121 | if (index !== -1) { 122 | this.store.servers[index].isEnabled = 123 | !this.store.servers[index].isEnabled; 124 | this.saveServers(); 125 | this.sendServersToAll(this.store.servers); 126 | } 127 | } 128 | 129 | private sendServersToAll(servers: MCPServer[]) { 130 | this.webView?.webview.postMessage({ 131 | type: "updateServers", 132 | servers: servers, 133 | }); 134 | 135 | if (this._panel?.webview) { 136 | this._panel.webview.postMessage({ 137 | type: "updateServers", 138 | servers: servers, 139 | }); 140 | } 141 | } 142 | 143 | public getServers() { 144 | return this.store.servers; 145 | } 146 | 147 | public setPanel(panel: vscode.WebviewPanel | undefined) { 148 | this._panel = panel; 149 | } 150 | 151 | private handleGetServers(webview: vscode.Webview) { 152 | webview.postMessage({ 153 | type: "updateServers", 154 | servers: this.store.servers, 155 | }); 156 | } 157 | 158 | public getWebviewContent(webview: vscode.Webview) { 159 | const scriptUri = webview.asWebviewUri( 160 | vscode.Uri.joinPath(this.context.extensionUri, "media", "mcp-servers.js"), 161 | ); 162 | const stylesUri = webview.asWebviewUri( 163 | vscode.Uri.joinPath( 164 | this.context.extensionUri, 165 | "media", 166 | "mcp-servers.css", 167 | ), 168 | ); 169 | const mcpIconUri = webview.asWebviewUri( 170 | vscode.Uri.joinPath(this.context.extensionUri, "media", "mcp.svg"), 171 | ); 172 | 173 | return ` 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 |
182 |
183 |
184 | MCP 185 |

MCP Servers

186 |
187 | 193 |
194 | 195 |
196 | ℹ️ 197 |

198 | Model Context Protocol offers external tools to AI Agents. You can learn more 199 | here. 200 |

201 |
202 | 203 |
204 |
205 | 214 |
215 |
216 | 217 | 218 | `; 219 | } 220 | } 221 | -------------------------------------------------------------------------------- /src/mcp.ts: -------------------------------------------------------------------------------- 1 | import { Client } from "@modelcontextprotocol/sdk/client/index.js"; 2 | import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js"; 3 | import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js"; 4 | import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js"; 5 | import { Transport } from "@modelcontextprotocol/sdk/shared/transport.js"; 6 | import type { Tool } from "ai"; 7 | import { jsonSchema, tool } from "ai"; 8 | import { EventSource } from 'eventsource'; 9 | import { JSONSchema7 } from "json-schema"; 10 | import { logger } from "./logger"; 11 | 12 | // define EventSource globally 13 | globalThis.EventSource = EventSource; 14 | 15 | export type MCPServerConfig = { 16 | mcpServers: { 17 | [key: string]: { 18 | command: string; 19 | args: string[]; 20 | url: string; 21 | env?: Record; 22 | isEnabled: boolean; 23 | type: string; // "sse", "stdio", or "streamable-http" 24 | headers?: Record; // Added headers for HTTP/SSE requests 25 | }; 26 | }; 27 | 28 | /** 29 | * Optional callback that will be called when a tool is executed 30 | * Useful for timing, logging, or other instrumentation 31 | */ 32 | onCallTool?: ( 33 | serverName: string, 34 | toolName: string, 35 | args: any, 36 | result: string | Promise, 37 | ) => void; 38 | }; 39 | 40 | /** 41 | * The resulting tool set with tools and clients 42 | */ 43 | export type ToolSet = { 44 | tools: { 45 | [key: string]: Tool; 46 | }; 47 | clients: { 48 | [key: string]: Client; 49 | }; 50 | transports: { 51 | [key: string]: Transport; 52 | }; 53 | }; 54 | 55 | /** 56 | * Creates a set of tools from MCP servers that can be used with the AI SDK 57 | * @param config Configuration for the tool set 58 | * @returns A promise that resolves to the tool set 59 | */ 60 | export async function createToolSet(config: MCPServerConfig): Promise { 61 | let toolset: ToolSet = { 62 | tools: {}, 63 | clients: {}, 64 | transports: {}, 65 | }; 66 | 67 | // Initialize all server connections and fetch their tools 68 | for (const [serverName, serverConfig] of Object.entries(config.mcpServers)) { 69 | if (!serverConfig.isEnabled) { 70 | continue; 71 | } 72 | 73 | let transport: Transport; 74 | try { 75 | if (serverConfig.type === "sse") { 76 | transport = new SSEClientTransport(new URL(serverConfig.url), { 77 | requestInit: { 78 | headers: serverConfig.headers, 79 | }, 80 | eventSourceInit: { 81 | fetch: (url, init) => { 82 | const headers = new Headers(init?.headers || {}); 83 | return fetch(url, { 84 | ...init, 85 | headers 86 | }); 87 | } 88 | } 89 | }); 90 | } else if (serverConfig.type === "streamable-http") { 91 | transport = new StreamableHTTPClientTransport(new URL(serverConfig.url), { 92 | requestInit: { 93 | headers: serverConfig.headers, 94 | }, 95 | }); 96 | } else { 97 | transport = new StdioClientTransport({ 98 | command: serverConfig.command, 99 | args: serverConfig.args, 100 | env: { 101 | ...serverConfig.env, 102 | ...(process.env.PATH ? 
{ PATH: process.env.PATH } : {}), 103 | }, 104 | stderr: "pipe", 105 | }); 106 | } 107 | 108 | transport.onerror = async (error) => { 109 | logger.appendLine(`ERROR: MCP server ${serverName} error: ${error}`); 110 | await transport.close(); 111 | }; 112 | toolset.transports[serverName] = transport; 113 | await transport.start(); 114 | transport.start = async () => { }; // No-op now, .connect() won't fail 115 | 116 | const client = new Client( 117 | { 118 | name: "ChatGPT Copilot (VSCode Extension)", 119 | version: "1.0.0", 120 | }, 121 | { 122 | capabilities: {}, 123 | }, 124 | ); 125 | toolset.clients[serverName] = client; 126 | await client.connect(transport); 127 | 128 | // Get list of tools and add them to the toolset 129 | const toolList = await client.listTools(); 130 | for (const t of toolList.tools) { 131 | let toolName = t.name; 132 | const parameters = jsonSchema(t.inputSchema as JSONSchema7); 133 | if (parameters.jsonSchema.additionalProperties == null) { 134 | parameters.jsonSchema.additionalProperties = false; 135 | } 136 | // const parameters = jsonSchema( 137 | // Object.fromEntries( 138 | // Object.entries(t.inputSchema as JSONSchema7) 139 | // .filter(([key]) => key !== "additionalProperties" && key !== "$schema") 140 | // ) 141 | // ); 142 | toolset.tools[toolName] = tool({ 143 | description: t.description || toolName, 144 | parameters: parameters, 145 | execute: async (args) => { 146 | const result = await client.callTool({ 147 | name: t.name, 148 | arguments: args as Record, 149 | }); 150 | const strResult = JSON.stringify(result); 151 | if (config.onCallTool) { 152 | config.onCallTool(serverName, toolName, args, strResult); 153 | } 154 | return strResult; 155 | }, 156 | }); 157 | } 158 | } catch (error) { 159 | logger.appendLine(`ERROR: MCP server ${serverName} failed: ${error}`); 160 | continue; 161 | } 162 | } 163 | 164 | return toolset; 165 | } 166 | 167 | /** 168 | * Closes all clients in a tool set 169 | * @param toolSet The tool set to close 170 | */ 171 | export async function closeToolSet(toolSet: ToolSet): Promise { 172 | for (const client of Object.values(toolSet.clients)) { 173 | await client.close(); 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /src/model-config.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable eqeqeq */ 2 | /* eslint-disable @typescript-eslint/naming-convention */ 3 | /** 4 | * @author Pengfei Ni 5 | * 6 | * @license 7 | * Copyright (c) 2024 - Present, Pengfei Ni 8 | * 9 | * All rights reserved. Code licensed under the ISC license 10 | * 11 | * The above copyright notice and this permission notice shall be included in all 12 | * copies or substantial portions of the Software. 
13 | */ 14 | 15 | export class ModelConfig { 16 | public provider; 17 | public apiKey: string; 18 | public apiBaseUrl: string; 19 | public maxTokens: number; 20 | public temperature: number; 21 | public topP: number; 22 | public organization: string; 23 | public systemPrompt: string; 24 | public systemPromptOverride: string; 25 | public searchGrounding: boolean; 26 | public isReasoning: boolean; 27 | 28 | constructor({ 29 | provider, 30 | apiKey, 31 | apiBaseUrl, 32 | maxTokens, 33 | temperature, 34 | topP, 35 | organization, 36 | systemPrompt, 37 | systemPromptOverride, 38 | searchGrounding, 39 | isReasoning, 40 | }: { 41 | provider: string; 42 | apiKey: string; 43 | apiBaseUrl: string; 44 | maxTokens: number; 45 | temperature: number; 46 | topP: number; 47 | organization: string; 48 | systemPrompt: string; 49 | systemPromptOverride?: string; 50 | searchGrounding?: boolean; 51 | isReasoning?: boolean; 52 | }) { 53 | this.provider = provider; 54 | this.apiKey = apiKey; 55 | this.apiBaseUrl = apiBaseUrl; 56 | this.maxTokens = maxTokens; 57 | this.temperature = temperature; 58 | this.topP = topP; 59 | this.organization = organization; 60 | this.systemPrompt = systemPrompt; 61 | this.systemPromptOverride = systemPromptOverride ?? ""; 62 | this.searchGrounding = searchGrounding ?? false; 63 | this.isReasoning = isReasoning ?? false; 64 | } 65 | } 66 | 67 | export function getHeaders() { 68 | return { 69 | "User-Agent": "ChatGPT Copilot (VSCode Extension)", 70 | "X-Title": "ChatGPT Copilot (VSCode Extension)", 71 | "HTTP-Referer": "https://github.com/feiskyer/chatgpt-copilot", 72 | }; 73 | } 74 | -------------------------------------------------------------------------------- /src/openai-legacy.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable eqeqeq */ 2 | /* eslint-disable @typescript-eslint/naming-convention */ 3 | /** 4 | * @author Pengfei Ni 5 | * 6 | * @license 7 | * Copyright (c) 2024 - Present, Pengfei Ni 8 | * 9 | * All rights reserved. Code licensed under the ISC license 10 | * 11 | * The above copyright notice and this permission notice shall be included in all 12 | * copies or substantial portions of the Software. 13 | */ 14 | import { createAzure } from "@ai-sdk/azure"; 15 | import { createOpenAI } from "@ai-sdk/openai"; 16 | import { CoreMessage, streamText } from "ai"; 17 | import ChatGptViewProvider from "./chatgpt-view-provider"; 18 | import { logger } from "./logger"; 19 | import { ModelConfig, getHeaders } from "./model-config"; 20 | 21 | const azureAPIVersion = "2025-02-01-preview"; 22 | 23 | // initGptLegacyModel initializes the GPT legacy model. 
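// Worked example (editor's sketch, not part of the original source): the Azure
// branch below derives the resource name and deployment name purely from the
// configured base URL. The URL and names here are illustrative assumptions.
//
//   apiBaseUrl   = "https://contoso.openai.azure.com/openai/deployments/gpt-35-turbo"
//   instanceName = apiBaseUrl.split(".")[0].split("//")[1]
//                = "contoso"            // first host label becomes the Azure resource name
//   deployName   = apiBaseUrl.split("/")[apiBaseUrl.split("/").length - 1]
//                = "gpt-35-turbo"       // last path segment is used as the deployment/model
//
// Any base URL that does not contain "openai.azure.com" falls through to the
// plain OpenAI-compatible branch instead.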
24 | export function initGptLegacyModel( 25 | viewProvider: ChatGptViewProvider, 26 | config: ModelConfig, 27 | ) { 28 | if (config.apiBaseUrl?.includes("openai.azure.com")) { 29 | const instanceName = config.apiBaseUrl.split(".")[0].split("//")[1]; 30 | const deployName = 31 | config.apiBaseUrl.split("/")[config.apiBaseUrl.split("/").length - 1]; 32 | 33 | viewProvider.model = deployName; 34 | const azure = createAzure({ 35 | resourceName: instanceName, 36 | apiKey: config.apiKey, 37 | apiVersion: azureAPIVersion, 38 | }); 39 | if (config.isReasoning) { 40 | viewProvider.apiReasoning = azure.completion(deployName); 41 | } else { 42 | viewProvider.apiCompletion = azure.completion(deployName); 43 | } 44 | } else { 45 | // OpenAI 46 | const openai = createOpenAI({ 47 | baseURL: config.apiBaseUrl, 48 | apiKey: config.apiKey, 49 | organization: config.organization, 50 | }); 51 | if (config.isReasoning) { 52 | viewProvider.apiReasoning = openai.completion( 53 | viewProvider.reasoningModel ? viewProvider.reasoningModel : "o1-mini", 54 | ); 55 | } else { 56 | viewProvider.apiCompletion = openai.completion( 57 | viewProvider.model ? viewProvider.model : "gpt-4o", 58 | ); 59 | } 60 | } 61 | } 62 | 63 | // chatCompletion is a function that completes the chat. 64 | export async function chatCompletion( 65 | provider: ChatGptViewProvider, 66 | question: string, 67 | images: Record, 68 | startResponse: () => void, 69 | updateResponse: (message: string) => void, 70 | ) { 71 | if (!provider.apiCompletion) { 72 | throw new Error("apiCompletion is not defined"); 73 | } 74 | 75 | var chatMessage: CoreMessage = { 76 | role: "user", 77 | content: [ 78 | { 79 | type: "text", 80 | text: question, 81 | }, 82 | ], 83 | }; 84 | Object.entries(images).forEach(([_, content]) => { 85 | (chatMessage.content as any[]).push({ 86 | type: "image", 87 | image: content, 88 | }); 89 | }); 90 | 91 | /* placeholder for response */ 92 | startResponse(); 93 | logger.appendLine( 94 | `INFO: chatgpt.model: ${provider.model} chatgpt.question: ${question}`, 95 | ); 96 | 97 | provider.chatHistory.push(chatMessage); 98 | let prompt = ""; 99 | for (const message of provider.chatHistory) { 100 | prompt += `${message.role === "user" ? "Human:" : "AI:"} ${message.content}\n`; 101 | } 102 | prompt += `AI: `; 103 | 104 | const result = await streamText({ 105 | system: provider.modelConfig.systemPrompt, 106 | model: provider.apiCompletion, 107 | prompt: prompt, 108 | maxTokens: provider.modelConfig.maxTokens > 0 ? provider.modelConfig.maxTokens : undefined, 109 | temperature: provider.modelConfig.temperature, 110 | abortSignal: provider.abortController?.signal, 111 | tools: provider.toolSet?.tools || undefined, 112 | headers: getHeaders(), 113 | }); 114 | const chunks = []; 115 | for await (const textPart of result.textStream) { 116 | updateResponse(textPart); 117 | chunks.push(textPart); 118 | } 119 | provider.response = chunks.join(""); 120 | provider.chatHistory.push({ role: "assistant", content: chunks.join("") }); 121 | logger.appendLine(`INFO: chatgpt.response: ${provider.response}`); 122 | } 123 | -------------------------------------------------------------------------------- /src/openai.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable eqeqeq */ 2 | /* eslint-disable @typescript-eslint/naming-convention */ 3 | /** 4 | * @author Pengfei Ni 5 | * 6 | * @license 7 | * Copyright (c) 2024 - Present, Pengfei Ni 8 | * 9 | * All rights reserved. 
Code licensed under the ISC license 10 | * 11 | * The above copyright notice and this permission notice shall be included in all 12 | * copies or substantial portions of the Software. 13 | */ 14 | import { createAzure } from "@ai-sdk/azure"; 15 | import { createOpenAI } from "@ai-sdk/openai"; 16 | import { 17 | CoreMessage, 18 | extractReasoningMiddleware, 19 | streamText, 20 | wrapLanguageModel 21 | } from "ai"; 22 | import ChatGptViewProvider from "./chatgpt-view-provider"; 23 | import { logger } from "./logger"; 24 | import { getHeaders, ModelConfig } from "./model-config"; 25 | import { isOpenAIOModel, isReasoningModel } from "./types"; 26 | import { fetchOpenAI } from "./utils"; 27 | 28 | const azureAPIVersion = "2025-02-01-preview"; 29 | 30 | // initGptModel initializes the GPT model. 31 | export async function initGptModel( 32 | viewProvider: ChatGptViewProvider, 33 | config: ModelConfig, 34 | ) { 35 | // AzureOpenAI 36 | if (config.apiBaseUrl?.includes("openai.azure.com")) { 37 | const instanceName = config.apiBaseUrl.split(".")[0].split("//")[1]; 38 | const deployName = 39 | config.apiBaseUrl.split("/")[config.apiBaseUrl.split("/").length - 1]; 40 | 41 | const azure = createAzure({ 42 | resourceName: instanceName, 43 | apiKey: config.apiKey, 44 | apiVersion: azureAPIVersion, 45 | fetch: fetchOpenAI, // workaround for https://github.com/vercel/ai/issues/4662 46 | }); 47 | 48 | if (config.isReasoning) { 49 | viewProvider.apiReasoning = wrapLanguageModel({ 50 | model: azure.languageModel(deployName), 51 | middleware: extractReasoningMiddleware({ tagName: "think" }), 52 | }); 53 | } else { 54 | if (isReasoningModel(deployName)) { 55 | viewProvider.apiChat = wrapLanguageModel({ 56 | model: azure.languageModel(deployName), 57 | middleware: extractReasoningMiddleware({ tagName: "think" }), 58 | }); 59 | } else { 60 | viewProvider.apiChat = azure.languageModel(deployName); 61 | } 62 | } 63 | } else { 64 | // OpenAI 65 | const openai = createOpenAI({ 66 | baseURL: config.apiBaseUrl, 67 | apiKey: config.apiKey, 68 | organization: config.organization, 69 | fetch: fetchOpenAI, // workaround for https://github.com/vercel/ai/issues/4662 70 | }); 71 | 72 | if (config.isReasoning) { 73 | const model = viewProvider.reasoningModel 74 | ? viewProvider.reasoningModel 75 | : "o3-mini"; 76 | viewProvider.apiReasoning = wrapLanguageModel({ 77 | model: openai.languageModel(model), 78 | middleware: extractReasoningMiddleware({ tagName: "think" }), 79 | }); 80 | } else { 81 | const model = viewProvider.model ? viewProvider.model : "gpt-4o"; 82 | if (isReasoningModel(model)) { 83 | viewProvider.apiChat = wrapLanguageModel({ 84 | model: openai.languageModel(model), 85 | middleware: extractReasoningMiddleware({ tagName: "think" }), 86 | }); 87 | } else { 88 | viewProvider.apiChat = openai.languageModel(model); 89 | } 90 | } 91 | } 92 | } 93 | 94 | // chatGpt is a function that completes the chat. 
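// Usage sketch (added for illustration; hedged, not from the original source).
// It shows how a caller might stream a single question through chatGpt once the
// provider's apiChat model has been initialized (e.g. via initGptModel above).
// The logger-based callbacks are placeholder assumptions; the real extension
// wires these callbacks to its webview in chatgpt-view-provider.ts.
//
//   await chatGpt(
//     provider,                                  // ChatGptViewProvider with apiChat set
//     "Summarize the selected file",             // user question
//     {},                                        // optional images keyed by name
//     () => logger.appendLine("INFO: response started"),
//     (text) => logger.appendLine(text),         // streamed text deltas
//     (reason) => logger.appendLine(reason),     // streamed reasoning deltas
//   );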
95 | export async function chatGpt( 96 | provider: ChatGptViewProvider, 97 | question: string, 98 | images: Record, 99 | startResponse: () => void, 100 | updateResponse: (message: string) => void, 101 | updateReasoning: (message: string, roundNumber?: number) => void, 102 | ) { 103 | if (!provider.apiChat) { 104 | throw new Error("apiChat is undefined"); 105 | } 106 | 107 | try { 108 | logger.appendLine( 109 | `INFO: chatgpt.model: ${provider.model} chatgpt.question: ${question.trim()}`, 110 | ); 111 | 112 | var chatMessage: CoreMessage = { 113 | role: "user", 114 | content: [ 115 | { 116 | type: "text", 117 | text: question, 118 | }, 119 | ], 120 | }; 121 | Object.entries(images).forEach(([_, content]) => { 122 | (chatMessage.content as any[]).push({ 123 | type: "image", 124 | image: content, 125 | }); 126 | }); 127 | 128 | /* placeholder for response */ 129 | startResponse(); 130 | 131 | const chunks = []; 132 | const reasonChunks = []; 133 | // Add a counter for tool calls to generate unique IDs 134 | let toolCallCounter = 0; 135 | provider.chatHistory.push(chatMessage); 136 | const modelName = provider.model ? provider.model : "gpt-4o"; 137 | var inputs: any = { 138 | system: provider.modelConfig.systemPrompt, 139 | model: provider.apiChat, 140 | messages: provider.chatHistory, 141 | abortSignal: provider.abortController?.signal, 142 | tools: provider.toolSet?.tools || undefined, 143 | maxSteps: provider.maxSteps, 144 | headers: getHeaders(), 145 | ...(isOpenAIOModel(modelName) && { 146 | providerOptions: { 147 | openai: { 148 | reasoningSummary: "auto", 149 | reasoningEffort: provider.reasoningEffort, 150 | ...(provider.modelConfig.maxTokens > 0 && { 151 | maxCompletionTokens: provider.modelConfig.maxTokens, 152 | }), 153 | }, 154 | }, 155 | }), 156 | ...(!isOpenAIOModel(modelName) && { 157 | maxTokens: provider.modelConfig.maxTokens > 0 ? provider.modelConfig.maxTokens : undefined, 158 | temperature: provider.modelConfig.temperature, 159 | // topP: provider.modelConfig.topP, 160 | }), 161 | }; 162 | const result = await streamText(inputs); 163 | for await (const part of result.fullStream) { 164 | // logger.appendLine(`INFO: chatgpt.model: ${provider.model} chatgpt.question: ${question.trim()} response: ${JSON.stringify(part, null, 2)}`); 165 | switch (part.type) { 166 | case "text-delta": { 167 | updateResponse(part.textDelta); 168 | chunks.push(part.textDelta); 169 | break; 170 | } 171 | case "reasoning": { 172 | updateReasoning(part.textDelta, 1); // Main chat only has one reasoning round 173 | reasonChunks.push(part.textDelta); 174 | break; 175 | } 176 | case "tool-call": { 177 | let formattedArgs = part.args; 178 | if (typeof formattedArgs === 'string') { 179 | try { 180 | formattedArgs = JSON.parse(formattedArgs); 181 | } catch (e) { 182 | // If parsing fails, use the original string 183 | // @ts-ignore 184 | formattedArgs = part.args; 185 | } 186 | } 187 | 188 | // Generate a unique ID for this tool call 189 | toolCallCounter++; 190 | const toolCallId = `tool-call-${Date.now()}-${toolCallCounter}`; 191 | 192 | // Create tool icon based on the tool name 193 | const toolIcon = ` 194 | 195 | 196 | `; 197 | 198 | // Create an enhanced collapsible HTML structure for the tool call 199 | const toolCallHtml = ` 200 |
201 |
202 | 203 | 204 | 205 |
206 | ${toolIcon} 207 | ${part.toolName} 208 |
209 | Running 210 |
211 | 226 |
`; 227 | 228 | updateResponse(toolCallHtml); 229 | chunks.push(toolCallHtml); 230 | break; 231 | } 232 | 233 | // @ts-ignore; 234 | case "tool-result": { 235 | // @ts-ignore 236 | logger.appendLine(`INFO: Tool ${part.toolName} result received: ${JSON.stringify(part.result)}`); 237 | 238 | // @ts-ignore 239 | let formattedResult = part.result; 240 | if (typeof formattedResult === 'string') { 241 | try { 242 | formattedResult = JSON.parse(formattedResult); 243 | } catch (e) { 244 | // If parsing fails, use the original string 245 | // @ts-ignore 246 | formattedResult = part.result; 247 | } 248 | } 249 | // Create a special marker for tool results that will be processed by tool-call.js 250 | // @ts-ignore 251 | // Store the complete result object with full structure to allow proper extraction in tool-call.js 252 | const toolResultText = ` 253 | ${JSON.stringify(formattedResult)} 254 | `; 255 | 256 | updateResponse(toolResultText); 257 | chunks.push(toolResultText); 258 | break; 259 | } 260 | 261 | case "error": { 262 | // raise the error to be caught by the catch block 263 | throw new Error(`${part.error}`); 264 | } 265 | 266 | 267 | default: { 268 | logger.appendLine( 269 | `INFO: chatgpt.model: ${provider.model}, chatgpt.question: ${question.trim()}, debug response: ${JSON.stringify(part)}`, 270 | ); 271 | break; 272 | } 273 | } 274 | } 275 | 276 | provider.response = chunks.join(""); 277 | if (reasonChunks.join("") != "") { 278 | provider.reasoning = reasonChunks.join(""); 279 | } 280 | const reasoning = await result.reasoning; 281 | if (reasoning && reasoning != "") { 282 | provider.reasoning = reasoning; 283 | updateReasoning(reasoning, 1); // Main chat only has one reasoning round 284 | } 285 | 286 | // Save both the text response and tool calls in the chat history 287 | const assistantResponse: any = { 288 | role: "assistant", 289 | content: chunks.join("") 290 | }; 291 | provider.chatHistory.push(assistantResponse); 292 | 293 | logger.appendLine( 294 | `INFO: chatgpt.model: ${provider.model}, chatgpt.question: ${question.trim()}, final response: ${provider.response}`, 295 | ); 296 | } catch (error) { 297 | logger.appendLine( 298 | `ERROR: chatgpt.model: ${provider.model} error: ${error}, backtrace: ${new Error().stack}`, 299 | ); 300 | provider.sendMessage({ 301 | type: "addError", 302 | value: `Error: ${error}`, 303 | autoScroll: provider.autoScroll, 304 | }); 305 | } 306 | } 307 | -------------------------------------------------------------------------------- /src/prompt-based-chat.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @author Pengfei Ni 3 | * 4 | * @license 5 | * Copyright (c) 2024 - Present, Pengfei Ni 6 | * 7 | * All rights reserved. Code licensed under the ISC license 8 | * 9 | * The above copyright notice and this permission notice shall be included in all 10 | * copies or substantial portions of the Software. 
11 | */ 12 | 13 | import { CoreMessage, streamText } from "ai"; 14 | import * as vscode from "vscode"; 15 | import ChatGptViewProvider from "./chatgpt-view-provider"; 16 | import { logger } from "./logger"; 17 | import { getHeaders } from "./model-config"; 18 | import { 19 | executePromptToolCall, 20 | generateToolDescriptions 21 | } from "./prompt-based-tools"; 22 | import { ToolCallParser } from "./tool-call-parser"; 23 | import { isOpenAIOModel, PromptBasedToolConfig } from "./types"; 24 | 25 | /** 26 | * Get prompt-based tool configuration from VSCode settings 27 | */ 28 | function getPromptBasedToolConfig(): PromptBasedToolConfig { 29 | const configuration = vscode.workspace.getConfiguration("chatgpt"); 30 | 31 | return { 32 | enabled: configuration.get("promptBasedTools.enabled") || false, 33 | toolCallPattern: "", 34 | maxToolCalls: configuration.get("gpt3.maxSteps") || 15, 35 | }; 36 | } 37 | 38 | /** 39 | * Enhanced chatGpt function with prompt-based tool call support 40 | * Implements the complete tool call loop like AI SDK does automatically 41 | */ 42 | export async function chatGptWithPromptTools( 43 | provider: ChatGptViewProvider, 44 | question: string, 45 | images: Record, 46 | startResponse: () => void, 47 | updateResponse: (message: string) => void, 48 | updateReasoning: (message: string, roundNumber?: number) => void, 49 | ) { 50 | if (!provider.apiChat) { 51 | throw new Error("apiChat is undefined"); 52 | } 53 | 54 | try { 55 | logger.appendLine( 56 | `INFO: chatgpt.model: ${provider.model} chatgpt.question: ${question.trim()}`, 57 | ); 58 | 59 | const promptToolConfig = getPromptBasedToolConfig(); 60 | const modelName = provider.model ? provider.model : "gpt-4o"; 61 | logger.appendLine(`INFO: Using prompt-based tools: ${promptToolConfig.enabled}, model: ${modelName}`); 62 | 63 | var chatMessage: CoreMessage = { 64 | role: "user", 65 | content: [ 66 | { 67 | type: "text", 68 | text: question, 69 | }, 70 | ], 71 | }; 72 | Object.entries(images).forEach(([_, content]) => { 73 | (chatMessage.content as any[]).push({ 74 | type: "image", 75 | image: content, 76 | }); 77 | }); 78 | 79 | /* placeholder for response */ 80 | startResponse(); 81 | 82 | const chunks: string[] = []; 83 | const reasonChunks: string[] = []; 84 | let toolCallCounter = 0; 85 | provider.chatHistory.push(chatMessage); 86 | 87 | // Prepare system prompt with tool descriptions if using prompt-based tools 88 | let systemPrompt = provider.modelConfig.systemPrompt; 89 | if (promptToolConfig.enabled && provider.toolSet) { 90 | const toolDescriptions = generateToolDescriptions(provider.toolSet); 91 | if (toolDescriptions) { 92 | systemPrompt = systemPrompt ? 
`${systemPrompt}\n\n${toolDescriptions}` : toolDescriptions; 93 | logger.appendLine(`INFO: Added tool descriptions to system prompt`); 94 | } 95 | } 96 | 97 | // Implement tool call loop for prompt-based tools (like AI SDK does automatically) 98 | if (promptToolConfig.enabled && provider.toolSet) { 99 | toolCallCounter = await executePromptBasedToolLoop( 100 | provider, 101 | systemPrompt, 102 | modelName, 103 | chunks, 104 | reasonChunks, 105 | toolCallCounter, 106 | updateResponse, 107 | updateReasoning 108 | ); 109 | } else { 110 | // Use standard AI SDK with native tools 111 | toolCallCounter = await executeStandardChat( 112 | provider, 113 | systemPrompt, 114 | modelName, 115 | chunks, 116 | reasonChunks, 117 | toolCallCounter, 118 | updateResponse, 119 | updateReasoning 120 | ); 121 | } 122 | 123 | provider.response = chunks.join(""); 124 | if (reasonChunks.join("") != "") { 125 | provider.reasoning = reasonChunks.join(""); 126 | } 127 | 128 | logger.appendLine( 129 | `INFO: chatgpt.model: ${provider.model}, chatgpt.question: ${question.trim()}, final response: ${provider.response}`, 130 | ); 131 | } catch (error) { 132 | logger.appendLine( 133 | `ERROR: chatgpt.model: ${provider.model} error: ${error}, backtrace: ${new Error().stack}`, 134 | ); 135 | provider.sendMessage({ 136 | type: "addError", 137 | value: `Error: ${error}`, 138 | autoScroll: provider.autoScroll, 139 | }); 140 | } 141 | } 142 | 143 | /** 144 | * Execute prompt-based tool loop (mimics AI SDK's automatic tool calling) 145 | */ 146 | async function executePromptBasedToolLoop( 147 | provider: ChatGptViewProvider, 148 | systemPrompt: string, 149 | modelName: string, 150 | chunks: string[], 151 | reasonChunks: string[], 152 | toolCallCounter: number, 153 | updateResponse: (message: string) => void, 154 | updateReasoning: (message: string, roundNumber?: number) => void 155 | ): Promise { 156 | const maxSteps = provider.maxSteps || 15; 157 | let currentStep = 0; 158 | let conversationHistory = [...provider.chatHistory]; 159 | 160 | while (currentStep < maxSteps) { 161 | currentStep++; 162 | logger.appendLine(`INFO: Prompt-based tool loop step ${currentStep}/${maxSteps}`); 163 | 164 | // Make API call 165 | const inputs: any = { 166 | system: systemPrompt, 167 | model: provider.apiChat, 168 | messages: conversationHistory, 169 | abortSignal: provider.abortController?.signal, 170 | maxSteps: 1, // Single step for manual control 171 | headers: getHeaders(), 172 | ...(isOpenAIOModel(modelName) && { 173 | providerOptions: { 174 | openai: { 175 | reasoningSummary: "auto", 176 | reasoningEffort: provider.reasoningEffort, 177 | ...(provider.modelConfig.maxTokens > 0 && { 178 | maxCompletionTokens: provider.modelConfig.maxTokens, 179 | }), 180 | }, 181 | }, 182 | }), 183 | ...(!isOpenAIOModel(modelName) && { 184 | maxTokens: provider.modelConfig.maxTokens > 0 ? 
provider.modelConfig.maxTokens : undefined, 185 | temperature: provider.modelConfig.temperature, 186 | }), 187 | }; 188 | 189 | const result = await streamText(inputs); 190 | let accumulatedText = ""; 191 | let stepChunks: string[] = []; 192 | 193 | // Process streaming response 194 | for await (const part of result.fullStream) { 195 | switch (part.type) { 196 | case "text-delta": { 197 | accumulatedText += part.textDelta; 198 | updateResponse(part.textDelta); 199 | chunks.push(part.textDelta); 200 | stepChunks.push(part.textDelta); 201 | break; 202 | } 203 | case "reasoning": { 204 | updateReasoning(part.textDelta, currentStep); 205 | reasonChunks.push(part.textDelta); 206 | break; 207 | } 208 | case "error": { 209 | throw new Error(`${part.error}`); 210 | } 211 | default: { 212 | logger.appendLine( 213 | `INFO: prompt-based-tools step ${currentStep}: ${JSON.stringify(part)}`, 214 | ); 215 | break; 216 | } 217 | } 218 | } 219 | 220 | // Check for tool calls in the accumulated text 221 | const toolCalls = ToolCallParser.parseToolCalls(accumulatedText); 222 | 223 | if (toolCalls.length === 0) { 224 | // No tool calls found, conversation is complete 225 | logger.appendLine(`INFO: No tool calls found in step ${currentStep}, ending loop`); 226 | break; 227 | } 228 | 229 | // Execute tool calls and add results to conversation 230 | const toolResults: any[] = []; 231 | for (const toolCall of toolCalls) { 232 | toolCallCounter++; // Increment counter for each tool call 233 | 234 | // Create tool call UI (exactly like native tool calls) 235 | const toolCallHtml = createPromptToolCallHtml(toolCall, toolCallCounter); 236 | updateResponse(toolCallHtml); 237 | chunks.push(toolCallHtml); 238 | 239 | // Execute tool 240 | const result = await executePromptToolCall(toolCall, provider.toolSet!); 241 | toolResults.push(result); 242 | 243 | // Create tool result UI (exactly like native tool calls) 244 | const toolResultHtml = createPromptToolResultHtml(result, toolCallCounter); 245 | updateResponse(toolResultHtml); 246 | chunks.push(toolResultHtml); 247 | 248 | logger.appendLine(`INFO: Tool ${toolCall.toolName} executed with result: ${JSON.stringify(result.result)}`); 249 | } 250 | 251 | // Add assistant response with tool calls to conversation history 252 | const assistantMessage: CoreMessage = { 253 | role: "assistant", 254 | content: stepChunks.join(""), 255 | }; 256 | conversationHistory.push(assistantMessage); 257 | 258 | // Add tool results as user messages (this is how AI SDK does it) 259 | for (const result of toolResults) { 260 | const toolResultMessage: CoreMessage = { 261 | role: "user", 262 | content: `Tool ${result.toolName} result: ${JSON.stringify(result.result)}` 263 | }; 264 | conversationHistory.push(toolResultMessage); 265 | } 266 | 267 | // Continue the loop for the next step 268 | } 269 | 270 | // Update provider's chat history with the final conversation 271 | provider.chatHistory = conversationHistory; 272 | return toolCallCounter; 273 | } 274 | 275 | /** 276 | * Execute standard chat with native AI SDK tools 277 | */ 278 | async function executeStandardChat( 279 | provider: ChatGptViewProvider, 280 | systemPrompt: string, 281 | modelName: string, 282 | chunks: string[], 283 | reasonChunks: string[], 284 | toolCallCounter: number, 285 | updateResponse: (message: string) => void, 286 | updateReasoning: (message: string, roundNumber?: number) => void 287 | ): Promise { 288 | const inputs: any = { 289 | system: systemPrompt, 290 | model: provider.apiChat, 291 | messages: 
provider.chatHistory, 292 | abortSignal: provider.abortController?.signal, 293 | tools: provider.toolSet?.tools || undefined, 294 | maxSteps: provider.maxSteps, 295 | headers: getHeaders(), 296 | ...(isOpenAIOModel(modelName) && { 297 | providerOptions: { 298 | openai: { 299 | reasoningSummary: "auto", 300 | reasoningEffort: provider.reasoningEffort, 301 | ...(provider.modelConfig.maxTokens > 0 && { 302 | maxCompletionTokens: provider.modelConfig.maxTokens, 303 | }), 304 | }, 305 | }, 306 | }), 307 | ...(!isOpenAIOModel(modelName) && { 308 | maxTokens: provider.modelConfig.maxTokens > 0 ? provider.modelConfig.maxTokens : undefined, 309 | temperature: provider.modelConfig.temperature, 310 | }), 311 | }; 312 | 313 | const result = await streamText(inputs); 314 | for await (const part of result.fullStream) { 315 | switch (part.type) { 316 | case "text-delta": { 317 | updateResponse(part.textDelta); 318 | chunks.push(part.textDelta); 319 | break; 320 | } 321 | case "reasoning": { 322 | updateReasoning(part.textDelta, 1); // Standard chat only has one reasoning round 323 | reasonChunks.push(part.textDelta); 324 | break; 325 | } 326 | case "tool-call": { 327 | toolCallCounter++; 328 | const toolCallHtml = createToolCallHtml(part, toolCallCounter); 329 | updateResponse(toolCallHtml); 330 | chunks.push(toolCallHtml); 331 | break; 332 | } 333 | case "error": { 334 | throw new Error(`${part.error}`); 335 | } 336 | default: { 337 | logger.appendLine( 338 | `INFO: standard chat: ${JSON.stringify(part)}`, 339 | ); 340 | break; 341 | } 342 | } 343 | } 344 | 345 | // Add final assistant response to chat history 346 | const assistantResponse: CoreMessage = { 347 | role: "assistant", 348 | content: chunks.join("") 349 | }; 350 | provider.chatHistory.push(assistantResponse); 351 | 352 | return toolCallCounter; 353 | } 354 | 355 | /** 356 | * Create HTML for native tool calls 357 | */ 358 | function createToolCallHtml(part: any, toolCallCounter: number): string { 359 | let formattedArgs = part.args; 360 | if (typeof formattedArgs === 'string') { 361 | try { 362 | formattedArgs = JSON.parse(formattedArgs); 363 | } catch (e) { 364 | formattedArgs = part.args; 365 | } 366 | } 367 | 368 | const toolCallId = `tool-call-${Date.now()}-${toolCallCounter}`; 369 | const toolIcon = ` 370 | 371 | 372 | `; 373 | 374 | return ` 375 |
376 |
377 | 378 | 379 | 380 |
381 | ${toolIcon} 382 | ${part.toolName} 383 |
384 | Running 385 |
386 | 401 |
`; 402 | } 403 | 404 | /** 405 | * Create HTML for prompt-based tool calls 406 | */ 407 | function createPromptToolCallHtml(toolCall: any, toolCallCounter: number): string { 408 | const toolCallId = `prompt-tool-call-${Date.now()}-${toolCallCounter}`; 409 | const toolIcon = ` 410 | 411 | 412 | `; 413 | 414 | return ` 415 |
416 |
417 | 418 | 419 | 420 |
421 | ${toolIcon} 422 | ${toolCall.toolName} (prompt-based) 423 |
424 | Running 425 |
426 | 441 |
`; 442 | } 443 | 444 | /** 445 | * Create HTML for prompt-based tool results 446 | */ 447 | function createPromptToolResultHtml(result: any, toolCallCounter: number): string { 448 | return ` 449 | ${JSON.stringify(result.result)} 450 | `; 451 | } 452 | -------------------------------------------------------------------------------- /src/prompt-based-tools.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @author Pengfei Ni 3 | * 4 | * @license 5 | * Copyright (c) 2024 - Present, Pengfei Ni 6 | * 7 | * All rights reserved. Code licensed under the ISC license 8 | * 9 | * The above copyright notice and this permission notice shall be included in all 10 | * copies or substantial portions of the Software. 11 | */ 12 | 13 | import { logger } from "./logger"; 14 | import { ToolSet } from "./mcp"; 15 | import { PromptBasedToolCall, PromptBasedToolConfig, PromptBasedToolResult } from "./types"; 16 | 17 | /** 18 | * Default configuration for prompt-based tool calls 19 | */ 20 | export const DEFAULT_PROMPT_TOOL_CONFIG: PromptBasedToolConfig = { 21 | enabled: false, 22 | toolCallPattern: "", 23 | maxToolCalls: 10, 24 | }; 25 | 26 | /** 27 | * Generates tool descriptions for inclusion in system prompts 28 | */ 29 | export function generateToolDescriptions(toolSet: ToolSet): string { 30 | if (!toolSet?.tools || Object.keys(toolSet.tools).length === 0) { 31 | return ""; 32 | } 33 | 34 | const toolDescriptions = Object.entries(toolSet.tools).map(([name, tool]) => { 35 | const description = tool.description || name; 36 | const parameters = tool.parameters || {}; 37 | 38 | // Extract parameter information 39 | const paramInfo = Object.entries(parameters.properties || {}).map(([paramName, paramDef]: [string, any]) => { 40 | const required = parameters.required?.includes(paramName) ? " (required)" : " (optional)"; 41 | const type = paramDef.type || "any"; 42 | const desc = paramDef.description || ""; 43 | return ` - ${paramName} (${type})${required}: ${desc}`; 44 | }).join("\n"); 45 | 46 | return `**${name}**: ${description} 47 | Parameters: 48 | ${paramInfo || " No parameters"}`; 49 | }).join("\n\n"); 50 | 51 | return `# Available Tools 52 | 53 | You have access to the following tools. 
To use a tool, format your response with the tool call syntax shown below: 54 | 55 | 56 | function_name 57 | 58 | { 59 | "param1": "value1", 60 | "param2": "value2" 61 | } 62 | 63 | 64 | 65 | ## Tools: 66 | 67 | ${toolDescriptions} 68 | 69 | Important notes: 70 | - You can make multiple tool calls in a single response 71 | - Always use valid JSON for arguments 72 | - Tool calls will be executed and results will be provided 73 | - Continue your response after tool calls with analysis of the results 74 | `; 75 | } 76 | 77 | /** 78 | * Parses tool calls from AI response text 79 | */ 80 | export function parseToolCalls(text: string): PromptBasedToolCall[] { 81 | const toolCalls: PromptBasedToolCall[] = []; 82 | 83 | // Regex to match tool call blocks 84 | const toolCallRegex = /\s*(.*?)<\/tool_name>\s*(.*?)<\/arguments>\s*<\/tool_call>/gs; 85 | 86 | let match; 87 | let callCounter = 0; 88 | 89 | while ((match = toolCallRegex.exec(text)) !== null && callCounter < DEFAULT_PROMPT_TOOL_CONFIG.maxToolCalls) { 90 | const [fullMatch, toolName, argumentsText] = match; 91 | 92 | try { 93 | const toolNameTrimmed = toolName.trim(); 94 | const argumentsJson = argumentsText.trim(); 95 | 96 | let parsedArguments: Record = {}; 97 | if (argumentsJson) { 98 | try { 99 | parsedArguments = JSON.parse(argumentsJson); 100 | } catch (parseError) { 101 | logger.appendLine(`WARN: Failed to parse tool arguments for ${toolNameTrimmed}: ${parseError}`); 102 | // Try to extract simple key-value pairs as fallback 103 | parsedArguments = extractSimpleArguments(argumentsJson); 104 | } 105 | } 106 | 107 | const toolCall: PromptBasedToolCall = { 108 | id: `prompt-tool-${Date.now()}-${callCounter}`, 109 | toolName: toolNameTrimmed, 110 | arguments: parsedArguments, 111 | rawText: fullMatch, 112 | }; 113 | 114 | toolCalls.push(toolCall); 115 | callCounter++; 116 | 117 | } catch (error) { 118 | logger.appendLine(`ERROR: Failed to parse tool call: ${error}`); 119 | } 120 | } 121 | 122 | return toolCalls; 123 | } 124 | 125 | /** 126 | * Fallback argument parser for non-JSON formats 127 | */ 128 | function extractSimpleArguments(text: string): Record { 129 | const args: Record = {}; 130 | 131 | // Try to extract key: value pairs 132 | const keyValueRegex = /(\w+):\s*["']?([^"'\n,}]+)["']?/g; 133 | let match; 134 | 135 | while ((match = keyValueRegex.exec(text)) !== null) { 136 | const [, key, value] = match; 137 | args[key.trim()] = value.trim(); 138 | } 139 | 140 | return args; 141 | } 142 | 143 | /** 144 | * Executes a prompt-based tool call 145 | */ 146 | export async function executePromptToolCall( 147 | toolCall: PromptBasedToolCall, 148 | toolSet: ToolSet 149 | ): Promise { 150 | try { 151 | const tool = toolSet.tools[toolCall.toolName]; 152 | 153 | if (!tool) { 154 | return { 155 | id: toolCall.id, 156 | toolName: toolCall.toolName, 157 | result: null, 158 | error: `Tool '${toolCall.toolName}' not found`, 159 | }; 160 | } 161 | 162 | logger.appendLine(`INFO: Executing prompt-based tool call: ${toolCall.toolName}`); 163 | 164 | // Execute the tool 165 | if (!tool.execute) { 166 | return { 167 | id: toolCall.id, 168 | toolName: toolCall.toolName, 169 | result: null, 170 | error: `Tool '${toolCall.toolName}' has no execute function`, 171 | }; 172 | } 173 | 174 | const result = await tool.execute(toolCall.arguments, { 175 | toolCallId: toolCall.id, 176 | messages: [] 177 | }); 178 | 179 | return { 180 | id: toolCall.id, 181 | toolName: toolCall.toolName, 182 | result: result, 183 | }; 184 | 185 | } catch (error) { 186 | 
logger.appendLine(`ERROR: Tool execution failed for ${toolCall.toolName}: ${error}`); 187 | 188 | return { 189 | id: toolCall.id, 190 | toolName: toolCall.toolName, 191 | result: null, 192 | error: error instanceof Error ? error.message : String(error), 193 | }; 194 | } 195 | } 196 | 197 | /** 198 | * Processes text to find and execute tool calls, returning updated text with results 199 | */ 200 | export async function processPromptBasedToolCalls( 201 | text: string, 202 | toolSet: ToolSet, 203 | onToolCall?: (toolCall: PromptBasedToolCall) => void, 204 | onToolResult?: (result: PromptBasedToolResult) => void 205 | ): Promise<{ updatedText: string; toolCalls: PromptBasedToolCall[]; results: PromptBasedToolResult[]; }> { 206 | 207 | const toolCalls = parseToolCalls(text); 208 | const results: PromptBasedToolResult[] = []; 209 | let updatedText = text; 210 | 211 | if (toolCalls.length === 0) { 212 | return { updatedText, toolCalls, results }; 213 | } 214 | 215 | logger.appendLine(`INFO: Found ${toolCalls.length} prompt-based tool calls`); 216 | 217 | // Execute tool calls sequentially 218 | for (const toolCall of toolCalls) { 219 | // Notify about tool call 220 | if (onToolCall) { 221 | onToolCall(toolCall); 222 | } 223 | 224 | // Execute the tool 225 | const result = await executePromptToolCall(toolCall, toolSet); 226 | results.push(result); 227 | 228 | // Notify about result 229 | if (onToolResult) { 230 | onToolResult(result); 231 | } 232 | } 233 | 234 | return { updatedText, toolCalls, results }; 235 | } 236 | -------------------------------------------------------------------------------- /src/prompt-manager-provider.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @author Pengfei Ni 3 | * 4 | * @license 5 | * Copyright (c) 2024 - Present, Pengfei Ni 6 | * 7 | * All rights reserved. Code licensed under the ISC license 8 | * 9 | * The above copyright notice and this permission notice shall be included in all 10 | * copies or substantial portions of the Software. 
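 *
 * PromptManagerProvider backs the prompt manager webview: it persists a
 * PromptStore of user-defined prompts in VS Code global state and handles
 * the add/update/delete/get messages posted back from the webview UI.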
11 | */ 12 | import * as vscode from "vscode"; 13 | import { Prompt, PromptStore } from "./types"; 14 | 15 | export default class PromptManagerProvider 16 | implements vscode.WebviewViewProvider 17 | { 18 | private webView?: vscode.WebviewView; 19 | private store: PromptStore = { prompts: [] }; 20 | private _panel?: vscode.WebviewPanel; 21 | 22 | constructor(private context: vscode.ExtensionContext) { 23 | this.loadPrompts(); 24 | } 25 | 26 | private loadPrompts() { 27 | this.store = this.context.globalState.get("prompts", { 28 | prompts: [], 29 | }); 30 | } 31 | 32 | private savePrompts() { 33 | this.context.globalState.update("prompts", this.store); 34 | } 35 | 36 | public resolveWebviewView( 37 | webviewView: vscode.WebviewView, 38 | _context: vscode.WebviewViewResolveContext, 39 | _token: vscode.CancellationToken, 40 | ) { 41 | this.webView = webviewView; 42 | webviewView.webview.options = { 43 | enableScripts: true, 44 | localResourceRoots: [this.context.extensionUri], 45 | }; 46 | 47 | webviewView.webview.html = this.getWebviewContent(webviewView.webview); 48 | 49 | webviewView.webview.onDidReceiveMessage(async (data) => { 50 | switch (data.type) { 51 | case "addPrompt": 52 | this.addPrompt(data.prompt); 53 | break; 54 | case "updatePrompt": 55 | this.updatePrompt(data.prompt); 56 | break; 57 | case "deletePrompt": 58 | this.deletePrompt(data.id); 59 | break; 60 | case "getPrompts": 61 | this.handleGetPrompts(webviewView.webview); 62 | break; 63 | } 64 | }); 65 | } 66 | 67 | public addPrompt(prompt: Omit<Prompt, "id" | "createdAt" | "updatedAt">) { 68 | const now = Date.now(); 69 | const newPrompt: Prompt = { 70 | id: this.generateId(), 71 | ...prompt, 72 | createdAt: now, 73 | updatedAt: now, 74 | }; 75 | this.store.prompts.push(newPrompt); 76 | this.savePrompts(); 77 | this.sendPromptsToAll(this.store.prompts); 78 | } 79 | 80 | public updatePrompt(prompt: Prompt) { 81 | const index = this.store.prompts.findIndex((p) => p.id === prompt.id); 82 | if (index !== -1) { 83 | this.store.prompts[index] = { 84 | ...prompt, 85 | updatedAt: Date.now(), 86 | }; 87 | this.savePrompts(); 88 | this.sendPromptsToAll(this.store.prompts); 89 | } 90 | } 91 | 92 | public deletePrompt(id: string) { 93 | this.store.prompts = this.store.prompts.filter((p) => p.id !== id); 94 | this.savePrompts(); 95 | this.sendPromptsToAll(this.store.prompts); 96 | } 97 | 98 | private sendPromptsToAll(prompts: Prompt[]) { 99 | this.webView?.webview.postMessage({ 100 | type: "updatePrompts", 101 | prompts: prompts, 102 | }); 103 | 104 | if (this._panel?.webview) { 105 | this._panel.webview.postMessage({ 106 | type: "updatePrompts", 107 | prompts: prompts, 108 | }); 109 | } 110 | } 111 | 112 | private generateId(): string { 113 | return Math.random().toString(36).substring(2) + Date.now().toString(36); 114 | } 115 | 116 | public getPrompts() { 117 | return this.store.prompts; 118 | } 119 | 120 | public setPanel(panel: vscode.WebviewPanel | undefined) { 121 | this._panel = panel; 122 | } 123 | 124 | public getWebviewContent(webview: vscode.Webview) { 125 | const scriptUri = webview.asWebviewUri( 126 | vscode.Uri.joinPath( 127 | this.context.extensionUri, 128 | "media", 129 | "prompt-manager.js", 130 | ), 131 | ); 132 | const stylesUri = webview.asWebviewUri( 133 | vscode.Uri.joinPath( 134 | this.context.extensionUri, 135 | "media", 136 | "prompt-manager.css", 137 | ), 138 | ); 139 | const tailwindUri = webview.asWebviewUri( 140 | vscode.Uri.joinPath( 141 | this.context.extensionUri, 142 | "media", 143 | "vendor", 144 | "tailwindcss.3.2.4.min.js", 145 | ), 146 | );
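    // Assemble the webview HTML. The webview-safe URIs built above
    // (prompt-manager script, styles, and the bundled Tailwind runtime)
    // are the assets the returned markup is expected to load.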
147 | 148 | return ` 149 | 150 | 151 | 152 | 153 | 154 | 155 |
156 |
157 |
158 |
159 | 160 | 161 | 162 | 163 |

Prompt Manager

164 |
165 | 171 |
172 |
173 |
174 |
175 | 176 | 177 | `; 178 | } 179 | 180 | private handleGetPrompts(webview: vscode.Webview) { 181 | webview.postMessage({ 182 | type: "updatePrompts", 183 | prompts: this.store.prompts, 184 | }); 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /src/tool-call-parser.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @author Pengfei Ni 3 | * 4 | * @license 5 | * Copyright (c) 2024 - Present, Pengfei Ni 6 | * 7 | * All rights reserved. Code licensed under the ISC license 8 | * 9 | * The above copyright notice and this permission notice shall be included in all 10 | * copies or substantial portions of the Software. 11 | */ 12 | 13 | import { logger } from "./logger"; 14 | import { PromptBasedToolCall } from "./types"; 15 | 16 | /** 17 | * Enhanced tool call parser that supports multiple formats 18 | */ 19 | export class ToolCallParser { 20 | private static readonly PATTERNS = { 21 | // Primary format: <tool_call><tool_name>name</tool_name><arguments>json</arguments></tool_call> 22 | XML_STYLE: /<tool_call>\s*<tool_name>(.*?)<\/tool_name>\s*<arguments>(.*?)<\/arguments>\s*<\/tool_call>/gs, 23 | 24 | // Alternative format: ```tool_call\nname\njson\n``` 25 | MARKDOWN_STYLE: /```tool_call\s*\n([^\n]+)\n(.*?)\n```/gs, 26 | 27 | // Function call style: function_name({"param": "value"}) 28 | FUNCTION_STYLE: /(\w+)\s*\(\s*(\{.*?\})\s*\)/gs, 29 | 30 | // JSON style: {"tool": "name", "arguments": {...}} 31 | JSON_STYLE: /\{\s*"tool"\s*:\s*"([^"]+)"\s*,\s*"arguments"\s*:\s*(\{.*?\})\s*\}/gs, 32 | }; 33 | 34 | /** 35 | * Parse tool calls from text using multiple patterns 36 | */ 37 | static parseToolCalls(text: string, maxCalls: number = 10): PromptBasedToolCall[] { 38 | const toolCalls: PromptBasedToolCall[] = []; 39 | let callCounter = 0; 40 | 41 | // Try each pattern in order of preference 42 | for (const [patternName, pattern] of Object.entries(this.PATTERNS)) { 43 | if (callCounter >= maxCalls) break; 44 | 45 | const calls = this.parseWithPattern(text, pattern, patternName, callCounter, maxCalls); 46 | toolCalls.push(...calls); 47 | callCounter += calls.length; 48 | } 49 | 50 | // Remove duplicates based on tool name and arguments 51 | return this.removeDuplicates(toolCalls); 52 | } 53 | 54 | /** 55 | * Parse tool calls using a specific pattern 56 | */ 57 | private static parseWithPattern( 58 | text: string, 59 | pattern: RegExp, 60 | patternName: string, 61 | startCounter: number, 62 | maxCalls: number 63 | ): PromptBasedToolCall[] { 64 | const toolCalls: PromptBasedToolCall[] = []; 65 | let match; 66 | let callCounter = startCounter; 67 | 68 | // Reset regex lastIndex 69 | pattern.lastIndex = 0; 70 | 71 | while ((match = pattern.exec(text)) !== null && callCounter < maxCalls) { 72 | try { 73 | const toolCall = this.createToolCall(match, patternName, callCounter); 74 | if (toolCall) { 75 | toolCalls.push(toolCall); 76 | callCounter++; 77 | } 78 | } catch (error) { 79 | logger.appendLine(`WARN: Failed to parse tool call with ${patternName}: ${error}`); 80 | } 81 | } 82 | 83 | if (toolCalls.length > 0) { 84 | logger.appendLine(`INFO: Parsed ${toolCalls.length} tool calls using ${patternName} pattern`); 85 | } 86 | 87 | return toolCalls; 88 | } 89 | 90 | /** 91 | * Create a tool call object from regex match 92 | */ 93 | private static createToolCall( 94 | match: RegExpExecArray, 95 | patternName: string, 96 | counter: number 97 | ): PromptBasedToolCall | null { 98 | const [fullMatch, toolName, argumentsText] = match; 99 | 100 | if (!toolName?.trim()) { 101 | return null; 102 | } 103 | 104 | const toolNameTrimmed = toolName.trim();
105 | let parsedArguments: Record<string, any> = {}; 106 | 107 | if (argumentsText?.trim()) { 108 | try { 109 | parsedArguments = JSON.parse(argumentsText.trim()); 110 | } catch (parseError) { 111 | logger.appendLine(`WARN: Failed to parse JSON arguments for ${toolNameTrimmed}: ${parseError}`); 112 | 113 | // Try fallback parsing 114 | parsedArguments = this.extractFallbackArguments(argumentsText.trim()); 115 | } 116 | } 117 | 118 | return { 119 | id: `prompt-tool-${Date.now()}-${counter}`, 120 | toolName: toolNameTrimmed, 121 | arguments: parsedArguments, 122 | rawText: fullMatch, 123 | }; 124 | } 125 | 126 | /** 127 | * Extract arguments using fallback methods when JSON parsing fails 128 | */ 129 | private static extractFallbackArguments(text: string): Record<string, any> { 130 | const args: Record<string, any> = {}; 131 | 132 | // Try to extract key-value pairs in various formats 133 | const patterns = [ 134 | // key: "value" or key: 'value' 135 | /(\w+)\s*:\s*["']([^"']+)["']/g, 136 | // key: value (without quotes) 137 | /(\w+)\s*:\s*([^,}\n]+)/g, 138 | // "key": "value" 139 | /"(\w+)"\s*:\s*"([^"]+)"/g, 140 | ]; 141 | 142 | for (const pattern of patterns) { 143 | let match; 144 | pattern.lastIndex = 0; 145 | 146 | while ((match = pattern.exec(text)) !== null) { 147 | const [, key, value] = match; 148 | if (key && value) { 149 | args[key.trim()] = value.trim(); 150 | } 151 | } 152 | } 153 | 154 | return args; 155 | } 156 | 157 | /** 158 | * Remove duplicate tool calls 159 | */ 160 | private static removeDuplicates(toolCalls: PromptBasedToolCall[]): PromptBasedToolCall[] { 161 | const seen = new Set<string>(); 162 | return toolCalls.filter(call => { 163 | const signature = `${call.toolName}:${JSON.stringify(call.arguments)}`; 164 | if (seen.has(signature)) { 165 | return false; 166 | } 167 | seen.add(signature); 168 | return true; 169 | }); 170 | } 171 | 172 | /** 173 | * Check if text contains potential tool calls 174 | */ 175 | static containsToolCalls(text: string): boolean { 176 | for (const pattern of Object.values(this.PATTERNS)) { 177 | pattern.lastIndex = 0; 178 | if (pattern.test(text)) { 179 | return true; 180 | } 181 | } 182 | return false; 183 | } 184 | 185 | /** 186 | * Extract text content without tool calls 187 | */ 188 | static extractTextWithoutToolCalls(text: string): string { 189 | let cleanText = text; 190 | 191 | for (const pattern of Object.values(this.PATTERNS)) { 192 | pattern.lastIndex = 0; 193 | cleanText = cleanText.replace(pattern, ''); 194 | } 195 | 196 | // Clean up extra whitespace 197 | return cleanText.replace(/\n\s*\n\s*\n/g, '\n\n').trim(); 198 | } 199 | 200 | /** 201 | * Validate tool call format 202 | */ 203 | static validateToolCall(toolCall: PromptBasedToolCall): { valid: boolean; errors: string[]; } { 204 | const errors: string[] = []; 205 | 206 | if (!toolCall.toolName) { 207 | errors.push("Tool name is required"); 208 | } 209 | 210 | if (typeof toolCall.arguments !== 'object' || toolCall.arguments === null) { 211 | errors.push("Arguments must be an object"); 212 | } 213 | 214 | if (!toolCall.id) { 215 | errors.push("Tool call ID is required"); 216 | } 217 | 218 | return { 219 | valid: errors.length === 0, 220 | errors, 221 | }; 222 | } 223 | 224 | /** 225 | * Format tool call for display 226 | */ 227 | static formatToolCallForDisplay(toolCall: PromptBasedToolCall): string { 228 | return `Tool: ${toolCall.toolName}\nArguments: ${JSON.stringify(toolCall.arguments, null, 2)}`; 229 | } 230 | 231 | /** 232 | * Generate tool call text in the standard format
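 *
 * A hedged example (the `read_file` name and `path` argument are illustrative,
 * not tools shipped with the extension): calling
 *
 *   ToolCallParser.generateToolCallText("read_file", { path: "src/extension.ts" })
 *
 * is expected to produce a block that the XML_STYLE pattern above can parse back:
 *
 *   <tool_call>
 *   <tool_name>read_file</tool_name>
 *   <arguments>
 *   {
 *     "path": "src/extension.ts"
 *   }
 *   </arguments>
 *   </tool_call>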
233 | */ 234 | static generateToolCallText(toolName: string, arguments_: Record<string, any>): string { 235 | return `<tool_call> 236 | <tool_name>${toolName}</tool_name> 237 | <arguments> 238 | ${JSON.stringify(arguments_, null, 2)} 239 | </arguments> 240 | </tool_call>`; 241 | } 242 | } 243 | -------------------------------------------------------------------------------- /src/types.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @author Pengfei Ni 3 | * 4 | * @license 5 | * Copyright (c) 2024 - Present, Pengfei Ni 6 | * 7 | * All rights reserved. Code licensed under the ISC license 8 | * 9 | * The above copyright notice and this permission notice shall be included in all 10 | * copies or substantial portions of the Software. 11 | */ 12 | export interface Prompt { 13 | id: string; 14 | name: string; 15 | content: string; 16 | createdAt: number; 17 | updatedAt: number; 18 | } 19 | 20 | export interface PromptStore { 21 | prompts: Prompt[]; 22 | } 23 | 24 | export function isOpenAIOModel(model: string) { 25 | const m = model.toLowerCase(); 26 | return ( 27 | m.includes("o1") || 28 | m.includes("o3") || 29 | m.includes("o4") 30 | ); 31 | } 32 | 33 | 34 | export function isReasoningModel(model: string) { 35 | const m = model.toLowerCase(); 36 | return ( 37 | isOpenAIOModel(model) || 38 | m.includes("deepseek-r1") || 39 | m.includes("reason") || 40 | m.includes("claude-3-7") || 41 | m.includes("qwen3") 42 | ); 43 | } 44 | 45 | // Prompt-based tool call types 46 | export interface PromptBasedToolCall { 47 | id: string; 48 | toolName: string; 49 | arguments: Record<string, any>; 50 | rawText: string; 51 | } 52 | 53 | export interface PromptBasedToolResult { 54 | id: string; 55 | toolName: string; 56 | result: any; 57 | error?: string; 58 | } 59 | 60 | export interface PromptBasedToolConfig { 61 | enabled: boolean; 62 | toolCallPattern: string; 63 | maxToolCalls: number; 64 | } 65 | -------------------------------------------------------------------------------- /src/utils.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable eqeqeq */ 2 | /* eslint-disable @typescript-eslint/naming-convention */ 3 | /** 4 | * @author Pengfei Ni 5 | * 6 | * @license 7 | * Copyright (c) 2024 - Present, Pengfei Ni 8 | * 9 | * All rights reserved. Code licensed under the ISC license 10 | * 11 | * The above copyright notice and this permission notice shall be included in all 12 | * copies or substantial portions of the Software.
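 *
 * fetchOpenAI below wraps the global fetch and strips the `strict` flag from
 * any function tools in an OpenAI-style request body before forwarding the
 * call, presumably because some OpenAI-compatible endpoints reject that field.
 *
 * A minimal usage sketch (an assumption about wiring, not necessarily how this
 * extension registers it; the official openai SDK accepts a custom fetch
 * implementation):
 *
 *   const client = new OpenAI({ apiKey, fetch: fetchOpenAI });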
13 | */ 14 | export async function fetchOpenAI(url: RequestInfo | URL, options?: RequestInit): Promise<Response> { 15 | if (!options?.body) { 16 | return fetch(url, options); 17 | } 18 | 19 | // Parse the request body 20 | const body = JSON.parse(options.body as string); 21 | 22 | // Check if there are tools with functions 23 | if (body.tools?.length > 0) { 24 | body.tools = body.tools.map((tool: any) => { 25 | if (tool.type === 'function' && tool.function.strict) { 26 | // Remove the strict flag if present 27 | const { strict, ...functionWithoutStrict } = tool.function; 28 | return { 29 | ...tool, 30 | function: functionWithoutStrict 31 | }; 32 | } 33 | return tool; 34 | }); 35 | } 36 | 37 | // Create new options with modified body 38 | const newOptions = { 39 | ...options, 40 | body: JSON.stringify(body) 41 | }; 42 | 43 | console.log( 44 | `Body ${JSON.stringify( 45 | JSON.parse((newOptions?.body as string) || "{}"), 46 | null, 47 | 2 48 | )}`); 49 | 50 | // Make the actual fetch call 51 | return fetch(url, newOptions); 52 | } -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "module": "commonjs", 4 | "target": "ES2020", 5 | "lib": [ 6 | "ES2020" 7 | ], 8 | "sourceMap": true, 9 | "rootDir": "src", 10 | "strict": true, /* enable all strict type-checking options */ 11 | "esModuleInterop": true, 12 | /* Additional Checks */ 13 | // "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */ 14 | // "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */ 15 | // "noUnusedParameters": true, /* Report errors on unused parameters. */ 16 | } 17 | } -------------------------------------------------------------------------------- /vsc-extension-quickstart.md: -------------------------------------------------------------------------------- 1 | # Welcome to your VS Code Extension 2 | 3 | ## What's in the folder 4 | 5 | * This folder contains all of the files necessary for your extension. 6 | * `package.json` - this is the manifest file in which you declare your extension and command. 7 | * The sample plugin registers a command and defines its title and command name. With this information VS Code can show the command in the command palette. It doesn’t yet need to load the plugin. 8 | * `src/extension.ts` - this is the main file where you will provide the implementation of your command. 9 | * The file exports one function, `activate`, which is called the very first time your extension is activated (in this case by executing the command). Inside the `activate` function we call `registerCommand`. 10 | * We pass the function containing the implementation of the command as the second parameter to `registerCommand`. 11 | 12 | ## Setup 13 | 14 | * install the recommended extensions (amodio.tsl-problem-matcher and dbaeumer.vscode-eslint) 15 | 16 | 17 | ## Get up and running straight away 18 | 19 | * Press `F5` to open a new window with your extension loaded. 20 | * Run your command from the command palette by pressing (`Ctrl+Shift+P` or `Cmd+Shift+P` on Mac) and typing `Hello World`. 21 | * Set breakpoints in your code inside `src/extension.ts` to debug your extension. 22 | * Find output from your extension in the debug console. 23 | 24 | ## Make changes 25 | 26 | * You can relaunch the extension from the debug toolbar after changing code in `src/extension.ts`.
27 | * You can also reload (`Ctrl+R` or `Cmd+R` on Mac) the VS Code window with your extension to load your changes. 28 | 29 | 30 | ## Explore the API 31 | 32 | * You can open the full set of our API when you open the file `node_modules/@types/vscode/index.d.ts`. 33 | 34 | ## Run tests 35 | 36 | * Open the debug viewlet (`Ctrl+Shift+D` or `Cmd+Shift+D` on Mac) and from the launch configuration dropdown pick `Extension Tests`. 37 | * Press `F5` to run the tests in a new window with your extension loaded. 38 | * See the output of the test result in the debug console. 39 | * Make changes to `src/test/suite/extension.test.ts` or create new test files inside the `test/suite` folder. 40 | * The provided test runner will only consider files matching the name pattern `**.test.ts`. 41 | * You can create folders inside the `test` folder to structure your tests any way you want. 42 | 43 | ## Go further 44 | 45 | * Reduce the extension size and improve the startup time by [bundling your extension](https://code.visualstudio.com/api/working-with-extensions/bundling-extension). 46 | * [Publish your extension](https://code.visualstudio.com/api/working-with-extensions/publishing-extension) on the VS Code extension marketplace. 47 | * Automate builds by setting up [Continuous Integration](https://code.visualstudio.com/api/working-with-extensions/continuous-integration). 48 | --------------------------------------------------------------------------------