├── .gitignore ├── .github ├── FUNDING.yml ├── copilot-instructions.md └── instructions │ └── ts.instructions.md ├── images └── icon.png ├── tsconfig.json ├── .vscode ├── tasks.json └── launch.json ├── SECURITY.md ├── src ├── state.ts ├── log.ts ├── http │ ├── auth.ts │ ├── routes │ │ ├── models.ts │ │ ├── health.ts │ │ └── chat.ts │ ├── utils.ts │ └── server.ts ├── config.ts ├── types │ ├── polka.d.ts │ └── openai-types.ts ├── extension.ts ├── models.ts ├── status.ts └── messages.ts ├── package.json ├── AGENTS.md ├── README.md └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | out 3 | .vscode-test 4 | *.vsix 5 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: larsbaunwall -------------------------------------------------------------------------------- /images/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/larsbaunwall/vscode-copilot-bridge/HEAD/images/icon.png -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2020", 4 | "module": "commonjs", 5 | "lib": ["ES2020"], 6 | "outDir": "out", 7 | "rootDir": "src", 8 | "strict": true, 9 | "sourceMap": true, 10 | "esModuleInterop": true, 11 | "allowSyntheticDefaultImports": true, 12 | "forceConsistentCasingInFileNames": true, 13 | "types": ["node", "vscode"] 14 | }, 15 | "include": ["src/**/*.ts"] 16 | } 17 | -------------------------------------------------------------------------------- /.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0.0", 3 | "tasks": [ 4 | { 5 | "type": "npm", 6 | "script": "compile", 7 | "group": "build", 8 | "problemMatcher": ["$tsc"], 9 | "label": "npm: compile" 10 | }, 11 | { 12 | "type": "npm", 13 | "script": "watch", 14 | "isBackground": true, 15 | "problemMatcher": [ 16 | "$tsc-watch" 17 | ], 18 | "label": "npm: watch" 19 | } 20 | ] 21 | } 22 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security & Compliance 2 | 3 | - Uses only the public **VS Code Language Model API** (`vscode.lm`). 4 | - Does **not** call, impersonate, or reverse-engineer private GitHub Copilot endpoints. 5 | - The HTTP server binds to **localhost** only; the host is not configurable. 6 | - Mandatory bearer-token auth via `bridge.token`. 7 | - Rate and concurrency limits (`bridge.maxConcurrent`) preserve interactive editor usage; excess requests receive 429. 8 | - No telemetry or prompt/response data is collected or transmitted by the author.
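A quick way to sanity-check the auth and loopback rules above: a minimal sketch, assuming the bridge reported port 3030 and `bridge.token` is set to `secret` (both placeholders, not defaults).

```typescript
// Minimal sketch: confirm the bearer-token contract from a local Node 18+ script.
// Port 3030 and the token "secret" are placeholders.
const base = 'http://127.0.0.1:3030';

const anon = await fetch(`${base}/v1/models`);
console.log(anon.status); // 401: every endpoint except /health requires the token

const authed = await fetch(`${base}/v1/models`, {
  headers: { Authorization: 'Bearer secret' },
});
console.log(authed.status, await authed.json()); // 200 and an OpenAI-style model list
```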
-------------------------------------------------------------------------------- /src/state.ts: -------------------------------------------------------------------------------- 1 | import * as vscode from 'vscode'; 2 | import type { Server } from 'http'; 3 | 4 | export interface BridgeState { 5 | server?: Server; 6 | modelCache?: vscode.LanguageModelChat; // official API type 7 | statusBarItem?: vscode.StatusBarItem; 8 | output?: vscode.OutputChannel; 9 | running: boolean; 10 | activeRequests: number; 11 | lastReason?: string; 12 | modelAttempted?: boolean; // whether we've attempted to resolve a model yet 13 | } 14 | 15 | export const state: BridgeState = { 16 | running: false, 17 | activeRequests: 0, 18 | modelAttempted: false, 19 | }; 20 | -------------------------------------------------------------------------------- /src/log.ts: -------------------------------------------------------------------------------- 1 | import * as vscode from 'vscode'; 2 | import { state } from './state'; 3 | import { getBridgeConfig } from './config'; 4 | 5 | export const ensureOutput = (): void => { 6 | if (!state.output) { 7 | state.output = vscode.window.createOutputChannel('Copilot Bridge'); 8 | } 9 | }; 10 | 11 | export const info = (msg: string): void => { 12 | ensureOutput(); 13 | state.output?.appendLine(msg); 14 | }; 15 | 16 | export const verbose = (msg: string): void => { 17 | const cfg = getBridgeConfig(); 18 | if (!cfg.verbose) return; 19 | ensureOutput(); 20 | state.output?.appendLine(msg); 21 | }; 22 | 23 | export const error = (msg: string): void => { 24 | ensureOutput(); 25 | state.output?.appendLine(msg); 26 | }; 27 | -------------------------------------------------------------------------------- /src/http/auth.ts: -------------------------------------------------------------------------------- 1 | import type { IncomingMessage } from 'http'; 2 | 3 | // Cache the authorization header to avoid repeated concatenation 4 | let cachedToken = ''; 5 | let cachedAuthHeader = ''; 6 | 7 | /** 8 | * Checks if the request is authorized against the configured token. 9 | * Caches the full "Bearer <token>" header to optimize the hot path.
10 | */ 11 | export const isAuthorized = (req: IncomingMessage, token: string): boolean => { 12 | if (!token) { 13 | cachedToken = ''; 14 | cachedAuthHeader = ''; 15 | return false; 16 | } 17 | 18 | // Update cache if token changed 19 | if (token !== cachedToken) { 20 | cachedToken = token; 21 | cachedAuthHeader = `Bearer ${token}`; 22 | } 23 | 24 | return req.headers.authorization === cachedAuthHeader; 25 | }; 26 | -------------------------------------------------------------------------------- /src/config.ts: -------------------------------------------------------------------------------- 1 | import * as vscode from 'vscode'; 2 | 3 | export const LOOPBACK_HOST = '127.0.0.1' as const; 4 | 5 | export interface BridgeConfig { 6 | readonly enabled: boolean; 7 | readonly host: typeof LOOPBACK_HOST; 8 | readonly port: number; 9 | readonly token: string; 10 | readonly historyWindow: number; 11 | readonly verbose: boolean; 12 | readonly maxConcurrent: number; 13 | } 14 | 15 | export const getBridgeConfig = (): BridgeConfig => { 16 | const cfg = vscode.workspace.getConfiguration('bridge'); 17 | const resolved = { 18 | enabled: cfg.get('enabled', false), 19 | host: LOOPBACK_HOST, 20 | port: cfg.get('port', 0), 21 | token: cfg.get('token', '').trim(), 22 | historyWindow: cfg.get('historyWindow', 3), 23 | verbose: cfg.get('verbose', false), 24 | maxConcurrent: cfg.get('maxConcurrent', 1), 25 | } satisfies BridgeConfig; 26 | return resolved; 27 | }; 28 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "Run Extension (Copilot Bridge)", 6 | "type": "extensionHost", 7 | "request": "launch", 8 | "runtimeExecutable": "${execPath}", 9 | "args": [ 10 | "--extensionDevelopmentPath=${workspaceFolder}" 11 | ], 12 | "outFiles": [ 13 | "${workspaceFolder}/out/**/*.js" 14 | ], 15 | "preLaunchTask": "npm: compile", 16 | "env": { 17 | "BRIDGE_VERBOSE": "1" 18 | } 19 | }, 20 | { 21 | "name": "Run Extension + Attach Debug Server", 22 | "type": "extensionHost", 23 | "request": "launch", 24 | "runtimeExecutable": "${execPath}", 25 | "args": [ 26 | "--extensionDevelopmentPath=${workspaceFolder}" 27 | ], 28 | "outFiles": [ 29 | "${workspaceFolder}/out/**/*.js" 30 | ], 31 | "preLaunchTask": "npm: watch", 32 | "env": { 33 | "BRIDGE_VERBOSE": "1" 34 | } 35 | } 36 | ] 37 | } 38 | -------------------------------------------------------------------------------- /src/types/polka.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'polka' { 2 | import { IncomingMessage, ServerResponse } from 'http'; 3 | 4 | export interface PolkaRequest extends IncomingMessage { 5 | params?: Record<string, string>; 6 | } 7 | 8 | export type Next = () => void; 9 | export type Middleware = (req: PolkaRequest, res: ServerResponse, next: Next) => void; 10 | export type Handler = (req: PolkaRequest, res: ServerResponse) => void; 11 | 12 | export interface PolkaOptions { 13 | onError?: (err: Error, req: PolkaRequest, res: ServerResponse, next: Next) => void; 14 | onNoMatch?: (req: PolkaRequest, res: ServerResponse) => void; 15 | } 16 | 17 | export interface PolkaInstance { 18 | use(mw: Middleware): PolkaInstance; 19 | use(path: string, mw: Middleware): PolkaInstance; 20 | get(path: string, handler: Handler): PolkaInstance; 21 | post(path: string, handler: Handler): PolkaInstance; 22 | put(path: string,
handler: Handler): PolkaInstance; 23 | delete(path: string, handler: Handler): PolkaInstance; 24 | listen(port: number, host: string, cb: () => void): void; 25 | server?: import('http').Server; 26 | } 27 | 28 | function polka(options?: PolkaOptions): PolkaInstance; 29 | export default polka; 30 | } 31 | -------------------------------------------------------------------------------- /src/http/routes/models.ts: -------------------------------------------------------------------------------- 1 | import { writeJson, writeErrorResponse } from '../utils'; 2 | import { listCopilotModels } from '../../models'; 3 | import { verbose } from '../../log'; 4 | import type { ServerResponse } from 'http'; 5 | 6 | interface ModelObject { 7 | readonly id: string; 8 | readonly object: 'model'; 9 | readonly created: number; 10 | readonly owned_by: string; 11 | readonly permission: readonly unknown[]; 12 | readonly root: string; 13 | readonly parent: null; 14 | } 15 | 16 | interface ModelsListResponse { 17 | readonly object: 'list'; 18 | readonly data: readonly ModelObject[]; 19 | } 20 | 21 | export const handleModelsRequest = async (res: ServerResponse): Promise<void> => { 22 | try { 23 | const modelIds = await listCopilotModels(); 24 | verbose(`Models listed: ${modelIds.length} available`); 25 | 26 | const models: ModelObject[] = modelIds.map((id: string) => ({ 27 | id, 28 | object: 'model' as const, 29 | created: Math.floor(Date.now() / 1000), 30 | owned_by: 'copilot', 31 | permission: [], 32 | root: id, 33 | parent: null, 34 | })); 35 | 36 | const response: ModelsListResponse = { 37 | object: 'list', 38 | data: models, 39 | }; 40 | 41 | writeJson(res, 200, response); 42 | } catch (e) { 43 | const msg = e instanceof Error ? e.message : String(e); 44 | verbose(`Models request failed: ${msg}`); 45 | writeErrorResponse(res, 500, msg || 'Failed to list models', 'server_error', 'internal_error'); 46 | } 47 | }; 48 | -------------------------------------------------------------------------------- /src/types/openai-types.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * OpenAI API compatible types for request and response handling 3 | */ 4 | 5 | export interface OpenAIToolCall { 6 | readonly id: string; 7 | readonly type: 'function'; 8 | readonly function: { 9 | readonly name: string; 10 | readonly arguments: string; 11 | }; 12 | } 13 | 14 | export interface OpenAIMessage { 15 | readonly role: 'assistant'; 16 | readonly content: string | null; 17 | readonly tool_calls?: OpenAIToolCall[]; 18 | readonly function_call?: { 19 | readonly name: string; 20 | readonly arguments: string; 21 | }; 22 | } 23 | 24 | export interface OpenAIChoice { 25 | readonly index: number; 26 | readonly message?: OpenAIMessage; 27 | readonly delta?: Partial<OpenAIMessage>; 28 | readonly finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | null; 29 | } 30 | 31 | export interface OpenAIResponse { 32 | readonly id: string; 33 | readonly object: 'chat.completion' | 'chat.completion.chunk'; 34 | readonly created: number; 35 | readonly model: string; 36 | readonly choices: OpenAIChoice[]; 37 | readonly usage?: { 38 | readonly prompt_tokens: number; 39 | readonly completion_tokens: number; 40 | readonly total_tokens: number; 41 | }; 42 | } 43 | 44 | export interface ChatCompletionContext { 45 | readonly requestId: string; 46 | readonly modelName: string; 47 | readonly created: number; 48 | readonly hasTools: boolean; 49 | readonly isStreaming: boolean; 50 | } 51 | 52 | export interface
ProcessedResponseData { 53 | readonly content: string; 54 | readonly toolCalls: OpenAIToolCall[]; 55 | readonly finishReason: OpenAIChoice['finish_reason']; 56 | } -------------------------------------------------------------------------------- /.github/copilot-instructions.md: -------------------------------------------------------------------------------- 1 | ## Copilot Usage Notes 2 | 3 | Always skim [AGENTS.md](../AGENTS.md) before making changes—the document is the single source of truth for architecture, performance targets, and workflow expectations. 4 | 5 | ### Hot-path rules 6 | 7 | - Reuse the helpers in `src/http/utils.ts` (`writeUnauthorized`, `writeNotFound`, `writeRateLimit`, `writeErrorResponse`) instead of hand-written JSON responses. 8 | - Preserve the SSE contract in `src/http/routes/chat.ts`: emit role chunk first, follow with `data: { ... }` payloads, and terminate with `data: [DONE]`. 9 | - When streaming, keep `socket.setNoDelay(true)` on the response socket to avoid latency regressions. 10 | - Honor `state.activeRequests` concurrency guard and return early 429s via `writeRateLimit`. 11 | 12 | ### Tool calling compatibility 13 | 14 | - `mergeTools` already merges deprecated `functions`; prefer extending it over new code paths. 15 | - The bridge treats `tool_choice: "required"` like `"auto"` and ignores `parallel_tool_calls`—reflect this limitation in docs if behavior changes. 16 | - Stream tool call deltas using `delta.tool_calls` chunks containing JSON-encoded argument strings. Downstream clients should replace, not append, argument fragments. 17 | 18 | ### Scope & contracts 19 | 20 | - Public endpoints are `/health`, `/v1/models`, `/v1/chat/completions`. Changing contracts requires README updates and a version bump. 21 | - Keep the bridge loopback-only unless a new configuration knob is explicitly approved. 22 | - Update configuration docs when introducing new `bridge.*` settings and run `npm run compile` before handing off changes. 23 | 24 | ### Workflow 25 | 26 | - Plan with the todo-list tool, keep diffs minimal, and avoid formatting unrelated regions. 27 | - Capture limitations or behavior differences (e.g., missing OpenAI response fields) in comments or docs so clients aren’t surprised. 28 | - Summarize reality after each change: what was touched, how it was verified, and any follow-ups. 
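To make the streaming rules above concrete, here is a minimal sketch of the SSE wire shape. It is not the actual `chat.ts` implementation, and the payload fields are trimmed for illustration:

```typescript
// Sketch of the SSE contract: role chunk first, content deltas next,
// then the literal [DONE] sentinel. Payloads are abbreviated examples.
import type { ServerResponse } from 'http';

const writeSse = (res: ServerResponse, payload: unknown): void => {
  res.write(`data: ${JSON.stringify(payload)}\n\n`);
};

export const demoStream = (res: ServerResponse): void => {
  res.socket?.setNoDelay(true); // avoid Nagle-induced latency on streaming sockets
  res.writeHead(200, { 'Content-Type': 'text/event-stream' });
  writeSse(res, { choices: [{ index: 0, delta: { role: 'assistant' } }] });
  writeSse(res, { choices: [{ index: 0, delta: { content: 'Hello' } }] });
  res.write('data: [DONE]\n\n');
  res.end();
};
```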
-------------------------------------------------------------------------------- /src/http/routes/health.ts: -------------------------------------------------------------------------------- 1 | import * as vscode from 'vscode'; 2 | import type { ServerResponse } from 'http'; 3 | import { writeJson } from '../utils'; 4 | import { hasLMApi, getModel } from '../../models'; 5 | import { state } from '../../state'; 6 | import { verbose } from '../../log'; 7 | 8 | interface HealthResponse { 9 | readonly ok: boolean; 10 | readonly api: string; 11 | readonly notes: string; 12 | readonly status: string; 13 | readonly copilot: string; 14 | readonly reason?: string; 15 | readonly version: string; 16 | readonly features: { 17 | readonly chat_completions: boolean; 18 | readonly streaming: boolean; 19 | readonly tool_calling: boolean; 20 | readonly function_calling: boolean; 21 | readonly models_list: boolean; 22 | }; 23 | readonly active_requests: number; 24 | readonly model_attempted?: boolean; 25 | } 26 | 27 | export const handleHealthCheck = async (res: ServerResponse, v: boolean): Promise<void> => { 28 | const hasLM = hasLMApi(); 29 | 30 | // Attempt model resolution if cache is empty and verbose logging is enabled 31 | if (!state.modelCache && v) { 32 | verbose(`Health: model=${state.modelCache ? 'present' : 'missing'} lmApi=${hasLM ? 'ok' : 'missing'}`); 33 | try { 34 | await getModel(); 35 | } catch (e) { 36 | const msg = e instanceof Error ? e.message : String(e); 37 | verbose(`Health check model resolution failed: ${msg}`); 38 | } 39 | } 40 | 41 | const unavailableReason = state.modelCache 42 | ? undefined 43 | : (!hasLM ? 'missing_language_model_api' : (state.lastReason || 'copilot_model_unavailable')); 44 | 45 | const response: HealthResponse = { 46 | ok: true, 47 | api: hasLM ? 'vscode.lm' : 'missing_language_model_api', 48 | notes: "No direct Copilot endpoints; no token extraction", 49 | status: 'operational', 50 | copilot: state.modelCache ?
'ok' : 'unavailable', 51 | reason: unavailableReason, 52 | version: vscode.version, 53 | features: { 54 | chat_completions: true, 55 | streaming: true, 56 | tool_calling: true, 57 | function_calling: true, // deprecated but supported 58 | models_list: true 59 | }, 60 | active_requests: state.activeRequests, 61 | model_attempted: state.modelAttempted 62 | }; 63 | 64 | writeJson(res, 200, response); 65 | }; 66 | -------------------------------------------------------------------------------- /src/extension.ts: -------------------------------------------------------------------------------- 1 | import * as vscode from 'vscode'; 2 | import type { AddressInfo } from 'net'; 3 | import { getBridgeConfig } from './config'; 4 | import { state } from './state'; 5 | import { ensureOutput, verbose } from './log'; 6 | import { ensureStatusBar, updateStatus } from './status'; 7 | import { startServer, stopServer } from './http/server'; 8 | import { getModel } from './models'; 9 | 10 | export async function activate(ctx: vscode.ExtensionContext): Promise<void> { 11 | ensureOutput(); 12 | ensureStatusBar(); 13 | state.statusBarItem!.text = 'Copilot Bridge: Disabled'; 14 | state.statusBarItem!.show(); 15 | ctx.subscriptions.push(state.statusBarItem!, state.output!); 16 | 17 | ctx.subscriptions.push(vscode.commands.registerCommand('bridge.enable', async () => { 18 | await startBridge(); 19 | await getModel(true); 20 | })); 21 | 22 | ctx.subscriptions.push(vscode.commands.registerCommand('bridge.disable', async () => { 23 | await stopBridge(); 24 | })); 25 | 26 | ctx.subscriptions.push(vscode.commands.registerCommand('bridge.status', async () => { 27 | const info = state.server?.address(); 28 | const bound = (info && typeof info === 'object' && 'address' in info && 'port' in info) 29 | ? `${(info as AddressInfo).address}:${(info as AddressInfo).port}` 30 | : 'n/a'; 31 | const config = getBridgeConfig(); 32 | const hasToken = config.token.length > 0; 33 | vscode.window.showInformationMessage( 34 | `Copilot Bridge: ${state.running ? 'Enabled' : 'Disabled'} | Bound: ${bound} | Token: ${hasToken ? 'Set (required)' : 'Missing (requests will 401)'}` 35 | ); 36 | })); 37 | 38 | ctx.subscriptions.push(vscode.workspace.onDidChangeConfiguration((event) => { 39 | if (!event.affectsConfiguration('bridge.token')) { 40 | return; 41 | } 42 | if (!state.statusBarItem) { 43 | return; 44 | } 45 | const kind: 'start' | 'error' | 'success' | 'disabled' = !state.running 46 | ? 'disabled' 47 | : state.modelCache 48 | ? 'success' 49 | : state.modelAttempted 50 | ?
'error' 51 | : 'start'; 52 | updateStatus(kind, { suppressLog: true }); 53 | })); 54 | 55 | const config = getBridgeConfig(); 56 | if (config.enabled) { 57 | await startBridge(); 58 | } 59 | } 60 | 61 | export async function deactivate(): Promise<void> { 62 | await stopBridge(); 63 | } 64 | 65 | async function startBridge(): Promise<void> { 66 | if (state.running) return; 67 | state.running = true; 68 | try { 69 | await startServer(); 70 | } catch (error) { 71 | state.running = false; 72 | state.lastReason = 'startup_failed'; 73 | updateStatus('error', { suppressLog: true }); 74 | if (error instanceof Error) { 75 | verbose(error.stack || error.message); 76 | } else { 77 | verbose(String(error)); 78 | } 79 | throw error; 80 | } 81 | } 82 | 83 | async function stopBridge(): Promise<void> { 84 | if (!state.running) return; 85 | state.running = false; 86 | try { 87 | await stopServer(); 88 | } finally { 89 | state.server = undefined; 90 | state.modelCache = undefined; 91 | updateStatus('disabled'); 92 | verbose('Stopped'); 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "private": false, 3 | "icon": "images/icon.png", 4 | "name": "copilot-bridge", 5 | "displayName": "Copilot Bridge", 6 | "description": "Local OpenAI-compatible interface built on the public VS Code Language Model API (vscode.lm).", 7 | "version": "1.2.0", 8 | "publisher": "thinkability", 9 | "repository": { 10 | "type": "git", 11 | "url": "https://github.com/larsbaunwall/vscode-copilot-bridge.git" 12 | }, 13 | "author": "larsbaunwall", 14 | "engines": { 15 | "vscode": "^1.93.0" 16 | }, 17 | "license": "Apache-2.0", 18 | "extensionKind": [ 19 | "ui" 20 | ], 21 | "capabilities": { 22 | "untrustedWorkspaces": { 23 | "supported": false 24 | }, 25 | "virtualWorkspaces": false 26 | }, 27 | "categories": [ 28 | "AI" 29 | ], 30 | "activationEvents": [ 31 | "onStartupFinished" 32 | ], 33 | "main": "./out/extension.js", 34 | "contributes": { 35 | "commands": [ 36 | { 37 | "command": "bridge.enable", 38 | "title": "Copilot Bridge: Enable" 39 | }, 40 | { 41 | "command": "bridge.disable", 42 | "title": "Copilot Bridge: Disable" 43 | }, 44 | { 45 | "command": "bridge.status", 46 | "title": "Copilot Bridge: Status" 47 | } 48 | ], 49 | "configuration": { 50 | "title": "Copilot Bridge", 51 | "properties": { 52 | "bridge.enabled": { 53 | "type": "boolean", 54 | "default": false, 55 | "description": "Start the Copilot Bridge automatically when VS Code starts. Uses only the public `vscode.lm` Language Model API." 56 | }, 57 | "bridge.port": { 58 | "type": "number", 59 | "default": 0, 60 | "description": "Port for the local HTTP server. 0 picks a random ephemeral port." 61 | }, 62 | "bridge.token": { 63 | "type": "string", 64 | "default": "", 65 | "description": "Bearer token required in every Authorization header. Leave empty to block access." 66 | }, 67 | "bridge.historyWindow": { 68 | "type": "number", 69 | "default": 3, 70 | "description": "Number of user/assistant turns to include (system message is kept separately)." 71 | }, 72 | "bridge.maxConcurrent": { 73 | "type": "number", 74 | "default": 1, 75 | "minimum": 1, 76 | "maximum": 4, 77 | "description": "Maximum concurrent /v1/chat/completions requests. Excess requests return 429." 78 | }, 79 | "bridge.verbose": { 80 | "type": "boolean", 81 | "default": false, 82 | "description": "Verbose logging to the 'Copilot Bridge' output channel."
83 | } 84 | } 85 | } 86 | }, 87 | "scripts": { 88 | "compile": "tsc -p .", 89 | "watch": "tsc -w -p .", 90 | "vscode:prepublish": "npm run compile", 91 | "package": "npx vsce package" 92 | }, 93 | "dependencies": { 94 | "polka": "^0.5.2" 95 | }, 96 | "devDependencies": { 97 | "@types/node": "^20.10.0", 98 | "@types/vscode": "^1.90.0", 99 | "@vscode/vsce": "^3.6.0", 100 | "typescript": "^5.4.0" 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /src/models.ts: -------------------------------------------------------------------------------- 1 | import * as vscode from 'vscode'; 2 | import { state } from './state'; 3 | import { updateStatus } from './status'; 4 | import { verbose } from './log'; 5 | 6 | // VS Code Language Model API (see selectChatModels docs in latest VS Code API reference) 7 | const hasLanguageModelAPI = (): boolean => typeof vscode.lm?.selectChatModels === 'function'; 8 | 9 | export const selectChatModels = async (family?: string): Promise<vscode.LanguageModelChat[]> => { 10 | const selector: vscode.LanguageModelChatSelector | undefined = family ? { family } : undefined; 11 | return vscode.lm.selectChatModels(selector); 12 | }; 13 | 14 | export const getModel = async (force = false, family?: string): Promise<vscode.LanguageModelChat | undefined> => { 15 | if (!force && state.modelCache && !family) return state.modelCache; 16 | 17 | // Mark that we've attempted at least one model fetch (affects status bar messaging) 18 | state.modelAttempted = true; 19 | 20 | const hasLM = hasLanguageModelAPI(); 21 | 22 | if (!hasLM) { 23 | if (!family) state.modelCache = undefined; 24 | state.lastReason = 'missing_language_model_api'; 25 | updateStatus('error'); 26 | verbose('VS Code Language Model API not available; update VS Code or enable proposed API (Insiders/F5/--enable-proposed-api).'); 27 | return undefined; 28 | } 29 | 30 | try { 31 | // Prefer selecting by vendor 'copilot' if no family specified to reduce unrelated models 32 | const models: vscode.LanguageModelChat[] = family 33 | ? await selectChatModels(family) 34 | : await vscode.lm.selectChatModels({ vendor: 'copilot' }); 35 | if (models.length === 0) { 36 | if (!family) state.modelCache = undefined; 37 | state.lastReason = family ? 'not_found' : 'copilot_model_unavailable'; 38 | updateStatus('error'); 39 | verbose(family ? `no models for family ${family}` : 'no copilot models available'); 40 | return undefined; 41 | } 42 | state.modelCache = models[0]; // keep first for now; future: choose by quality or family preference 43 | state.lastReason = undefined; 44 | updateStatus('success'); 45 | return state.modelCache; 46 | } catch (e: unknown) { 47 | handleModelSelectionError(e, family); 48 | return undefined; 49 | } 50 | }; 51 | 52 | export const handleModelSelectionError = (error: unknown, family?: string): void => { 53 | const msg = error instanceof Error ? error.message : String(error); 54 | if (/not found/i.test(msg) || /Unknown model family/i.test(msg)) { 55 | state.lastReason = 'not_found'; 56 | } else if (/No chat models/i.test(msg)) { 57 | state.lastReason = 'copilot_model_unavailable'; 58 | } else { 59 | state.lastReason = 'copilot_model_unavailable'; 60 | } 61 | updateStatus('error'); 62 | const fam = family ?
` family=${family}` : ''; 63 | verbose(`Model selection failed: ${msg}${fam}`); 64 | }; 65 | 66 | export const listCopilotModels = async (): Promise<string[]> => { 67 | try { 68 | // Filter for Copilot models only, consistent with getModel behavior 69 | const models = await vscode.lm.selectChatModels({ vendor: 'copilot' }); 70 | const ids = models.map((m: vscode.LanguageModelChat) => { 71 | const normalized = m.family || m.id || m.name || 'copilot'; 72 | return `${normalized}`; 73 | }); 74 | return ids.length ? ids : ['copilot']; 75 | } catch { 76 | return ['copilot']; 77 | } 78 | }; 79 | 80 | export const hasLMApi = hasLanguageModelAPI; 81 | -------------------------------------------------------------------------------- /src/http/utils.ts: -------------------------------------------------------------------------------- 1 | import type { ServerResponse, IncomingMessage } from 'http'; 2 | 3 | export interface ErrorResponse { 4 | readonly error: { 5 | readonly message: string; 6 | readonly type: string; 7 | readonly code: string; 8 | readonly reason?: string; 9 | }; 10 | } 11 | 12 | // Pre-serialized common error responses for hot paths 13 | const UNAUTHORIZED_ERROR = JSON.stringify({ 14 | error: { 15 | message: 'unauthorized', 16 | type: 'invalid_request_error', 17 | code: 'unauthorized', 18 | }, 19 | }); 20 | 21 | const TOKEN_REQUIRED_ERROR = JSON.stringify({ 22 | error: { 23 | message: 'auth token required', 24 | type: 'invalid_request_error', 25 | code: 'auth_token_required', 26 | }, 27 | }); 28 | 29 | const NOT_FOUND_ERROR = JSON.stringify({ 30 | error: { 31 | message: 'not found', 32 | type: 'invalid_request_error', 33 | code: 'route_not_found', 34 | }, 35 | }); 36 | 37 | const RATE_LIMIT_ERROR = JSON.stringify({ 38 | error: { 39 | message: 'too many requests', 40 | type: 'rate_limit_error', 41 | code: 'rate_limit_exceeded', 42 | }, 43 | }); 44 | 45 | // Reusable header objects 46 | const JSON_HEADERS = { 'Content-Type': 'application/json' } as const; 47 | const RATE_LIMIT_HEADERS = { 48 | 'Content-Type': 'application/json', 49 | 'Retry-After': '1', 50 | } as const; 51 | 52 | /** 53 | * Fast-path unauthorized response (pre-serialized). 54 | */ 55 | export const writeUnauthorized = (res: ServerResponse): void => { 56 | res.writeHead(401, JSON_HEADERS); 57 | res.end(UNAUTHORIZED_ERROR); 58 | }; 59 | 60 | export const writeTokenRequired = (res: ServerResponse): void => { 61 | res.writeHead(401, JSON_HEADERS); 62 | res.end(TOKEN_REQUIRED_ERROR); 63 | }; 64 | 65 | /** 66 | * Fast-path not found response (pre-serialized). 67 | */ 68 | export const writeNotFound = (res: ServerResponse): void => { 69 | res.writeHead(404, JSON_HEADERS); 70 | res.end(NOT_FOUND_ERROR); 71 | }; 72 | 73 | /** 74 | * Fast-path rate limit response (pre-serialized).
75 | */ 76 | export const writeRateLimit = (res: ServerResponse): void => { 77 | res.writeHead(429, RATE_LIMIT_HEADERS); 78 | res.end(RATE_LIMIT_ERROR); 79 | }; 80 | 81 | export const writeJson = <T>(res: ServerResponse, status: number, body: T): void => { 82 | res.writeHead(status, JSON_HEADERS); 83 | res.end(JSON.stringify(body)); 84 | }; 85 | 86 | export function writeErrorResponse( 87 | res: ServerResponse, 88 | status: number, 89 | message: string, 90 | type: string, 91 | code: string 92 | ): void; 93 | export function writeErrorResponse( 94 | res: ServerResponse, 95 | status: number, 96 | message: string, 97 | type: string, 98 | code: string, 99 | reason: string 100 | ): void; 101 | export function writeErrorResponse( 102 | res: ServerResponse, 103 | status: number, 104 | message: string, 105 | type: string, 106 | code: string, 107 | reason?: string 108 | ): void { 109 | writeJson(res, status, { error: { message, type, code, ...(reason ? { reason } : {}) } }); 110 | } 111 | 112 | export const readJson = <T>(req: IncomingMessage): Promise<T> => 113 | new Promise((resolve, reject) => { 114 | let data = ''; 115 | req.on('data', (c) => (data += c)); 116 | req.on('end', () => { 117 | try { 118 | resolve((data ? JSON.parse(data) : {}) as T); 119 | } catch (e) { 120 | reject(e); 121 | } 122 | }); 123 | req.on('error', reject); 124 | }); 125 | -------------------------------------------------------------------------------- /src/status.ts: -------------------------------------------------------------------------------- 1 | import * as vscode from 'vscode'; 2 | import { AddressInfo } from 'net'; 3 | import { state } from './state'; 4 | import { LOOPBACK_HOST, getBridgeConfig } from './config'; 5 | import { info } from './log'; 6 | 7 | const formatEndpoint = (addr: AddressInfo | null, port: number): string => { 8 | if (addr) { 9 | const address = addr.address === '::' ? LOOPBACK_HOST : addr.address; 10 | return `${address}:${addr.port}`; 11 | } 12 | const normalizedPort = port === 0 ? 'auto' : port; 13 | return `${LOOPBACK_HOST}:${normalizedPort}`; 14 | }; 15 | 16 | const buildTooltip = (status: string, endpoint: string, tokenConfigured: boolean, reason?: string): vscode.MarkdownString => { 17 | const tooltip = new vscode.MarkdownString(); 18 | tooltip.supportThemeIcons = true; 19 | tooltip.isTrusted = true; 20 | tooltip.appendMarkdown(`**Copilot Bridge**\n\n`); 21 | tooltip.appendMarkdown(`Status: ${status}\n\n`); 22 | tooltip.appendMarkdown(`Endpoint: \`http://${endpoint}\`\n\n`); 23 | 24 | if (tokenConfigured) { 25 | tooltip.appendMarkdown('Auth token: ✅ configured.
Requests must include `Authorization: Bearer <token>`.'); 26 | } else { 27 | tooltip.appendMarkdown('Auth token: ⚠️ not configured — all API requests return **401 Unauthorized** until you set `bridge.token`.'); 28 | tooltip.appendMarkdown('\n\n[Configure token](command:workbench.action.openSettings?%22bridge.token%22)'); 29 | } 30 | 31 | if (reason) { 32 | tooltip.appendMarkdown(`\n\nLast reason: \`${reason}\``); 33 | } 34 | 35 | return tooltip; 36 | }; 37 | 38 | export const ensureStatusBar = (): void => { 39 | if (!state.statusBarItem) { 40 | state.statusBarItem = vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Left, 100); 41 | state.statusBarItem.text = 'Copilot Bridge: Disabled'; 42 | state.statusBarItem.command = 'bridge.status'; 43 | state.statusBarItem.show(); 44 | updateStatus('disabled'); 45 | } 46 | }; 47 | 48 | export type BridgeStatusKind = 'start' | 'error' | 'success' | 'disabled'; 49 | 50 | interface UpdateStatusOptions { 51 | readonly suppressLog?: boolean; 52 | } 53 | 54 | export const updateStatus = (kind: BridgeStatusKind, options: UpdateStatusOptions = {}): void => { 55 | const cfg = getBridgeConfig(); 56 | const addr = state.server?.address() as AddressInfo | null; 57 | const shown = formatEndpoint(addr, cfg.port); 58 | const tokenConfigured = cfg.token.length > 0; 59 | 60 | if (!state.statusBarItem) return; 61 | 62 | let statusLabel: string; 63 | switch (kind) { 64 | case 'start': { 65 | const availability = state.modelCache ? 'OK' : (state.modelAttempted ? 'Unavailable' : 'Pending'); 66 | state.statusBarItem.text = `Copilot Bridge: ${availability} @ ${shown}`; 67 | if (!options.suppressLog) { 68 | info(`Started at http://${shown} | Copilot: ${state.modelCache ? 'ok' : (state.modelAttempted ? 'unavailable' : 'pending')}`); 69 | } 70 | statusLabel = availability; 71 | break; 72 | } 73 | case 'error': 74 | state.statusBarItem.text = `Copilot Bridge: Unavailable @ ${shown}`; 75 | statusLabel = 'Unavailable'; 76 | break; 77 | case 'success': 78 | state.statusBarItem.text = `Copilot Bridge: OK @ ${shown}`; 79 | statusLabel = 'OK'; 80 | break; 81 | case 'disabled': 82 | state.statusBarItem.text = 'Copilot Bridge: Disabled'; 83 | statusLabel = 'Disabled'; 84 | break; 85 | default: 86 | // Exhaustive check in case of future extension 87 | const _never: never = kind; 88 | return _never; 89 | } 90 | 91 | state.statusBarItem.tooltip = buildTooltip(statusLabel, shown, tokenConfigured, state.lastReason); 92 | }; 93 | -------------------------------------------------------------------------------- /src/http/server.ts: -------------------------------------------------------------------------------- 1 | import polka from 'polka'; 2 | import type { Server, IncomingMessage, ServerResponse } from 'http'; 3 | import { getBridgeConfig } from '../config'; 4 | import { state } from '../state'; 5 | import { isAuthorized } from './auth'; 6 | import { handleHealthCheck } from './routes/health'; 7 | import { handleModelsRequest } from './routes/models'; 8 | import { handleChatCompletion } from './routes/chat'; 9 | import { writeErrorResponse, writeNotFound, writeRateLimit, writeTokenRequired, writeUnauthorized } from './utils'; 10 | import { ensureOutput, verbose } from '../log'; 11 | import { updateStatus } from '../status'; 12 | 13 | export const startServer = async (): Promise<void> => { 14 | if (state.server) return; 15 | const config = getBridgeConfig(); 16 | ensureOutput(); 17 | 18 | const app = polka({ 19 | onError: (err, req, res) => { 20 | const msg = err instanceof Error ?
err.message : String(err); 21 | verbose(`HTTP error: ${msg}`); 22 | if (!res.headersSent) { 23 | writeErrorResponse(res, 500, msg || 'internal_error', 'server_error', 'internal_error'); 24 | } else { 25 | try { res.end(); } catch {/* ignore */} 26 | } 27 | }, 28 | onNoMatch: (_req, res) => { 29 | writeNotFound(res); 30 | }, 31 | }); 32 | 33 | // Auth middleware - runs before all routes (except /health) 34 | app.use((req, res, next) => { 35 | const path = req.url ?? '/'; 36 | if (path === '/health') { 37 | return next(); 38 | } 39 | const token = getBridgeConfig().token; 40 | if (!token) { 41 | if (config.verbose) { 42 | verbose('401 unauthorized: missing auth token'); 43 | } 44 | writeTokenRequired(res); 45 | return; 46 | } 47 | if (!isAuthorized(req, token)) { 48 | writeUnauthorized(res); 49 | return; 50 | } 51 | next(); 52 | }); 53 | 54 | // Verbose logging middleware 55 | if (config.verbose) { 56 | app.use((req, res, next) => { 57 | verbose(`${req.method} ${req.url}`); 58 | next(); 59 | }); 60 | } 61 | 62 | app.get('/health', async (_req: IncomingMessage, res: ServerResponse) => { 63 | await handleHealthCheck(res, config.verbose); 64 | }); 65 | 66 | app.get('/v1/models', async (_req: IncomingMessage, res: ServerResponse) => { 67 | await handleModelsRequest(res); 68 | }); 69 | 70 | app.post('/v1/chat/completions', async (req: IncomingMessage, res: ServerResponse) => { 71 | // Rate limiting check 72 | if (state.activeRequests >= config.maxConcurrent) { 73 | if (config.verbose) { 74 | verbose(`429 throttled (active=${state.activeRequests}, max=${config.maxConcurrent})`); 75 | } 76 | writeRateLimit(res); 77 | return; 78 | } 79 | 80 | try { 81 | await handleChatCompletion(req, res); 82 | } catch (e) { 83 | const msg = e instanceof Error ? e.message : String(e); 84 | writeErrorResponse(res, 500, msg || 'internal_error', 'server_error', 'internal_error'); 85 | } 86 | }); 87 | 88 | await new Promise<void>((resolve, reject) => { 89 | try { 90 | app.listen(config.port, config.host, () => { 91 | const srv = app.server as Server | undefined; 92 | if (!srv) return reject(new Error('Server failed to start')); 93 | state.server = srv; 94 | updateStatus('start'); 95 | resolve(); 96 | }); 97 | const srv = app.server as Server | undefined; 98 | srv?.on('error', reject); 99 | } catch (err) { 100 | reject(err); 101 | } 102 | }); 103 | }; 104 | 105 | export const stopServer = async (): Promise<void> => { 106 | await new Promise<void>((resolve) => { 107 | if (!state.server) return resolve(); 108 | state.server.close(() => resolve()); 109 | }); 110 | state.server = undefined; 111 | }; 112 | -------------------------------------------------------------------------------- /.github/instructions/ts.instructions.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: 'Guidelines for TypeScript Development targeting TypeScript 5.x and ES2022 output' 3 | applyTo: '**/*.ts' 4 | --- 5 | 6 | # TypeScript Development 7 | 8 | > These instructions assume projects are built with TypeScript 5.x (or newer) compiling to an ES2022 JavaScript baseline. Adjust guidance if your runtime requires older language targets or down-level transpilation. 9 | 10 | ## Core Intent 11 | 12 | - Respect the existing architecture and coding standards. 13 | - Prefer readable, explicit solutions over clever shortcuts. 14 | - Extend current abstractions before inventing new ones. 15 | - Prioritize maintainability and clarity: short methods, small classes, clean code.
16 | - Keep edits aligned with [AGENTS.md](../../AGENTS.md) and `.github/copilot-instructions.md`. 17 | 18 | ## Programming Language: TypeScript 19 | 20 | **TypeScript Best Practices:** 21 | - Use strict TypeScript configuration with `"strict": true` 22 | - Prefer interfaces over type aliases for object shapes 23 | - Use explicit return types for all public functions 24 | - Avoid `any` type - use `unknown` or proper typing instead 25 | - Use utility types (Pick, Omit, Partial) for type transformations 26 | - Implement proper null/undefined checking 27 | 28 | ## Code Style: Clean Code 29 | 30 | **Clean Code Principles:** 31 | - Write self-documenting code with meaningful names 32 | - Keep functions small and focused on a single responsibility 33 | - Avoid deep nesting and complex conditional statements 34 | - Use consistent formatting and indentation 35 | - Write code that tells a story and is easy to understand 36 | - Refactor ruthlessly to eliminate code smells 37 | 38 | ## General Guardrails 39 | 40 | - Target TypeScript 5.x / ES2022 and prefer native features over polyfills. 41 | - Use pure ES modules; never emit `require`, `module.exports`, or CommonJS helpers. 42 | - Rely on the project's build, lint, and test scripts unless asked otherwise. 43 | - Note design trade-offs when intent is not obvious. 44 | - Reuse the HTTP helpers in `src/http/utils.ts` (`writeUnauthorized`, `writeNotFound`, `writeRateLimit`, `writeErrorResponse`) instead of writing ad-hoc JSON responses. 45 | - Preserve the SSE contract in `src/http/routes/chat.ts`: send the role chunk first, follow with `data: { ... }` payloads, and always terminate with `data: [DONE]`. 46 | - When streaming, call `res.socket?.setNoDelay(true)` before emitting chunks to avoid latency regressions. 47 | - Honor the concurrency guard (`state.activeRequests`) and return early 429 responses via `writeRateLimit` when limits are exceeded. 48 | - Communicate limitations of the VS Code LM API, e.g., `tool_choice: "required"` behaving like `"auto"` and lack of `parallel_tool_calls` support. 49 | 50 | ## Project Organization 51 | 52 | - Follow the repository's folder and responsibility layout for new code. 53 | - Use kebab-case filenames (e.g., `user-session.ts`, `data-service.ts`) unless told otherwise. 54 | - Keep tests, types, and helpers near their implementation when it aids discovery. 55 | - Reuse or extend shared utilities before adding new ones. 56 | 57 | ## Naming & Style 58 | 59 | - Use PascalCase for classes, interfaces, enums, and type aliases; camelCase for everything else. 60 | - Skip interface prefixes like `I`; rely on descriptive names. 61 | - Name things for their behavior or domain meaning, not implementation. 62 | 63 | ## Formatting & Style 64 | 65 | - Run the repository's lint/format scripts (e.g., `npm run lint`) before submitting. 66 | - Match the project's indentation, quote style, and trailing comma rules. 67 | - Keep functions focused; extract helpers when logic branches grow. 68 | - Favor immutable data and pure functions when practical. 69 | 70 | ## Type System Expectations 71 | 72 | - Avoid `any` (implicit or explicit); prefer `unknown` plus narrowing. 73 | - Use discriminated unions for realtime events and state machines. 74 | - Centralize shared contracts instead of duplicating shapes. 75 | - Express intent with TypeScript utility types (e.g., `Readonly`, `Partial`, `Record`). 76 | 77 | ## Async, Events & Error Handling 78 | 79 | - Use `async/await`; wrap awaits in try/catch with structured errors. 
80 | - Guard edge cases early to avoid deep nesting. 81 | - Send errors through the project's logging/telemetry utilities. 82 | - Surface user-facing errors via the repository's notification pattern. 83 | - Debounce configuration-driven updates and dispose resources deterministically. 84 | - Prefer the pre-serialized error helpers for fast paths and document any new reason codes in README + status handlers. 85 | 86 | ## Architecture & Patterns 87 | 88 | - Follow the repository's dependency injection or composition pattern; keep modules single-purpose. 89 | - Observe existing initialization and disposal sequences when wiring into lifecycles. 90 | - Keep transport, domain, and presentation layers decoupled with clear interfaces. 91 | - Supply lifecycle hooks (e.g., `initialize`, `dispose`) and targeted tests when adding services. 92 | 93 | ## External Integrations 94 | 95 | - Instantiate clients outside hot paths and inject them for testability. 96 | - Never hardcode secrets; load them from secure sources. 97 | - Apply retries, backoff, and cancellation to network or IO calls. 98 | - Normalize external responses and map errors to domain shapes. 99 | 100 | ## Security Practices 101 | 102 | - Validate and sanitize external input with schema validators or type guards. 103 | - Avoid dynamic code execution and untrusted template rendering. 104 | - Encode untrusted content before rendering HTML; use framework escaping or trusted types. 105 | - Use parameterized queries or prepared statements to block injection. 106 | - Keep secrets in secure storage, rotate them regularly, and request least-privilege scopes. 107 | - Favor immutable flows and defensive copies for sensitive data. 108 | - Use vetted crypto libraries only. 109 | - Patch dependencies promptly and monitor advisories. 110 | 111 | ## Configuration & Secrets 112 | 113 | - Reach configuration through shared helpers and validate with schemas or dedicated validators. 114 | - Handle secrets via the project's secure storage; guard `undefined` and error states. 115 | - Document new configuration keys and update related tests. 116 | 117 | ## UI & UX Components 118 | 119 | - Sanitize user or external content before rendering. 120 | - Keep UI layers thin; push heavy logic to services or state managers. 121 | - Use messaging or events to decouple UI from business logic. 122 | 123 | ## Testing Expectations 124 | 125 | - Add or update unit tests with the project's framework and naming style. 126 | - Expand integration or end-to-end suites when behavior crosses modules or platform APIs. 127 | - Run targeted test scripts for quick feedback before submitting. 128 | - Avoid brittle timing assertions; prefer fake timers or injected clocks. 129 | 130 | ## Performance & Reliability 131 | 132 | - Lazy-load heavy dependencies and dispose them when done. 133 | - Defer expensive work until users need it. 134 | - Batch or debounce high-frequency events to reduce thrash. 135 | - Track resource lifetimes to prevent leaks. 136 | - Avoid repeated configuration reads in hot paths; cache settings when practical. 137 | - Maintain streaming code paths without buffering entire responses; only accumulate when `stream: false`. 138 | 139 | ## Documentation & Comments 140 | 141 | - Add JSDoc to public APIs; include `@remarks` or `@example` when helpful. 142 | - Write comments that capture intent, and remove stale notes during refactors. 143 | - Update architecture or design docs when introducing significant patterns. 
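As a concrete instance of the discriminated-union guidance above, a minimal sketch with illustrative names (none of these types exist in the codebase):

```typescript
// Discriminated union: the `kind` tag lets the compiler narrow each branch
// and enforce exhaustive handling.
type BridgeEvent =
  | { kind: 'started'; port: number }
  | { kind: 'request'; path: string }
  | { kind: 'stopped'; reason?: string };

const describe = (event: BridgeEvent): string => {
  switch (event.kind) {
    case 'started': return `listening on port ${event.port}`;
    case 'request': return `handling ${event.path}`;
    case 'stopped': return event.reason ?? 'stopped cleanly';
  }
};
```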
-------------------------------------------------------------------------------- /src/messages.ts: -------------------------------------------------------------------------------- 1 | import * as vscode from 'vscode'; 2 | 3 | export interface ChatMessage { 4 | readonly role: 'system' | 'user' | 'assistant' | 'tool'; 5 | readonly content?: string | MessageContent[] | null; 6 | readonly name?: string; 7 | readonly tool_calls?: ToolCall[]; 8 | readonly tool_call_id?: string; 9 | readonly function_call?: FunctionCall; 10 | } 11 | 12 | export interface MessageContent { 13 | readonly type: string; 14 | readonly text?: string; 15 | readonly [key: string]: unknown; 16 | } 17 | 18 | export interface ToolCall { 19 | readonly id: string; 20 | readonly type: 'function'; 21 | readonly function: FunctionCall; 22 | } 23 | 24 | export interface FunctionCall { 25 | readonly name: string; 26 | readonly arguments: string; 27 | } 28 | 29 | export interface Tool { 30 | readonly type: 'function'; 31 | readonly function: ToolFunction; 32 | } 33 | 34 | export interface ToolFunction { 35 | readonly name: string; 36 | readonly description?: string; 37 | readonly parameters?: object; 38 | } 39 | 40 | export interface ChatCompletionRequest { 41 | readonly model?: string; 42 | readonly messages: ChatMessage[]; 43 | readonly stream?: boolean; 44 | readonly tools?: Tool[]; 45 | readonly tool_choice?: 'none' | 'auto' | 'required' | { type: 'function'; function: { name: string } }; 46 | readonly parallel_tool_calls?: boolean; 47 | readonly functions?: ToolFunction[]; // Deprecated, use tools instead 48 | readonly function_call?: 'none' | 'auto' | { name: string }; // Deprecated, use tool_choice instead 49 | readonly temperature?: number; 50 | readonly top_p?: number; 51 | readonly n?: number; 52 | readonly stop?: string | string[]; 53 | readonly max_tokens?: number; 54 | readonly max_completion_tokens?: number; 55 | readonly presence_penalty?: number; 56 | readonly frequency_penalty?: number; 57 | readonly logit_bias?: Record<string, number>; 58 | readonly logprobs?: boolean; 59 | readonly top_logprobs?: number; 60 | readonly user?: string; 61 | readonly seed?: number; 62 | readonly response_format?: { 63 | readonly type: 'text' | 'json_object' | 'json_schema'; 64 | readonly json_schema?: { 65 | readonly name: string; 66 | readonly schema: object; 67 | readonly strict?: boolean; 68 | }; 69 | }; 70 | readonly [key: string]: unknown; 71 | } 72 | 73 | const VALID_ROLES = ['system', 'user', 'assistant', 'tool'] as const; 74 | type Role = typeof VALID_ROLES[number]; 75 | const isValidRole = (role: unknown): role is Role => typeof role === 'string' && VALID_ROLES.includes(role as Role); 76 | 77 | export const isChatMessage = (msg: unknown): msg is ChatMessage => { 78 | if (typeof msg !== 'object' || msg === null) return false; 79 | const candidate = msg as Record<string, unknown>; 80 | if (!('role' in candidate)) return false; 81 | if (!isValidRole(candidate.role)) return false; 82 | 83 | // Tool messages require tool_call_id and content 84 | if (candidate.role === 'tool') { 85 | return typeof candidate.tool_call_id === 'string' && 86 | (typeof candidate.content === 'string' || candidate.content === null); 87 | } 88 | 89 | // Assistant messages can have content and/or tool_calls/function_call 90 | if (candidate.role === 'assistant') { 91 | const hasContent = candidate.content !== undefined; 92 | const hasToolCalls = Array.isArray(candidate.tool_calls); 93 | const hasFunctionCall = typeof candidate.function_call === 'object' && candidate.function_call !== null;
94 | return hasContent || hasToolCalls || hasFunctionCall; 95 | } 96 | 97 | // System and user messages must have content 98 | return candidate.content !== undefined && candidate.content !== null; 99 | }; 100 | 101 | export const isChatCompletionRequest = (body: unknown): body is ChatCompletionRequest => { 102 | if (typeof body !== 'object' || body === null) return false; 103 | const candidate = body as Record<string, unknown>; 104 | if (!('messages' in candidate)) return false; 105 | const messages = candidate.messages; 106 | return Array.isArray(messages) && messages.length > 0 && messages.every(isChatMessage); 107 | }; 108 | 109 | // Convert OpenAI tools to VS Code Language Model tools 110 | export const convertOpenAIToolsToLM = (tools?: Tool[]): vscode.LanguageModelChatTool[] => { 111 | if (!tools) return []; 112 | return tools.map(tool => ({ 113 | name: tool.function.name, 114 | description: tool.function.description || '', 115 | inputSchema: tool.function.parameters 116 | })); 117 | }; 118 | 119 | // Convert deprecated functions to tools format 120 | export const convertFunctionsToTools = (functions?: ToolFunction[]): Tool[] => { 121 | if (!functions) return []; 122 | return functions.map(func => ({ 123 | type: 'function' as const, 124 | function: func 125 | })); 126 | }; 127 | 128 | const toText = (content: unknown): string => { 129 | if (typeof content === 'string') return content; 130 | if (Array.isArray(content)) return content.map(toText).join('\n'); 131 | if (content && typeof content === 'object' && 'text' in content) { 132 | const textVal = (content as { text?: unknown }).text; 133 | if (typeof textVal === 'string') return textVal; 134 | } 135 | try { 136 | return JSON.stringify(content); 137 | } catch { 138 | return String(content); 139 | } 140 | }; 141 | 142 | export const normalizeMessagesLM = ( 143 | messages: readonly ChatMessage[], 144 | histWindow: number 145 | ): (vscode.LanguageModelChatMessage | { role: 'user' | 'assistant'; content: string })[] => { 146 | const systemMessages = messages.filter((m) => m.role === 'system'); 147 | const systemMessage = systemMessages[systemMessages.length - 1]; 148 | 149 | // Include user, assistant, and tool messages in conversation 150 | const conversationMessages = messages.filter((m) => 151 | m.role === 'user' || m.role === 'assistant' || m.role === 'tool' 152 | ).slice(-histWindow * 3); // Increased window to account for tool messages 153 | 154 | const lmMsg = (vscode as unknown as { LanguageModelChatMessage?: typeof vscode.LanguageModelChatMessage }).LanguageModelChatMessage; 155 | const userFactory = lmMsg?.User; 156 | const assistantFactory = lmMsg?.Assistant; 157 | const hasFactories = Boolean(userFactory && assistantFactory); 158 | 159 | const result: (vscode.LanguageModelChatMessage | { role: 'user' | 'assistant'; content: string })[] = []; 160 | let firstUserSeen = false; 161 | 162 | for (const m of conversationMessages) { 163 | if (m.role === 'user') { 164 | let text = toText(m.content); 165 | if (!firstUserSeen && systemMessage) { 166 | text = `[SYSTEM]\n${toText(systemMessage.content)}\n\n[DIALOG]\nuser: ${text}`; 167 | firstUserSeen = true; 168 | } 169 | result.push(userFactory ?
userFactory(text) : { role: 'user', content: text }); 170 | } else if (m.role === 'assistant') { 171 | // For assistant messages, we need to handle both content and tool calls 172 | let text = ''; 173 | 174 | if (m.content) { 175 | text = toText(m.content); 176 | } 177 | 178 | // If the assistant message has tool calls, format them appropriately 179 | if (m.tool_calls && m.tool_calls.length > 0) { 180 | const toolCallsText = m.tool_calls.map(tc => 181 | `[TOOL_CALL:${tc.id}] ${tc.function.name}(${tc.function.arguments})` 182 | ).join('\n'); 183 | 184 | if (text) { 185 | text += '\n' + toolCallsText; 186 | } else { 187 | text = toolCallsText; 188 | } 189 | } 190 | 191 | // Handle deprecated function_call format 192 | if (!text && m.function_call) { 193 | text = `[FUNCTION_CALL] ${m.function_call.name}(${m.function_call.arguments})`; 194 | } 195 | 196 | result.push(assistantFactory ? assistantFactory(text) : { role: 'assistant', content: text }); 197 | } else if (m.role === 'tool') { 198 | // Tool messages should be converted to user messages with tool result context 199 | const toolResult = `[TOOL_RESULT:${m.tool_call_id}] ${toText(m.content)}`; 200 | result.push(userFactory ? userFactory(toolResult) : { role: 'user', content: toolResult }); 201 | } 202 | } 203 | 204 | if (!firstUserSeen && systemMessage) { 205 | const text = `[SYSTEM]\n${toText(systemMessage.content)}`; 206 | result.unshift(userFactory ? userFactory(text) : { role: 'user', content: text }); 207 | } 208 | 209 | if (result.length === 0) result.push(userFactory ? userFactory('') : { role: 'user', content: '' }); 210 | 211 | return result; 212 | }; 213 | -------------------------------------------------------------------------------- /AGENTS.md: -------------------------------------------------------------------------------- 1 | # AI Agent Contribution Guide 2 | 3 | This document gives coding agents (and human maintainers) a clear, opinionated playbook for making safe, coherent, high‑quality changes to this repository. 4 | 5 | --- 6 | 7 | ## 1. Project Purpose 8 | 9 | Expose GitHub Copilot through a local **OpenAI‑compatible** HTTP bridge inside VS Code. Primary user stories: 10 | 11 | - Run a local `/v1/chat/completions` endpoint that forwards to Copilot via the VS Code Language Model API. 12 | - List available Copilot model families through `/v1/models`. 13 | - Basic health & availability via `/health`. 14 | 15 | The server is **local only** (loopback host by default) and is not meant for multi‑tenant or remote exposure. 16 | 17 | --- 18 | 19 | ## 2. Architecture Snapshot 20 | 21 | | Layer | Key Files | Notes | 22 | |-------|-----------|-------| 23 | | VS Code Extension Activation | `src/extension.ts` | Enables/Disables bridge, manages status command. | 24 | | HTTP Server (Polka) | `src/http/server.ts` | Routes + middleware + error handling. | 25 | | Routes | `src/http/routes/*.ts` | `health.ts`, `models.ts`, `chat.ts`. | 26 | | LM / Copilot Integration | `src/models.ts` | Model selection, status updates. | 27 | | Message Normalization | `src/messages.ts` | Shapes user/assistant/system to LM API format. | 28 | | Status & State | `src/status.ts`, `src/state.ts` | In‑memory server + model state, status bar text. | 29 | | Config & Logging | `src/config.ts`, `src/log.ts` | Reads `bridge.*` settings, output channel. | 30 | | Utilities | `src/http/utils.ts` | JSON helpers, typed error responses. | 31 | 32 | --- 33 | 34 | ## 3. Coding Standards 35 | 36 | 1. 
**TypeScript Strictness**: No `any` or loose `unknown` unless inside *typed* external shim declarations. Use strong VS Code API types (`vscode.LanguageModelChat`, etc.). 37 | 2. **Imports**: All imports at file top. No inline `import('module')` types. 38 | 3. **ES Module Style**: Use `import` syntax (even though `commonjs` output). No `require` in source except in isolated legacy shims (currently none). 39 | 4. **Polka Typings**: The custom declaration in `src/types/polka.d.ts` must stay minimal but strongly typed. Extend only when you need new surface. 40 | 5. **Error Handling**: Use central `onError` (`server.ts`). Avoid swallowing errors; bubble or log via `verbose`. Prefer the pre-serialized helpers in `src/http/utils.ts` (`writeUnauthorized`, `writeNotFound`, `writeRateLimit`, `writeErrorResponse`) instead of hand-crafted JSON bodies. 41 | 6. **Logging**: Use `verbose()` for debug (guarded by config), `info()` for one‑time start messages, `error()` sparingly (currently not widely used—add only if user‑facing severity). 42 | 7. **Status Bar**: Use `updateStatus(kind)` with kinds: `start | error | success | disabled`. Initial pending state relies on `state.modelAttempted`. 43 | 8. **Model Selection**: Always feature-detect the LM API (`hasLMApi`). Return early on missing API with clear `state.lastReason` codes. 44 | 9. **Endpoint Stability**: Public paths (`/health`, `/v1/models`, `/v1/chat/completions`). Changes require README updates and semantic version bump. 45 | 10. **Streaming & Tool Calling**: SSE contract: multiple `data: {chunk}` events + final `data: [DONE]`. Preserve this shape. Tool call chunks must emit `delta.tool_calls` entries encoded as JSON; arguments may arrive as incremental strings, so downstream clients should replace rather than append. The bridge treats `tool_choice: "required"` the same as `"auto"` and ignores `parallel_tool_calls` because the VS Code LM API lacks those controls—communicate this limitation in README and responses if behaviour changes in future. 46 | 47 | --- 48 | 49 | ## 4. State & Reason Codes 50 | 51 | `state.lastReason` drives health + status explanations. Allowed values (current): 52 | 53 | - `missing_language_model_api` 54 | - `copilot_model_unavailable` 55 | - `not_found` 56 | - `startup_failed` (plus potential future codes: `consent_required`, `rate_limited`) 57 | 58 | If you introduce new reason codes, update: 59 | 60 | - `README.md` troubleshooting section 61 | - `handleModelSelectionError` 62 | - Health output expectations 63 | 64 | --- 65 | 66 | ## 5. Configuration Contract (`bridge.*`) 67 | 68 | See `package.json` contributes -> configuration. When adding new settings: 69 | 70 | - Provide default 71 | - Document in README table 72 | - Use `cfg.get(key, default)` pattern 73 | - Add to `BridgeConfig` and ensure `getBridgeConfig()` uses `satisfies` to keep type safety 74 | 75 | --- 76 | 77 | ## 6. Adding Endpoints 78 | 79 | Before adding an endpoint: 80 | 81 | - Justify purpose (user scenario). Keep scope tight; avoid feature creep. 82 | - Enforce auth (token) uniformly—reuse existing middleware pattern. 83 | - Return OpenAI‑compatible shapes only if endpoint is explicitly an OpenAI analog; otherwise define a minimal JSON schema and document it. 84 | - Update README (Endpoints section) and bump version (PATCH or MINOR depending on scope). 85 | 86 | --- 87 | 88 | ## 7. Versioning & Releases 89 | 90 | - Patch: bug fixes, doc updates, internal refactors. 91 | - Minor: new endpoint, new config option, new visible status semantics.
92 | - Major (future if ever): breaking API changes (endpoint removal, payload contract changes).
93 | 
94 | Use `npm version <patch|minor|major>` then rebuild & (optionally) package the VSIX.
95 | 
96 | ---
97 | 
98 | ## 8. Logging Guidelines
99 | 
100 | | Use | Function | Example |
101 | |-----|----------|---------|
102 | | Startup/one‑off info | `info()` | Bound address, model availability summary |
103 | | Debug/verbose flow | `verbose()` | Per‑request logging, selection outcomes, SSE lifecycle |
104 | | Serious error (rare) | `error()` | Unrecoverable initialization failure |
105 | 
106 | Avoid high‑volume logs in hot loops. Guard truly verbose details behind feature flags if needed.
107 | 
108 | ---
109 | 
110 | ## 9. Performance & Concurrency
111 | 
112 | - Concurrency limit enforced in `/v1/chat/completions` before model call; maintain early 429 path.
113 | - Streaming is async iteration; avoid buffering entire response unless `stream: false`.
114 | - Disable Nagle’s algorithm on streaming sockets with `socket.setNoDelay(true)` before writing SSE payloads.
115 | - Do not introduce global locks; keep per‑request ephemeral state.
116 | 
117 | ---
118 | 
119 | ## 10. Security
120 | 
121 | - Must not widen default host binding without explicit config.
122 | - All non-health/model/chat endpoints (future) must preserve token auth.
123 | - Never log bearer tokens or raw user messages verbatim if sensitive; current design logs only structural info.
124 | 
125 | ---
126 | 
127 | ## 11. Testing Philosophy (Future)
128 | 
129 | Tests are currently absent. If adding:
130 | 
131 | - Unit: message normalization, model selection error categorization.
132 | - Integration (optional): spin up server with mock LM API (abstract LM provider behind interface for test harness).
133 | 
134 | Keep tests deterministic (no real network LM calls).
135 | 
136 | ---
137 | 
138 | ## 12. AI Agent Change Workflow
139 | 
140 | 1. **Scan**: Read related files (avoid editing blindly). Use grep/search for symbol impact.
141 | 2. **Plan**: List concrete steps & affected files; ensure config/docs alignment.
142 | 3. **Edit**: Minimal diffs; avoid formatting unrelated sections.
143 | 4. **Validate**: `npm run compile` must pass. (If adding tests later: run them.)
144 | 5. **Docs**: Update README + this file if contracts change.
145 | 6. **Status**: Summarize what changed, why, and any follow‑ups.
146 | 
147 | Never leave the codebase with failing type checks.
148 | 
149 | ---
150 | 
151 | ## 13. Common Pitfalls
152 | 
153 | | Pitfall | Avoidance |
154 | |---------|-----------|
155 | | Using `any` for quick fixes | Introduce proper interface / generic or refine existing type guard |
156 | | Forgetting health/status synchronization | Update `state.lastReason` & call `updateStatus` consistently |
157 | | Adding silent failure paths | Always log via `verbose()` or propagate error to `onError` |
158 | | Breaking SSE spec | Maintain final `data: [DONE]` sentinel |
159 | | Undocumented reason codes | Update troubleshooting section immediately |
160 | 
161 | ---
162 | 
163 | ## 14. Future Enhancements (Backlog Ideas)
164 | 
165 | - Graceful shutdown hook (capture SIGINT in dev host context if feasible)
166 | - Adaptive model selection (prefer family ordering / scoring)
167 | - Rate limit headers (e.g., `X-RateLimit-Remaining`)
168 | - Optional request timeout support
169 | - Structured logging (JSON) behind a flag
170 | - Basic test harness / mock LM provider
171 | 
172 | (Do **not** implement without explicit issue creation & approval.)
173 | 
174 | ---
175 | 
176 | ## 15. Style & Formatting
177 | 
178 | - Rely on the TypeScript compiler; no implicit any.
179 | - Prefer `const` and readonly arrays where practical.
180 | - Use nullish coalescing & optional chaining.
181 | - Use descriptive variable names (`shown`, `availability`, etc.).
182 | 
183 | ---
184 | 
185 | ## 16. When in Doubt
186 | 
187 | If a change touches:
188 | 
189 | - Endpoint contracts
190 | - Security (auth / binding)
191 | - Status semantics
192 | 
193 | …then treat it as a **feature change** and document thoroughly.
194 | 
195 | ---
196 | 
197 | Happy bridging!
198 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | # Copilot Bridge (VS Code Extension)
4 | 
5 | [![Visual Studio Marketplace Version](https://img.shields.io/visual-studio-marketplace/v/thinkability.copilot-bridge)](https://marketplace.visualstudio.com/items?itemName=thinkability.copilot-bridge)
6 | [![Visual Studio Marketplace Installs](https://img.shields.io/visual-studio-marketplace/d/thinkability.copilot-bridge?label=installs)](https://marketplace.visualstudio.com/items?itemName=thinkability.copilot-bridge)
7 | 
8 | > **A local interface for GitHub Copilot built on the official VS Code Language Models API.**
9 | 
10 | Copilot Bridge lets you access your personal Copilot session locally through an OpenAI-compatible interface — **without calling any private GitHub endpoints**. It’s designed for developers experimenting with AI agents, CLI tools, and custom integrations inside their own editor environment.
11 | 
12 | > **API Surface:** Uses only the public VS Code **Language Model API** (`vscode.lm`) for model discovery and chat. No private Copilot endpoints, tokens, or protocol emulation.
13 | ---
14 | 
15 | ## ✨ Key Features
16 | 
17 | - Local HTTP server locked to `127.0.0.1`
18 | - OpenAI-style `/v1/chat/completions`, `/v1/models`, and `/health` endpoints
19 | - SSE streaming for incremental responses
20 | - Real-time model discovery via VS Code Language Model API
21 | - Concurrency and rate limits to keep VS Code responsive
22 | - Mandatory bearer token authentication with `HTTP 401 Unauthorized` protection
23 | - Lightweight Polka-based server integrated directly with the VS Code runtime
24 | 
25 | ---
26 | 
27 | ## ⚖️ Compliance & Usage Notice
28 | 
29 | - Uses **only** the public VS Code Language Models API.
30 | - Does **not** contact or emulate private GitHub Copilot endpoints.
31 | - Requires an active GitHub Copilot subscription.
32 | - Subject to [GitHub Terms of Service](https://docs.github.com/en/site-policy/github-terms/github-terms-of-service) and the [GitHub Acceptable Use Policy](https://docs.github.com/en/site-policy/acceptable-use-policies/github-acceptable-use-policies).
33 | - Intended for **personal, local experimentation** only.
34 | - No affiliation with GitHub or Microsoft.
35 | 
36 | > ❗ The author provides this project as a technical demonstration. Use responsibly and ensure your own compliance with applicable terms.
37 | 38 | --- 39 | 40 | ## 🚧 Scope and Limitations 41 | 42 | | ✅ Supported | 🚫 Not Supported | 43 | |--------------|------------------| 44 | | Local, single-user loopback use | Multi-user or shared deployments | 45 | | Testing local agents or CLI integrations | Continuous automation or CI/CD use | 46 | | Educational / experimental use | Public or commercial API hosting | 47 | 48 | --- 49 | 50 | ## 🧠 Motivation 51 | 52 | Copilot Bridge was built to demonstrate how VS Code’s **Language Model API** can power local-first AI tooling. 53 | It enables developers to reuse OpenAI-compatible SDKs and workflows while keeping all traffic on-device. 54 | 55 | This is **not** a Copilot proxy, wrapper, or reverse-engineered client — it’s a bridge built entirely on the editor’s public extension surface. 56 | 57 | --- 58 | 59 | ## ⚠️ Disclaimer 60 | 61 | This software is provided *as is* for research and educational purposes. 62 | Use at your own risk. 63 | You are solely responsible for ensuring compliance with your Copilot license and applicable terms. 64 | The author collects no data and has no access to user prompts or completions. 65 | 66 | --- 67 | 68 | ## 🚀 Quick Start 69 | 70 | ### Requirements 71 | - Visual Studio Code Desktop with GitHub Copilot signed in 72 | - (Optional) Node.js 18+ and npm for local builds 73 | 74 | ### Installation 75 | 76 | 1. Install from the [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=thinkability.copilot-bridge) or load the `.vsix`. 77 | 2. Set **Copilot Bridge › Token** to a secret value (Settings UI or JSON). Requests without this token receive `401 Unauthorized`. 78 | 3. Open the **Command Palette** → “Copilot Bridge: Enable” to start the bridge. 79 | 4. Check status anytime with “Copilot Bridge: Status” or by hovering the status bar item (it links directly to the token setting when missing). 80 | 5. Keep VS Code open — the bridge runs only while the editor is active. 81 | 82 | --- 83 | 84 | ## 📡 Using the Bridge 85 | 86 | Replace `PORT` with the one shown in “Copilot Bridge: Status”. Use the same token value you configured in VS Code: 87 | 88 | ```bash 89 | export PORT=12345 # Replace with the port from the status command 90 | export BRIDGE_TOKEN="" 91 | ``` 92 | 93 | List models: 94 | 95 | ```bash 96 | curl -H "Authorization: Bearer $BRIDGE_TOKEN" \ 97 | http://127.0.0.1:$PORT/v1/models 98 | ``` 99 | 100 | Stream a completion: 101 | 102 | ```bash 103 | curl -N \ 104 | -H "Authorization: Bearer $BRIDGE_TOKEN" \ 105 | -H "Content-Type: application/json" \ 106 | -d '{"model":"gpt-4o-copilot","messages":[{"role":"user","content":"hello"}]}' \ 107 | http://127.0.0.1:$PORT/v1/chat/completions 108 | ``` 109 | 110 | Use with OpenAI SDK: 111 | 112 | ```ts 113 | import OpenAI from "openai"; 114 | 115 | if (!process.env.BRIDGE_TOKEN) { 116 | throw new Error("Set BRIDGE_TOKEN to the same token configured in VS Code settings (bridge.token)."); 117 | } 118 | 119 | const client = new OpenAI({ 120 | baseURL: `http://127.0.0.1:${process.env.PORT}/v1`, 121 | apiKey: process.env.BRIDGE_TOKEN, 122 | }); 123 | 124 | const rsp = await client.chat.completions.create({ 125 | model: "gpt-4o-copilot", 126 | messages: [{ role: "user", content: "hello" }], 127 | }); 128 | 129 | console.log(rsp.choices[0].message?.content); 130 | ``` 131 | 132 | --- 133 | 134 | ## 🧩 Architecture 135 | 136 | The extension uses VS Code’s built-in Language Model API to select available Copilot chat models. 
137 | Requests are normalized and sent through VS Code itself, never directly to GitHub Copilot servers.
138 | Responses stream back via SSE with concurrency controls for editor stability.
139 | 
140 | 
141 | ### How it calls models (pseudocode)
142 | 
143 | ```ts
144 | import * as vscode from "vscode";
145 | 
146 | // Prefer a Copilot model; fall back to any registered chat model.
147 | const models = await vscode.lm.selectChatModels({ vendor: "copilot" });
148 | const model = models[0] ?? (await vscode.lm.selectChatModels())[0];
149 | if (!model) throw new Error("No language models available (vscode.lm)");
150 | 
151 | const response = await model.sendRequest(
152 |   [vscode.LanguageModelChatMessage.User("hello")], {},
153 |   new vscode.CancellationTokenSource().token
154 | );
155 | 
156 | // Stream chunks → SSE to localhost client; no private Copilot protocol used.
157 | for await (const fragment of response.text) process.stdout.write(fragment);
158 | ```
159 | 
160 | ---
161 | 
162 | 
163 | ## 🔧 Configuration
164 | 
165 | | Setting | Default | Description |
166 | |----------|----------|-------------|
167 | | `bridge.enabled` | false | Start automatically with VS Code |
168 | | `bridge.port` | 0 | Ephemeral port |
169 | | `bridge.token` | "" | Bearer token required for every request (leave empty to block API access) |
170 | | `bridge.historyWindow` | 3 | Retained conversation turns |
171 | | `bridge.maxConcurrent` | 1 | Max concurrent requests |
172 | | `bridge.verbose` | false | Enable verbose logging |
173 | 
174 | > ℹ️ The bridge always binds to `127.0.0.1` and cannot be exposed to other interfaces.
175 | 
176 | > 💡 Hover the status bar item to confirm the token status; missing tokens show a warning link that opens the relevant setting.
177 | 
178 | ---
179 | 
180 | ## 🪶 Logging & Diagnostics
181 | 
182 | 1. Enable `bridge.verbose`.
183 | 2. Open **View → Output → “Copilot Bridge”**.
184 | 3. Observe connection events, health checks, and streaming traces.
185 | 
186 | ---
187 | 
188 | ## 🔒 Security
189 | 
190 | > ⚠️ This extension is intended for **localhost use only**.
191 | > Never expose the endpoint to external networks.
192 | 
193 | - Loopback-only binding (non-configurable)
194 | - Mandatory bearer token gating (requests rejected without the correct header)
195 | - **Telemetry:** none collected or transmitted.
196 | 
197 | ---
198 | 
199 | ## 🧾 Changelog
200 | 
201 | - **v1.2.0** – Authentication token now mandatory; status bar hover warns when missing
202 | - **v1.1.1** – Locked the HTTP server to localhost for improved safety
203 | - **v1.1.0** – Performance improvements (~30%)
204 | - **v1.0.0** – Modular core, OpenAI typings, tool-calling support
205 | - **v0.2.2** – Polka integration, improved model family selection
206 | - **v0.1.0–0.1.5** – Initial releases and bug fixes
207 | 
208 | ---
209 | 
210 | ## 🤝 Contributing
211 | 
212 | Pull requests and discussions are welcome.
213 | Please open an [issue](https://github.com/larsbaunwall/vscode-copilot-bridge/issues) to report bugs or suggest features.
214 | 
215 | ---
216 | 
217 | ## 📄 License
218 | 
219 | Apache 2.0 © 2025 [Lars Baunwall]
220 | Independent project — not affiliated with GitHub or Microsoft.
221 | For compliance or takedown inquiries, please open a GitHub issue.
222 | 
223 | ---
224 | 
225 | ### ❓ FAQ
226 | 
227 | #### Can I run this on a server?
228 | No. Copilot Bridge is designed for **localhost-only**, single-user, interactive use.
229 | Running it on a shared host or exposing it over a network would violate its intended scope and could breach the Copilot terms.
230 | The host is bound to `127.0.0.1` (non-configurable).
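A quick optional check (assuming a macOS/Linux shell with `netstat` available, and `PORT` set as in the usage examples above) to confirm the listener is loopback-only:

```bash
# The local address on the bridge's listening socket should always be 127.0.0.1.
netstat -an | grep "$PORT"
```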
231 | 
232 | #### Does it send any data to the author?
233 | No. The bridge never transmits telemetry, prompts, or responses to any external service.
234 | All traffic stays on your machine and flows through VS Code’s built-in model interface.
235 | 
236 | #### What happens if Copilot is unavailable?
237 | The `/health` endpoint will report a diagnostic reason such as `copilot_model_unavailable` or `missing_language_model_api`.
238 | This means VS Code currently has no accessible models via `vscode.lm`. Once Copilot becomes available again, the bridge will resume automatically.
239 | 
240 | #### Can I use non-Copilot models?
241 | Yes, if other providers register with `vscode.lm`. The bridge will detect any available chat-capable models and use the first suitable one it finds.
242 | 
243 | #### How is this different from reverse-engineered Copilot proxies?
244 | Reverse-engineered proxies call private endpoints directly or reuse extracted tokens.
245 | Copilot Bridge does neither—it communicates only through VS Code’s sanctioned **Language Model API**, keeping usage transparent and compliant.
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 | 
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 | 1. Definitions.
8 | 
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 | 
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 | 
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 | 
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 | 
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 | 
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/src/http/routes/chat.ts:
--------------------------------------------------------------------------------
1 | import * as vscode from 'vscode';
2 | import type { IncomingMessage, ServerResponse } from 'http';
3 | import { state } from '../../state';
4 | import {
5 |   isChatCompletionRequest,
6 |   type ChatCompletionRequest,
7 |   normalizeMessagesLM,
8 |   convertOpenAIToolsToLM,
9 |   convertFunctionsToTools,
10 |   type Tool,
11 | } from '../../messages';
12 | import { readJson, writeErrorResponse, writeJson } from '../utils';
13 | import { verbose } from '../../log';
14 | import { getModel, hasLMApi } from '../../models';
15 | import { getBridgeConfig } from '../../config';
16 | import type {
17 |   ChatCompletionContext,
18 |   ProcessedResponseData,
19 |   OpenAIResponse,
20 |   OpenAIMessage,
21 |   OpenAIToolCall,
22 |   OpenAIChoice,
23 | } from '../../types/openai-types';
24 | 
25 | /**
26 |  * Handles OpenAI-compatible chat completion requests with support for streaming and tool calling.
27 |  * @param req - HTTP request object
28 |  * @param res - HTTP response object
29 |  */
30 | export async function handleChatCompletion(req: IncomingMessage, res: ServerResponse): Promise<void> {
31 |   state.activeRequests++;
32 |   verbose(`Request started (active=${state.activeRequests})`);
33 | 
34 |   try {
35 |     const body = await readJson(req);
36 |     if (!isChatCompletionRequest(body)) {
37 |       writeErrorResponse(res, 400, 'invalid request', 'invalid_request_error', 'invalid_payload');
38 |       return;
39 |     }
40 | 
41 |     const model = await resolveModel(body.model, res);
42 |     if (!model) {
43 |       return;
44 |     }
45 | 
46 |     const config = getBridgeConfig();
47 |     const mergedTools = mergeTools(body);
48 |     const lmMessages = normalizeMessagesLM(body.messages, config.historyWindow);
49 |     const lmTools = convertOpenAIToolsToLM(mergedTools);
50 |     const requestOptions: vscode.LanguageModelChatRequestOptions = lmTools.length > 0
51 |       ? { tools: lmTools }
52 |       : {};
53 | 
54 |     const modelName = selectResponseModelName(model, body.model);
55 |     const chatContext = createChatCompletionContext(body, mergedTools.length > 0, modelName);
56 |     verbose(`LM request via API model=${model.family || model.id || model.name || 'unknown'} tools=${lmTools.length}`);
57 | 
58 |     const cancellationToken = new vscode.CancellationTokenSource();
59 | 
60 |     try {
61 |       const response = await model.sendRequest(
62 |         lmMessages as vscode.LanguageModelChatMessage[],
63 |         requestOptions,
64 |         cancellationToken.token
65 |       );
66 | 
67 |       try {
68 |         if (chatContext.isStreaming) {
69 |           await streamResponse(res, response, chatContext);
70 |         } else {
71 |           const processed = await collectResponseData(response);
72 |           sendCompletionResponse(res, chatContext, processed, body);
73 |         }
74 |       } finally {
75 |         disposeResponse(response);
76 |       }
77 |     } finally {
78 |       cancellationToken.dispose();
79 |     }
80 |   } catch (error) {
81 |     const errorMessage = error instanceof Error ? error.message : String(error);
82 |     writeErrorResponse(res, 500, errorMessage || 'internal_error', 'server_error', 'internal_error');
83 |   } finally {
84 |     state.activeRequests--;
85 |     verbose(`Request complete (active=${state.activeRequests})`);
86 |   }
87 | }
88 | 
89 | const SSE_HEADERS = {
90 |   'Content-Type': 'text/event-stream',
91 |   'Cache-Control': 'no-cache',
92 |   Connection: 'keep-alive',
93 | } as const;
94 | 
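// Illustrative wire shape of the SSE frames emitted by streamResponse/writeSseData
// below (field values vary per request; see createChunkResponse for the exact type):
//   data: {"id":"chatcmpl-x1","object":"chat.completion.chunk","created":1700000000,"model":"copilot","choices":[{"index":0,"delta":{"content":"Hi"},"finish_reason":null}]}
//   data: {"id":"chatcmpl-x1","object":"chat.completion.chunk","created":1700000000,"model":"copilot","choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}
//   data: [DONE]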
95 | /**
96 |  * Merges tools and deprecated functions, respecting tool_choice configuration.
97 |  * @param body - Chat completion request
98 |  * @returns Filtered array of tools to use
99 |  */
100 | function mergeTools(body: ChatCompletionRequest): Tool[] {
101 |   // Early exit for disabled tools
102 |   if (body.tool_choice === 'none' || body.function_call === 'none') {
103 |     return [];
104 |   }
105 | 
106 |   const baseTools = body.tools ?? [];
107 |   const functionTools = convertFunctionsToTools(body.functions);
108 |   const combined = functionTools.length > 0 ? [...baseTools, ...functionTools] : baseTools;
109 | 
110 |   // Handle specific tool selection
111 |   if (
112 |     body.tool_choice &&
113 |     typeof body.tool_choice === 'object' &&
114 |     'type' in body.tool_choice &&
115 |     body.tool_choice.type === 'function' &&
116 |     'function' in body.tool_choice &&
117 |     body.tool_choice.function &&
118 |     typeof body.tool_choice.function === 'object' &&
119 |     'name' in body.tool_choice.function
120 |   ) {
121 |     const fnName = body.tool_choice.function.name;
122 |     if (typeof fnName === 'string') {
123 |       return combined.filter((tool) => tool.function.name === fnName);
124 |     }
125 |   }
126 | 
127 |   return combined;
128 | }
129 | 
130 | async function resolveModel(
131 |   requestedModel: string | undefined,
132 |   res: ServerResponse
133 | ): Promise<vscode.LanguageModelChat | undefined> {
134 |   const model = await getModel(false, requestedModel);
135 |   if (model) {
136 |     return model;
137 |   }
138 | 
139 |   const hasLanguageModels = hasLMApi();
140 |   if (requestedModel && hasLanguageModels) {
141 |     writeErrorResponse(res, 404, 'model not found', 'invalid_request_error', 'model_not_found', 'not_found');
142 |   } else {
143 |     const reason = hasLanguageModels ? 'copilot_model_unavailable' : 'missing_language_model_api';
144 |     writeErrorResponse(res, 503, 'Copilot unavailable', 'server_error', 'copilot_unavailable', reason);
145 |   }
146 |   return undefined;
147 | }
148 | 
149 | function createChatCompletionContext(
150 |   body: ChatCompletionRequest,
151 |   hasTools: boolean,
152 |   modelName: string
153 | ): ChatCompletionContext {
154 |   return {
155 |     requestId: `chatcmpl-${Math.random().toString(36).slice(2)}`,
156 |     modelName,
157 |     created: Math.floor(Date.now() / 1000),
158 |     hasTools,
159 |     isStreaming: body.stream === true,
160 |   };
161 | }
162 | 
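// Note (per AGENTS.md §3.10): tool-call chunks are emitted as whole `delta.tool_calls`
// entries with JSON-encoded arguments; arguments may arrive as incremental strings, so
// downstream clients should replace accumulated arguments rather than append to them.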
163 | /**
164 |  * Streams chat completion response using Server-Sent Events.
165 |  * @param res - HTTP response object
166 |  * @param response - VS Code Language Model response
167 |  * @param context - Chat completion context
168 |  */
169 | async function streamResponse(
170 |   res: ServerResponse,
171 |   response: vscode.LanguageModelChatResponse,
172 |   context: ChatCompletionContext
173 | ): Promise<void> {
174 |   // Disable Nagle's algorithm for lower latency streaming
175 |   if (res.socket) {
176 |     res.socket.setNoDelay(true);
177 |   }
178 | 
179 |   res.writeHead(200, SSE_HEADERS);
180 |   if (typeof res.flushHeaders === 'function') {
181 |     res.flushHeaders();
182 |   }
183 |   verbose(`SSE start id=${context.requestId}`);
184 | 
185 |   let sawToolCall = false;
186 |   let sentRoleChunk = false;
187 | 
188 |   for await (const part of response.stream) {
189 |     // Send initial role chunk once
190 |     if (!sentRoleChunk) {
191 |       writeSseData(res, createChunkResponse(context, { role: 'assistant' }, null));
192 |       sentRoleChunk = true;
193 |     }
194 | 
195 |     if (isToolCallPart(part)) {
196 |       sawToolCall = true;
197 |       writeSseData(res, createChunkResponse(context, {
198 |         tool_calls: [createToolCall(part)],
199 |       }, null));
200 |     } else {
201 |       const content = extractTextContent(part);
202 |       if (content) {
203 |         writeSseData(res, createChunkResponse(context, { content }, null));
204 |       }
205 |     }
206 |   }
207 | 
208 |   // Ensure role chunk is sent even for empty responses
209 |   if (!sentRoleChunk) {
210 |     writeSseData(res, createChunkResponse(context, { role: 'assistant' }, null));
211 |   }
212 | 
213 |   const finalChunk = createChunkResponse(context, {}, sawToolCall ? 'tool_calls' : 'stop');
214 |   writeSseData(res, finalChunk);
215 |   res.write('data: [DONE]\n\n');
216 |   res.end();
217 |   verbose(`SSE end id=${context.requestId}`);
218 | }
219 | 
220 | /**
221 |  * Collects complete response data from VS Code Language Model stream.
222 |  * @param response - VS Code Language Model response
223 |  * @returns Processed response data with content and tool calls
224 |  */
225 | async function collectResponseData(
226 |   response: vscode.LanguageModelChatResponse
227 | ): Promise<ProcessedResponseData> {
228 |   let content = '';
229 |   const toolCalls: OpenAIToolCall[] = [];
230 | 
231 |   for await (const part of response.stream) {
232 |     if (isToolCallPart(part)) {
233 |       toolCalls.push(createToolCall(part));
234 |     } else {
235 |       content += extractTextContent(part);
236 |     }
237 |   }
238 | 
239 |   const finishReason: OpenAIChoice['finish_reason'] = toolCalls.length > 0 ?
'tool_calls' : 'stop'; 240 | return { content, toolCalls, finishReason }; 241 | } 242 | 243 | function sendCompletionResponse( 244 | res: ServerResponse, 245 | context: ChatCompletionContext, 246 | data: ProcessedResponseData, 247 | requestBody?: ChatCompletionRequest 248 | ): void { 249 | const message = createOpenAIMessage(data, requestBody); 250 | const response: OpenAIResponse = { 251 | id: context.requestId, 252 | object: 'chat.completion', 253 | created: context.created, 254 | model: context.modelName, 255 | choices: [ 256 | { 257 | index: 0, 258 | message, 259 | finish_reason: data.finishReason, 260 | }, 261 | ], 262 | usage: { 263 | prompt_tokens: 0, 264 | completion_tokens: 0, 265 | total_tokens: 0, 266 | }, 267 | }; 268 | 269 | verbose(`Non-stream complete len=${data.content.length} tool_calls=${data.toolCalls.length}`); 270 | writeJson(res, 200, response); 271 | } 272 | 273 | function createOpenAIMessage( 274 | data: ProcessedResponseData, 275 | requestBody?: ChatCompletionRequest 276 | ): OpenAIMessage { 277 | const base: OpenAIMessage = { 278 | role: 'assistant', 279 | content: data.toolCalls.length > 0 ? null : data.content, 280 | }; 281 | 282 | if (data.toolCalls.length === 0) { 283 | return base; 284 | } 285 | 286 | const withTools: OpenAIMessage = { 287 | ...base, 288 | tool_calls: data.toolCalls, 289 | }; 290 | 291 | if (data.toolCalls.length === 1 && requestBody?.function_call !== undefined) { 292 | return { 293 | ...withTools, 294 | function_call: { 295 | name: data.toolCalls[0].function.name, 296 | arguments: data.toolCalls[0].function.arguments, 297 | }, 298 | }; 299 | } 300 | 301 | return withTools; 302 | } 303 | 304 | function createChunkResponse( 305 | context: ChatCompletionContext, 306 | delta: Partial, 307 | finishReason: OpenAIChoice['finish_reason'] | null 308 | ): OpenAIResponse { 309 | return { 310 | id: context.requestId, 311 | object: 'chat.completion.chunk', 312 | created: context.created, 313 | model: context.modelName, 314 | choices: [ 315 | { 316 | index: 0, 317 | delta, 318 | finish_reason: finishReason, 319 | }, 320 | ], 321 | }; 322 | } 323 | 324 | function writeSseData(res: ServerResponse, data: OpenAIResponse): void { 325 | res.write(`data: ${JSON.stringify(data)}\n\n`); 326 | } 327 | 328 | function createToolCall(part: vscode.LanguageModelToolCallPart): OpenAIToolCall { 329 | return { 330 | id: part.callId, 331 | type: 'function', 332 | function: { 333 | name: part.name, 334 | arguments: JSON.stringify(part.input), 335 | }, 336 | }; 337 | } 338 | 339 | function isToolCallPart(part: unknown): part is vscode.LanguageModelToolCallPart { 340 | return ( 341 | part !== null && 342 | typeof part === 'object' && 343 | 'callId' in part && 344 | 'name' in part && 345 | 'input' in part 346 | ); 347 | } 348 | 349 | function extractTextContent(part: unknown): string { 350 | if (typeof part === 'string') { 351 | return part; 352 | } 353 | 354 | if (part !== null && typeof part === 'object' && 'value' in part) { 355 | return String((part as { value: unknown }).value) || ''; 356 | } 357 | 358 | return ''; 359 | } 360 | 361 | function disposeResponse(response: vscode.LanguageModelChatResponse): void { 362 | const disposable = response as { dispose?: () => void }; 363 | if (typeof disposable.dispose === 'function') { 364 | disposable.dispose(); 365 | } 366 | } 367 | 368 | /** 369 | * Selects the most appropriate model name for the response. 370 | * Prioritizes requested model, then model ID, family, name, and finally defaults to 'copilot'. 
371 | * @param model - VS Code Language Model instance 372 | * @param requestedModel - Model name from the request 373 | * @returns Model name to use in response 374 | */ 375 | function selectResponseModelName( 376 | model: vscode.LanguageModelChat, 377 | requestedModel: string | undefined 378 | ): string { 379 | return requestedModel ?? model.id ?? model.family ?? model.name ?? 'copilot'; 380 | } 381 | --------------------------------------------------------------------------------