├── .git-blame-ignore-revs ├── .gitignore ├── .npmignore ├── .prettierignore ├── .prettierrc.json ├── .vscode-test.mjs ├── .vscode ├── extensions.json ├── launch.json └── settings.json ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── SECURITY.md ├── SUPPORT.md ├── build ├── base.yml ├── build-tracer.ts ├── postcompile.ts └── postinstall.ts ├── examples ├── README.md ├── file-contents.tsx ├── history.tsx ├── package-lock.json ├── package.json └── tsconfig.json ├── package-lock.json ├── package.json ├── src ├── base │ ├── htmlTracer.ts │ ├── htmlTracerTypes.ts │ ├── index.ts │ ├── jsonTypes.ts │ ├── materialized.ts │ ├── once.ts │ ├── output │ │ ├── mode.ts │ │ ├── openaiConvert.ts │ │ ├── openaiTypes.ts │ │ ├── rawTypes.ts │ │ └── vscode.ts │ ├── promptElement.ts │ ├── promptElements.tsx │ ├── promptRenderer.ts │ ├── results.ts │ ├── test │ │ ├── elements.test.tsx │ │ ├── materialized.test.ts │ │ ├── renderer.bench.tsx │ │ ├── renderer.test.tsx │ │ └── testUtils.ts │ ├── tokenizer │ │ ├── cl100kBaseTokenizer.ts │ │ ├── cl100k_base.tiktoken │ │ └── tokenizer.ts │ ├── tracer.ts │ ├── tsx-globals.ts │ ├── tsx.ts │ ├── types.ts │ ├── util │ │ ├── arrays.ts │ │ ├── assert.ts │ │ └── vs │ │ │ ├── common │ │ │ ├── charCode.ts │ │ │ ├── marshallingIds.ts │ │ │ ├── path.ts │ │ │ ├── platform.ts │ │ │ ├── process.ts │ │ │ └── uri.ts │ │ │ └── nls.ts │ ├── vscode.d.ts │ └── vscodeTypes.d.ts └── tracer │ ├── hooks.ts │ ├── i18n.tsx │ ├── index.css │ ├── index.tsx │ ├── node.tsx │ └── tsconfig.json └── tsconfig.json /.git-blame-ignore-revs: -------------------------------------------------------------------------------- 1 | a5803dad4d5e70d74d162e1f423223d68e975587 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | dist/ 3 | 4 | # Logs 5 | logs 6 | *.log 7 | npm-debug.log* 8 | yarn-debug.log* 9 | yarn-error.log* 
10 | lerna-debug.log* 11 | .pnpm-debug.log* 12 | 13 | # Diagnostic reports (https://nodejs.org/api/report.html) 14 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 15 | 16 | # Runtime data 17 | pids 18 | *.pid 19 | *.seed 20 | *.pid.lock 21 | 22 | # Directory for instrumented libs generated by jscoverage/JSCover 23 | lib-cov 24 | 25 | # Coverage directory used by tools like istanbul 26 | coverage 27 | *.lcov 28 | 29 | # nyc test coverage 30 | .nyc_output 31 | 32 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 33 | .grunt 34 | 35 | # Bower dependency directory (https://bower.io/) 36 | bower_components 37 | 38 | # node-waf configuration 39 | .lock-wscript 40 | 41 | # Compiled binary addons (https://nodejs.org/api/addons.html) 42 | build/Release 43 | 44 | # Dependency directories 45 | node_modules/ 46 | jspm_packages/ 47 | 48 | # Snowpack dependency directory (https://snowpack.dev/) 49 | web_modules/ 50 | 51 | # TypeScript cache 52 | *.tsbuildinfo 53 | 54 | # Optional npm cache directory 55 | .npm 56 | 57 | # Optional eslint cache 58 | .eslintcache 59 | 60 | # Optional stylelint cache 61 | .stylelintcache 62 | 63 | # Microbundle cache 64 | .rpt2_cache/ 65 | .rts2_cache_cjs/ 66 | .rts2_cache_es/ 67 | .rts2_cache_umd/ 68 | 69 | # Optional REPL history 70 | .node_repl_history 71 | 72 | # Output of 'npm pack' 73 | *.tgz 74 | 75 | # Yarn Integrity file 76 | .yarn-integrity 77 | 78 | # dotenv environment variable files 79 | .env 80 | .env.development.local 81 | .env.test.local 82 | .env.production.local 83 | .env.local 84 | 85 | # parcel-bundler cache (https://parceljs.org/) 86 | .cache 87 | .parcel-cache 88 | 89 | # Next.js build output 90 | .next 91 | out 92 | 93 | # Nuxt.js build / generate output 94 | .nuxt 95 | dist 96 | 97 | # Gatsby files 98 | .cache/ 99 | # Comment in the public line in if your project uses Gatsby and not Next.js 100 | # https://nextjs.org/blog/next-9-1#public-directory-support 101 | # public 102 | 103 | # vuepress 
build output 104 | .vuepress/dist 105 | 106 | # vuepress v2.x temp and cache directory 107 | .temp 108 | .cache 109 | 110 | # Docusaurus cache and generated files 111 | .docusaurus 112 | 113 | # Serverless directories 114 | .serverless/ 115 | 116 | # FuseBox cache 117 | .fusebox/ 118 | 119 | # DynamoDB Local files 120 | .dynamodb/ 121 | 122 | # TernJS port file 123 | .tern-port 124 | 125 | # Stores VSCode versions used for testing VSCode extensions 126 | .vscode-test 127 | 128 | # yarn v2 129 | .yarn/cache 130 | .yarn/unplugged 131 | .yarn/build-state.yml 132 | .yarn/install-state.gz 133 | .pnp.* 134 | 135 | src/base/htmlTracerSrc.ts 136 | 137 | *.cpuprofile 138 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | src/ 2 | build/ 3 | .vscode/ 4 | .gitignore 5 | .prettierignore 6 | .prettierrc.json 7 | .vscode-test.mjs 8 | .vscode-test/ 9 | *.tgz 10 | tsconfig.json 11 | *.md 12 | dist/base/test/ 13 | *.map 14 | dist/base/tokenizer/cl100kBaseTokenizer*.* 15 | dist/base/tokenizer/cl100k_base.tiktoken 16 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | dist/** 2 | .vscode/** 3 | .vscode-test/** 4 | .git-blame-ignore-revs 5 | **/*.js 6 | src/base/util/vs/** 7 | SECURITY.md 8 | SUPPORT.md 9 | -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "useTabs": true, 3 | "arrowParens": "avoid", 4 | "printWidth": 100, 5 | "singleQuote": true 6 | } 7 | -------------------------------------------------------------------------------- /.vscode-test.mjs: -------------------------------------------------------------------------------- 1 | import { defineConfig } from '@vscode/test-cli'; 2 
| 3 | export default defineConfig({ 4 | files: 'dist/base/test/*.test.js', 5 | version: 'insiders', 6 | launchArgs: ['--disable-extensions', '--profile-temp'], 7 | mocha: { 8 | ui: 'tdd', 9 | color: true, 10 | forbidOnly: !!process.env.CI, 11 | timeout: 5000, 12 | }, 13 | }); 14 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": ["esbenp.prettier-vscode"] 3 | } 4 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "configurations": [ 3 | { 4 | "name": "Extension tests", 5 | "type": "extensionHost", 6 | "request": "launch", 7 | "testConfiguration": "${workspaceFolder}/.vscode-test.mjs", 8 | "sourceMaps": true, 9 | "smartStep": true, 10 | "internalConsoleOptions": "openOnSessionStart", 11 | "outFiles": [ 12 | "${workspaceFolder}/dist/**/*.js", 13 | "!**/node_modules/**" 14 | ], 15 | }, 16 | ] 17 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "git.branchProtection": ["main"], 3 | "git.branchProtectionPrompt": "alwaysCommitToNewBranch", 4 | "files.trimTrailingWhitespace": true, 5 | "editor.defaultFormatter": "esbenp.prettier-vscode", 6 | "editor.formatOnSave": true, 7 | "extension-test-runner.extractSettings": { 8 | "suite": ["suite"], 9 | "test": ["test"], 10 | "extractWith": "syntax" 11 | }, 12 | "extension-test-runner.debugOptions": { 13 | "outFiles": ["${workspaceFolder}/dist/**/*.js"] 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## 
0.3.0-alpha.7 4 | 5 | - **feat:** add a `passPriority` attribute for logical wrapper elements 6 | - **fix:** tool calls not being visible in tracer 7 | 8 | ## 0.3.0-alpha.6 9 | 10 | - **fix:** containers without priority set should have max priority 11 | 12 | ## 0.3.0-alpha.5 13 | 14 | - **feat:** add `Expandable` elements to the renderer. See the [readme](./README.md#expandable-text) for details. 15 | 16 | ## 0.3.0-alpha.4 17 | 18 | - **feat:** enhance the `HTMLTracer` to allow consumers to visualize element pruning order 19 | 20 | ## 0.3.0-alpha.3 21 | 22 | - **feat:** add `MetadataMap.getAll()` 23 | - **fix:** don't drop empty messages that have tool calls 24 | 25 | ## 0.3.0-alpha.2 26 | 27 | - **fix:** update to match proposed VS Code tools API 28 | 29 | ## 0.3.0-alpha.1 30 | 31 | - ⚠️ **breaking refactor:** `priority` is now local within tree elements 32 | 33 | Previously, in order to calculate elements to be pruned if the token budget was exceeded, all text in the prompt was collected into a flat list and lowest `priority` elements were removed first; the priority value was global. However, this made composition difficult because all elements needed to operate within the domain of priorities provided by the prompt. 34 | 35 | In this version, priorities are handled as a tree. To prune elements, the lowest priority element is selected among siblings recursively, until a leaf node is selected and removed. Take the tree of elements: 36 | 37 | ``` 38 | A[priority=1] 39 | A1[priority=50] 40 | A2[priority=200] 41 | B[priority=2] 42 | B1[priority=0] 43 | B2[priority=100] 44 | ``` 45 | 46 | The pruning order is now `A1`, `A2`, `B1`, then `B2`. Previously it would have been `B1`, `A1`, `B2`, `A2`. In a tiebreaker between two sibling elements with the same priority, the element with the lowest-priority direct child is chosen for pruning. 
For example, in the case 47 | 48 | ``` 49 | A 50 | A1[priority=50] 51 | A2[priority=200] 52 | B 53 | B1[priority=0] 54 | B2[priority=100] 55 | ``` 56 | 57 | The pruning order is `B1`, `A1`, `B2`, `A2`. 58 | 59 | - **feature:** new `LegacyPrioritization` element 60 | 61 | There is a new `LegacyPrioritization` which can be used to wrap other elements in order to fall back to the classic global prioritization model. This is a stepping stone and will be removed in future versions. 62 | 63 | ```tsx 64 | 65 | ... 66 | ... 67 | 68 | ``` 69 | 70 | - **feature:** new `Chunk` element 71 | 72 | The new `Chunk` element can be used to group elements that should either be all retained, or all pruned. This is similar to a `TextChunk`, but it also allows for extrinsic children. For example, you might wrap content like this to ensure the `FileLink` isn't present without its `FileContents` and vice-versa: 73 | 74 | ```tsx 75 | 76 | The file I'm editing is:
77 |
78 | 79 |
80 | ``` 81 | 82 | - **feature:** `local` metadata 83 | 84 | Previously, metadata in a prompt was always globally available regardless of where it was positioned and whether the position it was in survived pruning. There is a new `local` flag you can apply such that the metadata is only retained if the element it's in was included in the prompt: 85 | 86 | ```tsx 87 | 88 | 89 | Hello world! 90 | 91 | ``` 92 | 93 | Internally, references are now represented as local metadata. 94 | 95 | - ⚠️ **breaking refactor:** metadata is now returned from `render()` 96 | 97 | Rather than being a separate property on the renderer, metadata is now returned in the `RenderPromptResult`. 98 | 99 | - **refactor:** whitespace tightening 100 | 101 | The new tree-based rendering allows us to be slightly smarter in how line breaks are inserted and retained. The following rules are in place: 102 | 103 | - Line breaks `
` always ensure that there's a line break at the location. 104 | - The contents of any tag will add a line break before them if one does not exist (between `HiBye`, for example.) 105 | - A line break is not automatically inserted for siblings directly following other text (for example, there is no line break in `Check out `) 106 | - Leading and trailing whitespace is removed from chat messages. 107 | 108 | This may result in some churn in existing elements. 109 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # @vscode/prompt-tsx 2 | 3 | This library enables you to declare prompts using TSX when you develop VS Code extensions that integrate with Copilot Chat. To learn more, check out our [documentation](https://code.visualstudio.com/api/extension-guides/chat) or fork our quickstart [sample](https://github.com/microsoft/vscode-extension-samples/tree/main/chat-sample). 4 | 5 | ## Why TSX? 6 | 7 | As AI engineers, our products communicate with large language models using chat messages composed of text prompts. While developing Copilot Chat, we've found that composing prompts with just bare strings is unwieldy and frustrating. 8 | 9 | Some of the challenges we ran into include: 10 | 11 | 1. We used either programmatic string concatenation or template strings for composing prompts. Programmatic string concatenation made prompt text increasingly difficult to read, maintain, and update over time. Template string-based prompts were rigid and prone to issues like unnecessary whitespace. 12 | 2. In both cases, our prompts and RAG-generated context could not adapt to changing context window constraints as we upgraded our models. Prompts are ultimately bare strings, which makes them hard to edit once they are composed via string concatenation. 
13 | 14 | To improve the developer experience for writing prompts in language model-based VS Code extensions like Copilot Chat, we built the TSX-based prompt renderer that we've extracted in this library. This has enabled us to compose expressive, flexible prompts that cleanly convert to chat messages. Our prompts are now able to evolve with our product and dynamically adapt to each model's context window. 15 | 16 | ### Key concepts 17 | 18 | In this library, prompts are represented as a tree of TSX components that are flattened into a list of chat messages. Each TSX node in the tree has a `priority` that is conceptually similar to a `zIndex` (higher number == higher priority). 19 | 20 | If a rendered prompt has more message tokens than can fit into the available context window, the prompt renderer prunes messages with the lowest priority from the `ChatMessage`s result, preserving the order in which they were declared. This means your extension code can safely declare TSX components for potentially large pieces of context like conversation history and codebase context. 21 | 22 | TSX components at the root level must render to `ChatMessage`s at the root level. `ChatMessage`s may have TSX components as children, but they must ultimately render to text. You can also have `TextChunk`s within `ChatMessage`s, which allows you to reduce less important parts of a chat message under context window limits without losing the full message. 23 | 24 | ## Usage 25 | 26 | ### Workspace Setup 27 | 28 | You can install this library in your extension using the command 29 | 30 | ``` 31 | npm install --save @vscode/prompt-tsx 32 | ``` 33 | 34 | This library exports a `renderPrompt` utility for rendering a TSX component to `vscode.LanguageModelChatMessage`s. 35 | 36 | To enable TSX use in your extension, add the following configuration options to your `tsconfig.json`: 37 | 38 | ```json 39 | { 40 | "compilerOptions": { 41 | // ... 
42 | "jsx": "react", 43 | "jsxFactory": "vscpp", 44 | "jsxFragmentFactory": "vscppf" 45 | } 46 | // ... 47 | } 48 | ``` 49 | 50 | Note: if your codebase depends on both `@vscode/prompt-tsx` and another library that uses JSX, for example in a monorepo where a parent folder has dependencies on React, you may encounter compilation errors when trying to add this library to your project. This is because [by default](https://www.typescriptlang.org/tsconfig/#types%5D), TypeScript includes all `@types` packages during compilation. You can address this by explicitly listing the types that you want considered during compilation, e.g.: 51 | 52 | ```json 53 | { 54 | "compilerOptions": { 55 | "types": ["node", "jest", "express"] 56 | } 57 | } 58 | ``` 59 | 60 | ### Rendering a Prompt 61 | 62 | Next, your extension can use `renderPrompt` to render a TSX prompt. Here is an example of using TSX prompts in a Copilot chat participant that suggests SQL queries based on database context: 63 | 64 | ```ts 65 | import { renderPrompt } from '@vscode/prompt-tsx'; 66 | import * as vscode from 'vscode'; 67 | import { TestPrompt } from './prompt'; 68 | 69 | const participant = vscode.chat.createChatParticipant( 70 | 'mssql', 71 | async ( 72 | request: vscode.ChatRequest, 73 | context: vscode.ChatContext, 74 | response: vscode.ChatResponseStream, 75 | token: vscode.CancellationToken 76 | ) => { 77 | response.progress('Reading database context...'); 78 | 79 | const models = await vscode.lm.selectChatModels({ family: 'gpt-4' }); 80 | if (models.length === 0) { 81 | // No models available, return early 82 | return; 83 | } 84 | const chatModel = models[0]; 85 | 86 | // Render TSX prompt 87 | const { messages } = await renderPrompt( 88 | TestPrompt, 89 | { userQuery: request.prompt }, 90 | { modelMaxPromptTokens: 4096 }, 91 | chatModel 92 | ); 93 | 94 | const chatRequest = await chatModel.sendRequest(messages, {}, token); 95 | 96 | // ... 
Report stream data to VS Code UI 97 | } 98 | ); 99 | ``` 100 | 101 | Here is how you would declare the TSX prompt rendered above: 102 | 103 | ````tsx 104 | import { 105 | AssistantMessage, 106 | BasePromptElementProps, 107 | PromptElement, 108 | PromptSizing, 109 | UserMessage, 110 | } from '@vscode/prompt-tsx'; 111 | import * as vscode from 'vscode'; 112 | 113 | export interface PromptProps extends BasePromptElementProps { 114 | userQuery: string; 115 | } 116 | 117 | export interface PromptState { 118 | creationScript: string; 119 | } 120 | 121 | export class TestPrompt extends PromptElement { 122 | override async prepare() {} 123 | 124 | async render(state: PromptState, sizing: PromptSizing) { 125 | const sqlExtensionApi = await vscode.extensions.getExtension('ms-mssql.mssql')?.activate(); 126 | const creationScript = await sqlExtensionApi.getDatabaseCreateScript?.(); 127 | 128 | return ( 129 | <> 130 | 131 | You are a SQL expert. 132 |
133 | Your task is to help the user craft SQL queries that perform their task. 134 |
135 | You should suggest SQL queries that are performant and correct. 136 |
137 | Return your suggested SQL query in a Markdown code block that begins with ```sql and ends 138 | with ```. 139 |
140 |
141 | 142 | Here are the creation scripts that were used to create the tables in my database. Pay 143 | close attention to the tables and columns that are available in my database: 144 |
145 | {state.creationScript} 146 |
147 | {this.props.userQuery} 148 |
149 | 150 | ); 151 | } 152 | } 153 | ```` 154 | 155 | Please note: 156 | 157 | - If your prompt does asynchronous work e.g. VS Code extension API calls or additional requests to the Copilot API for chunk reranking, you can precompute this state in an optional async `prepare` method. `prepare` is called before `render` and the prepared state will be passed back to your prompt component's sync `render` method. 158 | - Newlines are not preserved in JSX text or between JSX elements when rendered, and must be explicitly declared with the builtin `
` attribute. 159 | 160 | ### Prioritization 161 | 162 | If a rendered prompt has more message tokens than can fit into the available context window, the prompt renderer prunes messages with the lowest priority from the `ChatMessage`s result. 163 | 164 | In the above example, each message had the same priority, so they would be pruned in the order in which they were declared, but we could control that by passing a priority to each element: 165 | 166 | ```jsx 167 | <> 168 | You are a SQL expert... 169 | 170 | Here are the creation scripts that were used to create the tables in my database... 171 | 172 | {this.props.userQuery} 173 | 174 | ``` 175 | 176 | In this case, a very long `userQuery` would get pruned from the output first if it's too long. Priorities are local in the element tree, so for example the tree of nodes... 177 | 178 | ```html 179 | 180 | A 181 | B 182 | 183 | 184 | C 185 | D 186 | 187 | ``` 188 | 189 | ...would be pruned in the order `B->A->D->C`. If two sibling elements share the same priority, the renderer looks ahead at their direct children and picks whichever one has a child with the lowest priority: if the `SystemMessage` and `UserMessage` in the above example did not declare priorities, the pruning order would be `B->D->A->C`. 190 | 191 | Continuous text strings and elements can both be pruned from the tree. If you have a set of elements that you want to either be included all the time or none of the time, you can use the simple `Chunk` utility element: 192 | 193 | ```html 194 | 195 | The file I'm editing is: 196 | 197 | ``` 198 | 199 | #### Passing Priority 200 | 201 | In some cases, you may have logical wrapper elements which contain other elements which should share the parent's priority scope.
You can use the `passPriority` attribute for this: 202 | 203 | ```tsx 204 | class MyContainer extends PromptElement { 205 | render() { 206 | return <>{this.props.children}; 207 | } 208 | } 209 | 210 | const myPrompt = ( 211 | 212 | 213 | 214 | 215 | 216 | 217 | 218 | ); 219 | ``` 220 | 221 | In this case where we have a wrapper element which includes the children in its own output, the prune order would be `ChildA`, `ChildC`, then `ChildB`. 222 | 223 | ### Flex Behavior 224 | 225 | Wholesale pruning is not always ideal. Instead, we'd prefer to include as much of the query as possible. To do this, we can use the `flexGrow` property, which allows an element to use the remainder of its parent's token budget when it's rendered. 226 | 227 | `prompt-tsx` provides a utility component that supports this use case: `TextChunk`. Given input text, and optionally a delimiting string or regular expression, it'll include as much of the text as possible to fit within its budget: 228 | 229 | ```tsx 230 | <> 231 | You are a SQL expert... 232 | 233 | Here are the creation scripts that were used to create the tables in my database... 234 | 235 | 236 | {this.props.userQuery} 237 | 238 | 239 | ``` 240 | 241 | When `flexGrow` is set for an element, other elements are rendered first, and then the `flexGrow` element is rendered and given the remaining unused token budget from its container as a parameter in the `PromptSizing` passed to its `prepare` and `render` methods. 
Here's a simplified version of the `TextChunk` component: 242 | 243 | ```tsx 244 | class SimpleTextChunk extends PromptElement<{ text: string }, string> { 245 | prepare(sizing: PromptSizing): Promise { 246 | const words = this.props.text.split(' '); 247 | let str = ''; 248 | 249 | for (const word of words) { 250 | if (tokenizer.tokenLength(str + ' ' + word) > sizing.tokenBudget) { 251 | break; 252 | } 253 | 254 | str += ' ' + word; 255 | } 256 | 257 | return str; 258 | } 259 | 260 | render(content: string) { 261 | return <>{content}; 262 | } 263 | } 264 | ``` 265 | 266 | There are a few similar properties which control budget allocation you might find useful for more advanced cases: 267 | 268 | - `flexReserve`: controls the number of tokens reserved from the container's budget _before_ this element gets rendered. For example, if you have a 100 token budget and the elements `<>`, then `Foo` would receive a `PromptSizing.tokenBudget` of 70, and `Bar` would receive however many tokens of the 100 that `Foo` didn't use. This is only useful in conjunction with `flexGrow`. 269 | 270 | This may also be set to a string in the form `/N` to take a proportion of the container's budget. For example, `` would reserve a third of the container's budget for this element. 271 | 272 | - `flexBasis`: controls the proportion of tokens allocated from the container's budget to this element. It defaults to `1` on all elements. For example, if you have the elements `<>` and a 100 token budget, each element would be allocated 50 tokens in its `PromptSizing.tokenBudget`. If you instead render `<>`, `Bar` would receive 66 tokens and `Foo` would receive 33. 273 | 274 | It's important to note that all of the `flex*` properties allow for cooperative use of the token budget for a prompt, but have no effect on the prioritization and pruning logic undertaken once all elements are rendered. 
275 | 276 | ### Local Priority Limits 277 | 278 | `prompt-tsx` provides a `TokenLimit` element that can be used to set a hard cap on the number of tokens that can be consumed by a prompt or part of a prompt. Using it is fairly straightforward: 279 | 280 | ```tsx 281 | class PromptWithLimit extends PromptElement { 282 | render() { 283 | return ( 284 | 285 | {/* Your elements here! */} 286 | 287 | ); 288 | } 289 | } 290 | ``` 291 | 292 | `TokenLimit` subtrees are pruned before the prompt gets pruned. As you would expect, the `PromptSizing` of child elements inside of a limit reflects the reduced budget. If the `TokenLimit` would get a `tokenBudget` smaller than its maximum via the usual distribution rules, then that's given to its child elements instead (but pruning to the `max` value still happens.) 293 | 294 | ### Expandable Text 295 | 296 | The tools provided by `flex*` attributes are good, but sometimes you may still end up with unused space in your token budget that you'd like to utilize. We provide a special `` element that can be used in this case. It takes a callback that can return a text string. 297 | 298 | ```tsx 299 | { 300 | let data = 'hi'; 301 | while (true) { 302 | const more = getMoreUsefulData(); 303 | if (await sizing.countTokens(data + more) > sizing.tokenBudget) { break } 304 | data += more; 305 | } 306 | } 307 | return data; 308 | }} /> 309 | ``` 310 | 311 | After the prompt is rendered, the renderer sums up the tokens used by all messages. If there is unused budget, then any `` elements' values are called again with their `PromptSizing` increased by the token excess. 312 | 313 | If there are multiple `` elements, then they're re-called in the order in which they were initially rendered. Because they're designed to fill up any remaining space, it usually makes sense to have at most one `` element per prompt. 314 | 315 | ### "Keep With" 316 | 317 | In some cases, content might only be relevant when other content is also included in the request.
For example in tool calls, your tool call request should only be rendered if the tool call response survived prioritization. 318 | 319 | You can use the `useKeepWith` function to help with this. It returns a component class which is only visible in the output as long as none of its usages become empty. For example: 320 | 321 | ```tsx 322 | class MyPromptElement extends PromptElement { 323 | render() { 324 | const KeepWith = useKeepWith(); 325 | return ( 326 | <> 327 | 328 | ... 329 | 330 | 331 | ... 332 | 333 | 334 | ); 335 | } 336 | } 337 | ``` 338 | 339 | Unlike ``, which prevents pruning of any children and simply removes them as a block, `` in this case will allow the `ToolCallResponse` to be pruned, and if it's fully pruned it will also remove the `ToolCallRequest`. 340 | 341 | You can also pass the `KeepWith` instance to `toolCalls` in `AssistantMessage`s. 342 | 343 | #### Debugging Budgeting 344 | 345 | You can set a `tracer` property on the `PromptElement` to debug how your elements are rendered and how this library allocates your budget. We include a basic `HTMLTracer` you can use, which can be served on an address: 346 | 347 | ```js 348 | const renderer = new PromptRenderer(/* ... */); 349 | const tracer = new HTMLTracer(); 350 | renderer.tracer = tracer; 351 | renderer.render(/* ... */); 352 | 353 | tracer.serveHTML().then(server => { 354 | console.log('Server address:', server.address); 355 | }); 356 | ``` 357 | 358 | ### IfEmpty 359 | 360 | The `` helper allows you to provide an alternative element to use if the default children of an element are empty at the time of rendering. This is especially useful when you require fallback logic for opaque child data, such as tool calls. 361 | 362 | ```tsx 363 | class MyPromptElement extends PromptElement { 364 | render() { 365 | const KeepWith = useKeepWith(); 366 | return ( 367 | <> 368 | 369 | ...
370 | 371 | 372 | ); 373 | } 374 | } 375 | ``` 376 | 377 | ### Usage in Tools 378 | 379 | Visual Studio Code's API supports language models tools, sometimes called 'functions'. The tools API allows tools to return multiple content types of data to its consumers, and this library supports both returning rich prompt elements to tool callers, as well as using rich content returned from tools. 380 | 381 | #### As a Tool 382 | 383 | As a tool, you can use this library normally. However, to return data to the tool caller, you will want to use a special function `renderElementJSON` to serialize your elements to a plain, transferrable JSON object that can be used by a consumer if they also leverage prompt-tsx: 384 | 385 | Note that when VS Code invokes your language model tool, the `options` may contain `tokenizationOptions` which you should pass through as the third argument to `renderElementJSON`: 386 | 387 | ```ts 388 | import { LanguageModelPromptTsxPart, LanguageModelToolInvocationOptions, LanguageModelToolResult } from 'vscode' 389 | 390 | async function doToolInvocation( 391 | options: LanguageModelToolInvocationOptions 392 | ): LanguageModelToolResult { 393 | const json = await renderElementJSON(MyElement, { /* props */ }, options.tokenizationOptions) 394 | return new LanguageModelToolResult([new LanguageModelPromptTsxPart(json)]) 395 | } 396 | ``` 397 | 398 | #### As a Consumer 399 | 400 | You may invoke the `vscode.lm.invokeTool` API however you see fit. If you know your token budget in advance, you should pass it to the tool when you call `invokeTool` via the `tokenOptions` option. 
You can then render the result using the `` helper element, for example: 401 | 402 | ```tsx 403 | class MyElement extends PromptElement { 404 | async render(_state: void, sizing: PromptSizing) { 405 | const result = await vscode.lm.invokeTool(toolId, { 406 | parameters: getToolParameters(), 407 | tokenizationOptions: { 408 | tokenBudget: sizing.tokenBudget, 409 | countTokens: (text, token) => sizing.countTokens(text, token), 410 | }, 411 | }); 412 | 413 | return ; 414 | } 415 | } 416 | ``` 417 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp). 16 | 17 | You should receive a response within 24 hours. 
If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd). 40 | 41 | 42 | -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | # TODO: The maintainer of this repo has not yet edited this file 2 | 3 | **REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project? 4 | 5 | - **No CSS support:** Fill out this template with information about how to file issues and get help. 6 | - **Yes CSS support:** Fill out an intake form at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). 
CSS will work with/help you to determine next steps. 7 | - **Not sure?** Fill out an intake as though the answer were "Yes". CSS will help you decide. 8 | 9 | *Then remove this first heading from this SUPPORT.MD file before publishing your repo.* 10 | 11 | # Support 12 | 13 | ## How to file issues and get help 14 | 15 | This project uses GitHub Issues to track bugs and feature requests. Please search the existing 16 | issues before filing new issues to avoid duplicates. For new issues, file your bug or 17 | feature request as a new Issue. 18 | 19 | For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE 20 | FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER 21 | CHANNEL. WHERE WILL YOU HELP PEOPLE?**. 22 | 23 | ## Microsoft Support Policy 24 | 25 | Support for this **PROJECT or PRODUCT** is limited to the resources listed above. 26 | -------------------------------------------------------------------------------- /build/base.yml: -------------------------------------------------------------------------------- 1 | name: $(Date:yyyyMMdd)$(Rev:.r) 2 | 3 | trigger: 4 | branches: 5 | include: 6 | - main 7 | 8 | resources: 9 | repositories: 10 | - repository: templates 11 | type: github 12 | name: microsoft/vscode-engineering 13 | ref: main 14 | endpoint: Monaco 15 | 16 | parameters: 17 | - name: publishPackage 18 | displayName: 🚀 Publish @vscode/prompt-tsx 19 | type: boolean 20 | default: false 21 | 22 | extends: 23 | template: azure-pipelines/npm-package/pipeline.yml@templates 24 | parameters: 25 | npmPackages: 26 | - name: prompt-tsx 27 | 28 | buildSteps: 29 | - script: npm ci 30 | displayName: Install dependencies 31 | 32 | - script: npm run compile 33 | displayName: Compile 34 | 35 | testPlatforms: 36 | - name: Linux 37 | nodeVersions: 38 | - 20.x 39 | - name: MacOS 40 | nodeVersions: 41 | - 20.x 42 | - name: Windows 43 | nodeVersions: 44 | - 20.x 45 | 46 | testSteps: 47 | - 
script: npm ci 48 | displayName: Install dependencies 49 | 50 | - bash: | 51 | /usr/bin/Xvfb :99 -screen 0 1024x768x24 > /dev/null 2>&1 & 52 | echo ">>> Started xvfb" 53 | displayName: Start xvfb 54 | condition: eq(variables['Agent.OS'], 'Linux') 55 | 56 | - script: npm run compile 57 | displayName: Compile npm package 58 | env: 59 | DISPLAY: ':99.0' 60 | 61 | - script: npm run test 62 | displayName: Test npm package 63 | env: 64 | DISPLAY: ':99.0' 65 | 66 | publishPackage: ${{ parameters.publishPackage }} 67 | -------------------------------------------------------------------------------- /build/build-tracer.ts: -------------------------------------------------------------------------------- 1 | import * as assert from 'assert'; 2 | import * as chokidar from 'chokidar'; 3 | import * as esbuild from 'esbuild'; 4 | import { writeFileSync } from 'fs'; 5 | 6 | const watch = process.argv.includes('--watch'); 7 | const minify = watch ? process.argv.includes('--minify') : !process.argv.includes('--no-minify'); 8 | 9 | const ctx = esbuild.context({ 10 | entryPoints: ['src/tracer/index.tsx'], 11 | tsconfig: 'src/tracer/tsconfig.json', 12 | bundle: true, 13 | sourcemap: minify ? 
false : 'inline', 14 | minify, 15 | platform: 'browser', 16 | outdir: 'out', 17 | write: false, 18 | }); 19 | 20 | function build() { 21 | return ctx 22 | .then(ctx => ctx.rebuild()) 23 | .then(bundle => { 24 | assert.strictEqual(bundle.outputFiles.length, 2, 'expected to have 2 output files'); 25 | 26 | const css = bundle.outputFiles.find(o => o.path.endsWith('.css')); 27 | assert.ok(css, 'expected to have css'); 28 | const js = bundle.outputFiles.find(o => o.path.endsWith('.js')); 29 | assert.ok(js, 'expected to have js'); 30 | writeFileSync( 31 | 'src/base/htmlTracerSrc.ts', 32 | `export const tracerSrc = ${JSON.stringify( 33 | js.text 34 | )};\nexport const tracerCss = ${JSON.stringify(css.text)};` 35 | ); 36 | }) 37 | .catch(err => { 38 | if (err.errors) { 39 | console.error(err.errors.join('\n')); 40 | } else { 41 | console.error(err); 42 | } 43 | }); 44 | } 45 | 46 | if (watch) { 47 | let timeout: NodeJS.Timeout | null = null; 48 | chokidar.watch('src/tracer/**/*.{tsx,ts,css}', {}).on('all', () => { 49 | if (timeout) { 50 | clearTimeout(timeout); 51 | } 52 | timeout = setTimeout(build, 600); 53 | }); 54 | } else { 55 | build().then(() => { 56 | process.exit(0); 57 | }); 58 | } 59 | -------------------------------------------------------------------------------- /build/postcompile.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 
3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import { copyStaticAssets } from './postinstall'; 6 | 7 | async function main() { 8 | // Ship the vscodeTypes.d.ts file in the dist bundle 9 | await copyStaticAssets(['src/base/vscodeTypes.d.ts'], 'dist/base/'); 10 | } 11 | 12 | main(); 13 | -------------------------------------------------------------------------------- /build/postinstall.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import * as fs from 'fs'; 6 | import * as path from 'path'; 7 | 8 | const REPO_ROOT = path.join(__dirname, '..'); 9 | 10 | export async function copyStaticAssets(srcpaths: string[], dst: string): Promise { 11 | await Promise.all( 12 | srcpaths.map(async srcpath => { 13 | const src = path.join(REPO_ROOT, srcpath); 14 | const dest = path.join(REPO_ROOT, dst, path.basename(srcpath)); 15 | await fs.promises.mkdir(path.dirname(dest), { recursive: true }); 16 | await fs.promises.copyFile(src, dest); 17 | }) 18 | ); 19 | } 20 | 21 | async function main() { 22 | // Ship the tiktoken file in the dist bundle 23 | await copyStaticAssets(['src/base/tokenizer/cl100k_base.tiktoken'], 'dist/base/tokenizer'); 24 | } 25 | 26 | main(); 27 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # @vscode/prompt-tsx Samples 2 | 3 | This directory contains samples for common patterns using the `@vscode/prompt-tsx` library. Each file contains an example of a component, with a docblock explaining its design. 
You can find the same content, nicely formatted, in the [VS Code Documentation for extension authors](https://code.visualstudio.com/api/extension-guides/prompt-tsx). 4 | -------------------------------------------------------------------------------- /examples/file-contents.tsx: -------------------------------------------------------------------------------- 1 | import { 2 | BasePromptElementProps, 3 | PromptElement, 4 | PromptPiece, 5 | PromptSizing, 6 | SystemMessage, 7 | UserMessage, 8 | } from '@vscode/prompt-tsx'; 9 | import { ChatContext, TextDocument } from 'vscode'; 10 | import { History } from './history'; 11 | 12 | interface IFilesToInclude { 13 | document: TextDocument; 14 | line: number; 15 | } 16 | 17 | interface IMyPromptProps extends BasePromptElementProps { 18 | history: ChatContext['history']; 19 | userQuery: string; 20 | files: IFilesToInclude[]; 21 | } 22 | 23 | /** 24 | * In this example, we want to include the contents of all files the user is 25 | * currently looking at in their prompt. But, these files could be big, to the 26 | * point where including all of them would lead to their text being pruned! 27 | * 28 | * This example shows you how to use the `flexGrow` property to cooperatively 29 | * size the file contents to fit within the token budget. Each element receives 30 | * information about how much of the token budget it is suggested to consume in 31 | * its `PromptSizing` object, passed to both `prepare` and `render`. 32 | * 33 | * By default, each element has a `flexGrow` value of `0`. This means they're 34 | * all rendered concurrently and split the budget equally (unless modified by 35 | * a `flexBasis` value.) If you assign elements to a higher `flexGrow` value, 36 | * then they're rendered after everything else, and they're given any remaining 37 | * unused budget. This gives you a great way to create elements that size to 38 | * fit but not exceed your total budget.
39 | * 40 | * Let's use this to make the `FileContext` grow to fill the available space. 41 | * We'll assign it a `flexGrow` value of `1`, and then it will be rendered after 42 | * the instructions and query. 43 | * 44 | * History can be big, however, and we'd prefer to bring in more context rather 45 | * than more history. So, we'll assign the `History` element a `flexGrow` value 46 | * of `2` for the sole purpose of keeping its token consumption out of the 47 | * `FileContext` budget. However, we will set `flexReserve="/5"` to have it 48 | * 'reserve' 1/5th of the total budget from being given to the sizing of 49 | * earlier elements, just to make sure we have some amount of history in the 50 | * prompt. 51 | * 52 | * It's important to note that the `flexGrow` value, and `PromptSizing` in 53 | * general, allows **cooperative** use of the token budget. If the prompt is 54 | * over budget after everything is rendered, then pruning still happens as 55 | * usual. `flex*` values have no impact on the priority or pruning process. 56 | * 57 | * While we're using the active files and selections here, these same concepts 58 | * can be applied in other scenarios too. 59 | */ 60 | export class MyPrompt extends PromptElement { 61 | render() { 62 | return ( 63 | <> 64 | Here are your base instructions. 65 | {/* See `./history.tsx` for an explainer on the history element. */} 66 | 74 | {this.props.userQuery} 75 | 76 | 77 | ); 78 | } 79 | } 80 | 81 | class FileContext extends PromptElement<{ files: IFilesToInclude[] } & BasePromptElementProps> { 82 | async render(_state: void, sizing: PromptSizing): Promise { 83 | const files = await this.getExpandedFiles(sizing); 84 | return <>{files.map(f => f.toString())}; 85 | } 86 | 87 | /** 88 | * The idea here is: 89 | * 90 | * 1. We wrap each file in markdown-style code fences, so get the base 91 | * token consumption of each of those. 92 | * 2. Keep looping through the files. 
Each time, add one line from each file 93 | * until either we're out of lines (anyHadLinesToExpand=false) or until 94 | * the next line would cause us to exceed our token budget. 95 | * 96 | * This always will produce files that are under the budget because 97 | * tokenization can cause content on multiple lines to 'merge', but it will 98 | * never exceed the budget. 99 | * 100 | * (`tokenLength(a) + tokenLength(b) <= tokenLength(a + b)` in all current 101 | * tokenizers.) 102 | */ 103 | private async getExpandedFiles(sizing: PromptSizing) { 104 | const files = this.props.files.map(f => new FileContextTracker(f.document, f.line)); 105 | 106 | let tokenCount = 0; 107 | // count the base amount of tokens used by the files: 108 | for (const file of files) { 109 | tokenCount += await file.tokenCount(sizing); 110 | } 111 | 112 | while (true) { 113 | let anyHadLinesToExpand = false; 114 | for (const file of files) { 115 | const nextLine = file.nextLine(); 116 | if (nextLine === undefined) { 117 | continue; 118 | } 119 | 120 | anyHadLinesToExpand = true; 121 | const nextTokenCount = await sizing.countTokens(nextLine); 122 | if (tokenCount + nextTokenCount > sizing.tokenBudget) { 123 | return files; 124 | } 125 | 126 | file.expand(); 127 | tokenCount += nextTokenCount; 128 | } 129 | 130 | if (!anyHadLinesToExpand) { 131 | return files; 132 | } 133 | } 134 | } 135 | } 136 | 137 | class FileContextTracker { 138 | private prefix = `# ${this.document.fileName}\n\`\`\`\n`; 139 | private suffix = '\n```\n'; 140 | private lines: string[] = []; 141 | 142 | private aboveLine = this.originLine; 143 | private belowLine = this.originLine; 144 | private nextLineIs: 'above' | 'below' | 'none' = 'above'; 145 | 146 | constructor(private readonly document: TextDocument, private readonly originLine: number) {} 147 | 148 | /** Counts the length of the current data. 
*/ 149 | public async tokenCount(sizing: PromptSizing) { 150 | const before = await sizing.countTokens(this.prefix); 151 | const after = await sizing.countTokens(this.suffix); 152 | return before + after; 153 | } 154 | 155 | /** Gets the next line that will be added on the following `expand` call. */ 156 | public nextLine(): string | undefined { 157 | switch (this.nextLineIs) { 158 | case 'above': 159 | return this.document.lineAt(this.aboveLine).text + '\n'; 160 | case 'below': 161 | return this.document.lineAt(this.belowLine).text + '\n'; 162 | case 'none': 163 | return undefined; 164 | } 165 | } 166 | 167 | /** Adds in the 'next line' */ 168 | public expand() { 169 | if (this.nextLineIs === 'above') { 170 | this.lines.unshift(this.document.lineAt(this.aboveLine).text); 171 | if (this.belowLine < this.document.lineCount - 1) { 172 | this.belowLine++; 173 | this.nextLineIs = 'below'; 174 | } else if (this.aboveLine > 0) { 175 | this.aboveLine--; 176 | } else { 177 | this.nextLineIs = 'none'; 178 | } 179 | } else if (this.nextLineIs === 'below') { 180 | this.lines.push(this.document.lineAt(this.belowLine).text); 181 | if (this.aboveLine > 0) { 182 | this.aboveLine--; 183 | this.nextLineIs = 'above'; 184 | } else if (this.belowLine < this.document.lineCount - 1) { 185 | this.belowLine++; 186 | } else { 187 | this.nextLineIs = 'none'; 188 | } 189 | } 190 | } 191 | 192 | /** Gets the file content as a string. 
*/ 193 | toString() { 194 | return this.prefix + this.lines.join('\n') + this.suffix; 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /examples/history.tsx: -------------------------------------------------------------------------------- 1 | import { 2 | AssistantMessage, 3 | BasePromptElementProps, 4 | PrioritizedList, 5 | PromptElement, 6 | PromptPiece, 7 | PromptSizing, 8 | SystemMessage, 9 | UserMessage, 10 | } from '@vscode/prompt-tsx'; 11 | import { 12 | CancellationToken, 13 | ChatContext, 14 | ChatRequestTurn, 15 | ChatResponseMarkdownPart, 16 | ChatResponseTurn, 17 | Progress, 18 | } from 'vscode'; 19 | import { ChatResponsePart } from '../dist/base/vscodeTypes'; 20 | 21 | interface IMyPromptProps extends BasePromptElementProps { 22 | history: ChatContext['history']; 23 | userQuery: string; 24 | } 25 | 26 | /** 27 | * Including conversation history in your prompt is important as it allows the 28 | * user to ask followup questions to previous messages. However, you want to 29 | * make sure its priority is treated appropriately because history can 30 | * grow very large over time. 31 | * 32 | * We've found that the pattern which makes the most sense is usually to prioritize, in order: 33 | * 34 | * 1. The base prompt instructions, then 35 | * 1. The current user query, then 36 | * 1. The last couple turns of chat history, then 37 | * 1. Any supporting data, then 38 | * 1. As much of the remaining history as you can fit. 39 | * 40 | * For this reason, we split the history in two parts in the prompt, where 41 | * recent prompt turns are prioritized above general contextual information. 42 | */ 43 | export class MyPrompt extends PromptElement { 44 | render() { 45 | return ( 46 | <> 47 | 48 | Here are your base instructions. They have the highest priority because you want to make 49 | sure they're always included! 
50 | 51 | {/* The remainder of the history has the lowest priority since it's less relevant */} 52 | 53 | {/* The last 2 history messages are preferred over any workspace context we have vlow */} 54 | 55 | {/* The user query is right behind the system message in priority */} 56 | {this.props.userQuery} 57 | 58 | With a slightly lower priority, you can include some contextual data about the workspace 59 | or files here... 60 | 61 | 62 | ); 63 | } 64 | } 65 | 66 | interface IHistoryProps extends BasePromptElementProps { 67 | history: ChatContext['history']; 68 | newer: number; // last 2 message priority values 69 | older: number; // previous message priority values 70 | passPriority: true; // require this prop be set! 71 | } 72 | 73 | /** 74 | * We can wrap up this history element to be a little easier to use. `prompt-tsx` 75 | * has a `passPriority` attribute which allows an element to act as a 'pass-through' 76 | * container, so that its children are pruned as if they were direct children of 77 | * the parent. With this component, the elements 78 | * 79 | * ``` 80 | * 81 | * 82 | * ``` 83 | * 84 | * ...can equivalently be expressed as: 85 | * 86 | * ``` 87 | * 88 | * ``` 89 | */ 90 | export class History extends PromptElement { 91 | render(): PromptPiece { 92 | return ( 93 | <> 94 | 95 | 96 | 97 | ); 98 | } 99 | } 100 | 101 | interface IHistoryMessagesProps extends BasePromptElementProps { 102 | history: ChatContext['history']; 103 | } 104 | 105 | /** 106 | * The History element simply lists user and assistant messages from the chat 107 | * context. If things like tool calls or file trees are relevant for, your 108 | * case, you can make this element more complex to handle those cases. 
109 | */ 110 | export class HistoryMessages extends PromptElement { 111 | render(): PromptPiece { 112 | const history: (UserMessage | AssistantMessage)[] = []; 113 | for (const turn of this.props.history) { 114 | if (turn instanceof ChatRequestTurn) { 115 | history.push({turn.prompt}); 116 | } else if (turn instanceof ChatResponseTurn) { 117 | history.push( 118 | 119 | {chatResponseToMarkdown(turn)} 120 | 121 | ); 122 | } 123 | } 124 | return ( 125 | 126 | {history} 127 | 128 | ); 129 | } 130 | } 131 | 132 | const chatResponseToMarkdown = (response: ChatResponseTurn) => { 133 | let str = ''; 134 | for (const part of response.response) { 135 | if (response instanceof ChatResponseMarkdownPart) { 136 | str += part.value; 137 | } 138 | } 139 | 140 | return str; 141 | }; 142 | -------------------------------------------------------------------------------- /examples/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "examples", 3 | "lockfileVersion": 3, 4 | "requires": true, 5 | "packages": { 6 | "": { 7 | "devDependencies": { 8 | "@types/vscode": "^1.95.0", 9 | "@vscode/prompt-tsx": "file:.." 
10 | } 11 | }, 12 | "..": { 13 | "version": "0.3.0-alpha.13", 14 | "dev": true, 15 | "license": "MIT", 16 | "devDependencies": { 17 | "@microsoft/tiktokenizer": "^1.0.6", 18 | "@types/node": "^20.11.30", 19 | "@vscode/test-cli": "^0.0.9", 20 | "@vscode/test-electron": "^2.4.1", 21 | "concurrently": "^9.0.1", 22 | "cross-env": "^7.0.3", 23 | "esbuild": "^0.24.0", 24 | "mocha": "^10.2.0", 25 | "preact": "^10.24.2", 26 | "prettier": "^2.8.8", 27 | "tsx": "^4.19.1", 28 | "typescript": "^5.6.2" 29 | } 30 | }, 31 | "node_modules/@types/vscode": { 32 | "version": "1.95.0", 33 | "resolved": "https://registry.npmjs.org/@types/vscode/-/vscode-1.95.0.tgz", 34 | "integrity": "sha512-0LBD8TEiNbet3NvWsmn59zLzOFu/txSlGxnv5yAFHCrhG9WvAnR3IvfHzMOs2aeWqgvNjq9pO99IUw8d3n+unw==", 35 | "dev": true 36 | }, 37 | "node_modules/@vscode/prompt-tsx": { 38 | "resolved": "..", 39 | "link": true 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /examples/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "devDependencies": { 3 | "@types/vscode": "^1.95.0", 4 | "@vscode/prompt-tsx": "file:.." 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /examples/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "jsx": "react", 4 | "jsxFactory": "vscpp", 5 | "jsxFragmentFactory": "vscppf" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@vscode/prompt-tsx", 3 | "version": "0.4.0-alpha.4", 4 | "description": "Declare LLM prompts with TSX", 5 | "main": "./dist/base/index.js", 6 | "types": "./dist/base/index.d.ts", 7 | "scripts": { 8 | "fmt": "prettier . 
--write", 9 | "prepack": "npm run compile", 10 | "compile": "tsx ./build/build-tracer.ts && tsc -p tsconfig.json && tsx ./build/postcompile.ts", 11 | "watch": "concurrently \"npm run -s watch:base\" \"npm run -s watch:tracer\"", 12 | "watch:tracer": "tsx ./build/build-tracer.ts --watch", 13 | "watch:base": "tsc --watch --sourceMap --preserveWatchOutput", 14 | "test": "vscode-test", 15 | "test:unit": "cross-env IS_OUTSIDE_VSCODE=1 mocha --import=tsx -u tdd \"src/base/test/**/*.test.{ts,tsx}\"", 16 | "test:bench": "tsx ./src/base/test/renderer.bench.tsx", 17 | "prettier": "prettier --list-different --write --cache .", 18 | "prepare": "tsx ./build/postinstall.ts" 19 | }, 20 | "keywords": [], 21 | "author": "Microsoft Corporation", 22 | "license": "MIT", 23 | "bugs": { 24 | "url": "https://github.com/microsoft/vscode-prompt-tsx/issues" 25 | }, 26 | "repository": { 27 | "type": "git", 28 | "url": "git+https://github.com/microsoft/vscode-prompt-tsx.git" 29 | }, 30 | "homepage": "https://github.com/microsoft/vscode-prompt-tsx#readme", 31 | "devDependencies": { 32 | "@microsoft/tiktokenizer": "^1.0.6", 33 | "@types/node": "^20.11.30", 34 | "@vscode/test-cli": "^0.0.9", 35 | "@vscode/test-electron": "^2.4.1", 36 | "concurrently": "^9.0.1", 37 | "cross-env": "^7.0.3", 38 | "esbuild": "^0.25.4", 39 | "mocha": "^10.2.0", 40 | "preact": "^10.24.2", 41 | "prettier": "^2.8.8", 42 | "tinybench": "^3.1.1", 43 | "tsx": "^4.19.1", 44 | "typescript": "^5.6.2" 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/base/htmlTracer.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 
3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import type { IncomingMessage, OutgoingMessage, Server } from 'http'; 6 | import type { AddressInfo } from 'net'; 7 | import { tracerCss, tracerSrc } from './htmlTracerSrc'; 8 | import { 9 | HTMLTraceEpoch, 10 | IHTMLTraceRenderData, 11 | IMaterializedMetadata, 12 | ITraceMaterializedContainer, 13 | ITraceMaterializedNode, 14 | TraceMaterializedNodeType, 15 | } from './htmlTracerTypes'; 16 | import { 17 | MaterializedChatMessageImage, 18 | MaterializedChatMessage, 19 | MaterializedChatMessageTextChunk, 20 | GenericMaterializedContainer, 21 | MaterializedNode, 22 | MaterializedChatMessageOpaque, 23 | MaterializedChatMessageBreakpoint, 24 | } from './materialized'; 25 | import { PromptMetadata } from './results'; 26 | import { ITokenizer } from './tokenizer/tokenizer'; 27 | import { IElementEpochData, ITraceData, ITraceEpoch, ITracer, ITraceRenderData } from './tracer'; 28 | import { Raw } from './output/mode'; 29 | 30 | /** 31 | * Handler that can trace rendering internals into an HTML summary. 32 | */ 33 | export class HTMLTracer implements ITracer { 34 | private traceData?: ITraceData; 35 | private readonly epochs: ITraceEpoch[] = []; 36 | 37 | addRenderEpoch(epoch: ITraceEpoch): void { 38 | this.epochs.push(epoch); 39 | } 40 | 41 | includeInEpoch(data: IElementEpochData): void { 42 | this.epochs[this.epochs.length - 1].elements.push(data); 43 | } 44 | 45 | didMaterializeTree(traceData: ITraceData): void { 46 | this.traceData = traceData; 47 | } 48 | 49 | /** 50 | * Returns HTML to trace the output. Note that is starts a server which is 51 | * used for client interaction to resize the prompt and its `address` should 52 | * be displayed or opened as a link in a browser. 53 | * 54 | * The server runs until it is disposed. 
55 | */ 56 | public async serveHTML(): Promise { 57 | return RequestServer.create({ 58 | epochs: this.epochs, 59 | traceData: mustGet(this.traceData), 60 | }); 61 | } 62 | 63 | /** 64 | * Gets an HTML router for a server at the URL. URL is the form `http://127.0.0.1:1234`. 65 | */ 66 | public serveRouter(url: string): IHTMLRouter { 67 | return new RequestRouter({ 68 | baseAddress: url, 69 | epochs: this.epochs, 70 | traceData: mustGet(this.traceData), 71 | }); 72 | } 73 | } 74 | 75 | export interface IHTMLRouter { 76 | address: string; 77 | route(httpIncomingMessage: unknown, httpOutgoingMessage: unknown): boolean; 78 | } 79 | 80 | export interface IHTMLServer { 81 | address: string; 82 | getHTML(): Promise; 83 | dispose(): void; 84 | } 85 | 86 | interface IServerOpts { 87 | epochs: ITraceEpoch[]; 88 | traceData: ITraceData; 89 | baseAddress: string; 90 | } 91 | 92 | class RequestRouter implements IHTMLRouter { 93 | private serverToken = crypto.randomUUID(); 94 | 95 | constructor(private readonly opts: IServerOpts) {} 96 | 97 | public route(httpIncomingMessage: unknown, httpOutgoingMessage: unknown): boolean { 98 | const req = httpIncomingMessage as IncomingMessage; 99 | const res = httpOutgoingMessage as OutgoingMessage; 100 | const url = new URL(req.url || '/', `http://localhost`); 101 | const prefix = `/${this.serverToken}`; 102 | switch (url.pathname) { 103 | case prefix: 104 | case `${prefix}/`: 105 | this.onRoot(url, req, res); 106 | break; 107 | case `${prefix}/regen`: 108 | this.onRegen(url, req, res); 109 | break; 110 | default: 111 | return false; 112 | } 113 | 114 | return true; 115 | } 116 | 117 | public get address() { 118 | return this.opts.baseAddress + '/' + this.serverToken; 119 | } 120 | 121 | public async getHTML() { 122 | const { traceData, epochs } = this.opts; 123 | return ` 124 | 125 | 134 | `; 135 | } 136 | 137 | private async onRegen(url: URL, _req: IncomingMessage, res: OutgoingMessage) { 138 | const { traceData } = this.opts; 139 | const 
budget = Number(url.searchParams.get('n') || traceData.budget); 140 | const renderedTree = await traceData.renderTree(budget); 141 | const serialized = await serializeRenderData(traceData.tokenizer, renderedTree); 142 | const json = JSON.stringify(serialized); 143 | res.setHeader('Content-Type', 'application/json'); 144 | res.setHeader('Content-Length', Buffer.byteLength(json)); 145 | res.end(json); 146 | } 147 | 148 | private onRoot(_url: URL, _req: IncomingMessage, res: OutgoingMessage) { 149 | this.getHTML().then(html => { 150 | res.setHeader('Content-Type', 'text/html'); 151 | res.setHeader('Content-Length', Buffer.byteLength(html)); 152 | res.end(html); 153 | }); 154 | } 155 | } 156 | 157 | class RequestServer extends RequestRouter implements IHTMLServer { 158 | public static async create(opts: Omit) { 159 | const { createServer } = await import('http'); 160 | const server = createServer((req, res) => { 161 | try { 162 | if (!instance.route(req, res)) { 163 | res.statusCode = 404; 164 | res.end('Not Found'); 165 | } 166 | } catch (e) { 167 | res.statusCode = 500; 168 | res.end(String(e)); 169 | } 170 | }); 171 | 172 | const port = await new Promise((resolve, reject) => { 173 | server 174 | .listen(0, '127.0.0.1', () => resolve((server.address() as AddressInfo).port)) 175 | .on('error', reject); 176 | }); 177 | 178 | const instance = new RequestServer( 179 | { 180 | ...opts, 181 | baseAddress: `http://127.0.0.1:${port}`, 182 | }, 183 | server 184 | ); 185 | 186 | return instance; 187 | } 188 | 189 | constructor(opts: IServerOpts, private readonly server: Server) { 190 | super(opts); 191 | } 192 | 193 | dispose() { 194 | this.server.closeAllConnections(); 195 | this.server.close(); 196 | } 197 | } 198 | 199 | async function serializeRenderData( 200 | tokenizer: ITokenizer, 201 | tree: ITraceRenderData 202 | ): Promise { 203 | return { 204 | container: (await serializeMaterialized( 205 | tokenizer, 206 | tree.container, 207 | false 208 | )) as 
ITraceMaterializedContainer, 209 | removed: tree.removed, 210 | budget: tree.budget, 211 | }; 212 | } 213 | 214 | async function serializeMaterialized( 215 | tokenizer: ITokenizer, 216 | materialized: MaterializedNode, 217 | inChatMessage: boolean 218 | ): Promise { 219 | const common = { 220 | metadata: materialized.metadata.map(serializeMetadata), 221 | priority: materialized.priority, 222 | }; 223 | 224 | if (materialized instanceof MaterializedChatMessageTextChunk) { 225 | return { 226 | ...common, 227 | type: TraceMaterializedNodeType.TextChunk, 228 | value: materialized.text, 229 | tokens: await materialized.upperBoundTokenCount(tokenizer), 230 | }; 231 | } else if (materialized instanceof MaterializedChatMessageImage) { 232 | return { 233 | ...common, 234 | name: materialized.id.toString(), 235 | id: materialized.id, 236 | type: TraceMaterializedNodeType.Image, 237 | value: materialized.src, 238 | tokens: await materialized.upperBoundTokenCount(tokenizer), 239 | }; 240 | } else if ( 241 | materialized instanceof MaterializedChatMessageOpaque || 242 | materialized instanceof MaterializedChatMessageBreakpoint 243 | ) { 244 | // todo: add to visualizer 245 | return undefined; 246 | } else { 247 | const containerCommon = { 248 | ...common, 249 | id: materialized.id, 250 | name: materialized.name, 251 | children: ( 252 | await Promise.all( 253 | materialized.children.map(c => 254 | serializeMaterialized( 255 | tokenizer, 256 | c, 257 | inChatMessage || materialized instanceof MaterializedChatMessage 258 | ) 259 | ) 260 | ) 261 | ).filter(r => !!r), 262 | tokens: inChatMessage 263 | ? 
await materialized.upperBoundTokenCount(tokenizer) 264 | : await materialized.tokenCount(tokenizer), 265 | }; 266 | 267 | if (materialized instanceof GenericMaterializedContainer) { 268 | return { 269 | ...containerCommon, 270 | type: TraceMaterializedNodeType.Container, 271 | }; 272 | } else if (materialized instanceof MaterializedChatMessage) { 273 | const content = materialized.text 274 | .filter(element => typeof element === 'string') 275 | .join('') 276 | .trim(); 277 | return { 278 | ...containerCommon, 279 | type: TraceMaterializedNodeType.ChatMessage, 280 | role: Raw.ChatRole.display(materialized.role), 281 | text: content, 282 | }; 283 | } 284 | } 285 | 286 | assertNever(materialized); 287 | } 288 | 289 | function assertNever(x: never): never { 290 | throw new Error('unreachable'); 291 | } 292 | 293 | function serializeMetadata(metadata: PromptMetadata): IMaterializedMetadata { 294 | return { name: metadata.constructor.name, value: JSON.stringify(metadata) }; 295 | } 296 | 297 | const mustGet = (value: T | undefined): T => { 298 | if (value === undefined) { 299 | throw new Error('Prompt must be rendered before calling HTMLTRacer.serveHTML'); 300 | } 301 | 302 | return value; 303 | }; 304 | -------------------------------------------------------------------------------- /src/base/htmlTracerTypes.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 
3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import { ITraceEpoch } from './tracer'; 6 | 7 | export type HTMLTraceEpoch = ITraceEpoch; 8 | 9 | export interface IHTMLTraceRenderData { 10 | container: ITraceMaterializedContainer; 11 | removed: number; 12 | budget: number; 13 | } 14 | 15 | export type ITraceMaterializedNode = 16 | | ITraceMaterializedContainer 17 | | ITraceMaterializedChatMessage 18 | | ITraceMaterializedChatMessageTextChunk 19 | | ITraceMaterializedChatMessageImage; 20 | 21 | export const enum TraceMaterializedNodeType { 22 | Container, 23 | ChatMessage, 24 | TextChunk, 25 | Image, 26 | } 27 | 28 | export interface IMaterializedMetadata { 29 | name: string; 30 | value: string; 31 | } 32 | 33 | export interface ITraceMaterializedCommon { 34 | priority: number; 35 | tokens: number; 36 | metadata: IMaterializedMetadata[]; 37 | } 38 | 39 | export interface ITraceMaterializedContainer extends ITraceMaterializedCommon { 40 | type: TraceMaterializedNodeType.Container; 41 | id: number; 42 | name: string | undefined; 43 | children: ITraceMaterializedNode[]; 44 | } 45 | 46 | export interface ITraceMaterializedChatMessage extends ITraceMaterializedCommon { 47 | type: TraceMaterializedNodeType.ChatMessage; 48 | id: number; 49 | role: string; 50 | name: string | undefined; 51 | priority: number; 52 | text: string; 53 | tokens: number; 54 | children: ITraceMaterializedNode[]; 55 | } 56 | 57 | export interface ITraceMaterializedChatMessageTextChunk extends ITraceMaterializedCommon { 58 | type: TraceMaterializedNodeType.TextChunk; 59 | value: string; 60 | priority: number; 61 | tokens: number; 62 | } 63 | 64 | export interface ITraceMaterializedChatMessageImage extends ITraceMaterializedCommon { 65 | id: number; 66 | type: TraceMaterializedNodeType.Image; 67 | name: string 68 | value: string; 69 | priority: number; 70 | tokens: number, 71 | } 72 | 73 | 
-------------------------------------------------------------------------------- /src/base/index.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import type { 6 | CancellationToken, 7 | ChatResponsePart, 8 | LanguageModelChat, 9 | LanguageModelChatMessage, 10 | Progress, 11 | } from 'vscode'; 12 | import { PromptElementJSON } from './jsonTypes'; 13 | import { ModeToChatMessageType, OutputMode, Raw } from './output/mode'; 14 | import { ChatMessage } from './output/openaiTypes'; 15 | import { MetadataMap, PromptRenderer } from './promptRenderer'; 16 | import { PromptReference } from './results'; 17 | import { ITokenizer, VSCodeTokenizer } from './tokenizer/tokenizer'; 18 | import { BasePromptElementProps, IChatEndpointInfo, PromptElementCtor } from './types'; 19 | import { ChatDocumentContext } from './vscodeTypes.d'; 20 | 21 | export * from './htmlTracer'; 22 | export * as JSONTree from './jsonTypes'; 23 | export * from './output/mode'; 24 | export * from './promptElements'; 25 | export * from './results'; 26 | export { ITokenizer } from './tokenizer/tokenizer'; 27 | export * from './tracer'; 28 | export * from './tsx-globals'; 29 | export * from './types'; 30 | 31 | export { PromptElement } from './promptElement'; 32 | export { MetadataMap, PromptRenderer, QueueItem, RenderPromptResult } from './promptRenderer'; 33 | 34 | /** 35 | * Renders a prompt element and returns the result. 36 | * 37 | * @template P - The type of the prompt element props. 38 | * @param ctor - The constructor of the prompt element. 39 | * @param props - The props for the prompt element. 40 | * @param endpoint - The chat endpoint information. 
41 | * @param progress - The progress object for reporting progress of the chat response. 42 | * @param token - The cancellation token for cancelling the operation. 43 | * @param tokenizer - The tokenizer for tokenizing the chat response. 44 | * @param mode - The mode to render the chat messages in. 45 | * @returns A promise that resolves to an object containing the rendered {@link LanguageModelChatMessage chat messages}, token count, metadatas, used context, and references. 46 | */ 47 | export async function renderPrompt

( 48 | ctor: PromptElementCtor, 49 | props: P, 50 | endpoint: IChatEndpointInfo, 51 | tokenizerMetadata: ITokenizer | LanguageModelChat, 52 | progress?: Progress, 53 | token?: CancellationToken, 54 | mode?: OutputMode.VSCode 55 | ): Promise<{ 56 | messages: LanguageModelChatMessage[]; 57 | tokenCount: number; 58 | metadata: MetadataMap; 59 | usedContext: ChatDocumentContext[]; 60 | references: PromptReference[]; 61 | }>; 62 | /** 63 | * Renders a prompt element and returns the result. 64 | * 65 | * @template P - The type of the prompt element props. 66 | * @param ctor - The constructor of the prompt element. 67 | * @param props - The props for the prompt element. 68 | * @param endpoint - The chat endpoint information. 69 | * @param progress - The progress object for reporting progress of the chat response. 70 | * @param token - The cancellation token for cancelling the operation. 71 | * @param tokenizer - The tokenizer for tokenizing the chat response. 72 | * @param mode - The mode to render the chat messages in. 73 | * @returns A promise that resolves to an object containing the rendered {@link ChatMessage chat messages}, token count, metadatas, used context, and references. 74 | */ 75 | export async function renderPrompt

( 76 | ctor: PromptElementCtor, 77 | props: P, 78 | endpoint: IChatEndpointInfo, 79 | tokenizerMetadata: ITokenizer, 80 | progress?: Progress, 81 | token?: CancellationToken 82 | ): Promise<{ 83 | messages: ModeToChatMessageType[TMode][]; 84 | tokenCount: number; 85 | metadata: MetadataMap; 86 | usedContext: ChatDocumentContext[]; 87 | references: PromptReference[]; 88 | }>; 89 | export async function renderPrompt

( 90 | ctor: PromptElementCtor, 91 | props: P, 92 | endpoint: IChatEndpointInfo, 93 | tokenizerMetadata: ITokenizer | LanguageModelChat, 94 | progress?: Progress, 95 | token?: CancellationToken, 96 | mode = OutputMode.VSCode 97 | ): Promise<{ 98 | messages: (ChatMessage | LanguageModelChatMessage)[]; 99 | tokenCount: number; 100 | metadata: MetadataMap; 101 | usedContext: ChatDocumentContext[]; 102 | references: PromptReference[]; 103 | }> { 104 | let tokenizer = 105 | 'countTokens' in tokenizerMetadata 106 | ? new VSCodeTokenizer((text, token) => tokenizerMetadata.countTokens(text, token), mode) 107 | : tokenizerMetadata; 108 | const renderer = new PromptRenderer(endpoint, ctor, props, tokenizer); 109 | const renderResult = await renderer.render(progress, token); 110 | const usedContext = renderer.getUsedContext(); 111 | return { ...renderResult, usedContext }; 112 | } 113 | 114 | /** 115 | * Content type of the return value from {@link renderElementJSON}. 116 | * When responding to a tool invocation, the tool should set this as the 117 | * content type in the returned data: 118 | * 119 | * ```ts 120 | * import { contentType } from '@vscode/prompt-tsx'; 121 | * 122 | * async function doToolInvocation(): vscode.LanguageModelToolResult { 123 | * return { 124 | * [contentType]: await renderElementJSON(...), 125 | * toString: () => '...', 126 | * }; 127 | * } 128 | * ``` 129 | */ 130 | export const contentType = 'application/vnd.codechat.prompt+json.1'; 131 | 132 | /** 133 | * Renders a prompt element to a serializable state. This type be returned in 134 | * tools results and reused in subsequent render calls via the `` 135 | * element. 136 | * 137 | * In this mode, message chunks are not pruned from the tree; budget 138 | * information is used only to hint to the elements how many tokens they should 139 | * consume when rendered. 140 | * 141 | * @template P - The type of the prompt element props. 142 | * @param ctor - The constructor of the prompt element. 
143 | * @param props - The props for the prompt element. 144 | * @param budgetInformation - Information about the token budget. 145 | * `vscode.LanguageModelToolInvocationOptions` is assignable to this object. 146 | * @param token - The cancellation token for cancelling the operation. 147 | * @returns A promise that resolves to an object containing the serialized data. 148 | */ 149 | export function renderElementJSON

( 150 | ctor: PromptElementCtor, 151 | props: P, 152 | budgetInformation: 153 | | { 154 | tokenBudget: number; 155 | countTokens(text: string, token?: CancellationToken): Thenable; 156 | } 157 | | undefined, 158 | token?: CancellationToken 159 | ): Promise { 160 | const renderer = new PromptRenderer( 161 | { modelMaxPromptTokens: budgetInformation?.tokenBudget ?? Number.MAX_SAFE_INTEGER }, 162 | ctor, 163 | props, 164 | // note: if tokenBudget is given, countTokens is also give and vise-versa. 165 | // `1` is used only as a dummy fallback to avoid errors if no/unlimited budget is provided. 166 | { 167 | mode: OutputMode.Raw, 168 | countMessageTokens(message) { 169 | throw new Error('Tools may only return text, not messages.'); // for now... 170 | }, 171 | tokenLength(part, token) { 172 | if (part.type === Raw.ChatCompletionContentPartKind.Text) { 173 | return Promise.resolve( 174 | budgetInformation?.countTokens(part.text, token) ?? Promise.resolve(1) 175 | ); 176 | } 177 | return Promise.resolve(1); 178 | }, 179 | } 180 | ); 181 | 182 | return renderer.renderElementJSON(token); 183 | } 184 | -------------------------------------------------------------------------------- /src/base/jsonTypes.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import type { Range } from 'vscode'; 6 | import { ChatResponseReferencePartStatusKind } from './results'; 7 | import { UriComponents } from './util/vs/common/uri'; 8 | import { BasePromptElementProps, PromptElementProps } from './types'; 9 | 10 | // Types in this region are the JSON representation of prompt elements. These 11 | // can be transmitted between tools and tool callers. 
12 | // 13 | // ⚠️ Changes to these types MUST be made in a backwards-compatible way. ⚠️ 14 | // Tools and tool callers may be using different prompt-tsx versions. 15 | // 16 | // All enums in this file have explicitly-assigned values, and authors should 17 | // take care not to change existing enum valus. 18 | 19 | export const enum PromptNodeType { 20 | Piece = 1, 21 | Text = 2, 22 | Opaque = 3, 23 | } 24 | 25 | export interface TextJSON { 26 | type: PromptNodeType.Text; 27 | text: string; 28 | priority: number | undefined; 29 | references: PromptReferenceJSON[] | undefined; 30 | lineBreakBefore: boolean | undefined; 31 | } 32 | 33 | /** 34 | * Constructor kind of the node represented by {@link PieceJSON}. This is 35 | * less descriptive than the actual constructor, as we only care to preserve 36 | * the element data that the renderer cares about. 37 | */ 38 | export const enum PieceCtorKind { 39 | BaseChatMessage = 1, 40 | Other = 2, 41 | ImageChatMessage = 3, 42 | } 43 | 44 | export const jsonRetainedProps = Object.keys({ 45 | flexBasis: 1, 46 | flexGrow: 1, 47 | flexReserve: 1, 48 | passPriority: 1, 49 | priority: 1, 50 | } satisfies { [key in keyof BasePromptElementProps]: 1 }) as readonly (keyof BasePromptElementProps)[]; 51 | 52 | export interface BasePieceJSON { 53 | type: PromptNodeType.Piece; 54 | ctor: PieceCtorKind.BaseChatMessage | PieceCtorKind.Other; 55 | ctorName: string | undefined; 56 | children: PromptNodeJSON[]; 57 | references: PromptReferenceJSON[] | undefined; 58 | props: Record; 59 | keepWithId?: number; 60 | flags?: number; // ContainerFlags 61 | } 62 | 63 | export interface ImageChatMessagePieceJSON { 64 | type: PromptNodeType.Piece; 65 | ctor: PieceCtorKind.ImageChatMessage; 66 | children: PromptNodeJSON[]; 67 | references: PromptReferenceJSON[] | undefined; 68 | props: { 69 | src: string; 70 | detail?: 'low' | 'high'; 71 | }; 72 | } 73 | 74 | export interface OpaqueJSON { 75 | type: PromptNodeType.Opaque; 76 | tokenUsage?: number; 77 | 
value: unknown; 78 | priority?: number; 79 | } 80 | 81 | export type PieceJSON = BasePieceJSON | ImageChatMessagePieceJSON; 82 | 83 | export type PromptNodeJSON = PieceJSON | TextJSON | OpaqueJSON; 84 | 85 | export type UriOrLocationJSON = UriComponents | { uri: UriComponents; range: Range }; 86 | 87 | export interface PromptReferenceJSON { 88 | anchor: UriOrLocationJSON | { variableName: string; value?: UriOrLocationJSON }; 89 | iconPath?: UriComponents | { id: string } | { light: UriComponents; dark: UriComponents }; 90 | options?: { status?: { description: string; kind: ChatResponseReferencePartStatusKind } }; 91 | } 92 | 93 | export interface PromptElementJSON { 94 | node: PieceJSON; 95 | } 96 | 97 | /** Iterates over each {@link PromptNodeJSON} in the tree. */ 98 | export function forEachNode(node: PromptNodeJSON, fn: (node: PromptNodeJSON) => void) { 99 | fn(node); 100 | 101 | if (node.type === PromptNodeType.Piece) { 102 | for (const child of node.children) { 103 | forEachNode(child, fn); 104 | } 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/base/once.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 
3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | export function once any>(fn: T): T & { clear: () => void } { 6 | let result: ReturnType; 7 | let called = false; 8 | 9 | const wrappedFunction = ((...args: Parameters): ReturnType => { 10 | if (!called) { 11 | result = fn(...args); 12 | called = true; 13 | } 14 | return result; 15 | }) as T & { clear: () => void }; 16 | 17 | wrappedFunction.clear = () => { 18 | called = false; 19 | }; 20 | 21 | return wrappedFunction; 22 | } 23 | -------------------------------------------------------------------------------- /src/base/output/mode.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import type { LanguageModelChatMessage } from 'vscode'; 6 | import { toOpenAiChatMessage, toOpenAIChatMessages } from './openaiConvert'; 7 | import { ChatMessage as OpenAIChatMessage } from './openaiTypes'; 8 | import { ChatMessage as RawChatMessage } from './rawTypes'; 9 | import { toVsCodeChatMessage, toVsCodeChatMessages } from './vscode'; 10 | 11 | export * as OpenAI from './openaiTypes'; 12 | export * as Raw from './rawTypes'; 13 | 14 | export enum OutputMode { 15 | Raw = 1, 16 | OpenAI = 1 << 1, 17 | VSCode = 1 << 2, 18 | } 19 | 20 | /** Map of the mode to the type of message it produces. */ 21 | export interface ModeToChatMessageType { 22 | [OutputMode.Raw]: RawChatMessage; 23 | [OutputMode.VSCode]: LanguageModelChatMessage; 24 | [OutputMode.OpenAI]: OpenAIChatMessage; 25 | } 26 | 27 | /** 28 | * Converts the raw message representation emitted by this library to the given 29 | * type of chat message. 
The target chat message may or may not represent all 30 | * data included in the {@link RawChatMessage}. 31 | */ 32 | export function toMode( 33 | mode: Mode, 34 | messages: RawChatMessage 35 | ): ModeToChatMessageType[Mode]; 36 | export function toMode( 37 | mode: Mode, 38 | messages: readonly RawChatMessage[] 39 | ): ModeToChatMessageType[Mode][]; 40 | export function toMode( 41 | mode: Mode, 42 | messages: readonly RawChatMessage[] | RawChatMessage 43 | ): ModeToChatMessageType[Mode][] | ModeToChatMessageType[Mode] { 44 | switch (mode) { 45 | case OutputMode.Raw: 46 | return messages as ModeToChatMessageType[Mode][]; 47 | case OutputMode.VSCode: 48 | return ( 49 | messages instanceof Array ? toVsCodeChatMessages(messages) : toVsCodeChatMessage(messages) 50 | ) as ModeToChatMessageType[Mode]; 51 | case OutputMode.OpenAI: 52 | return ( 53 | messages instanceof Array ? toOpenAIChatMessages(messages) : toOpenAiChatMessage(messages) 54 | ) as ModeToChatMessageType[Mode]; 55 | default: 56 | throw new Error(`Unknown output mode: ${mode}`); 57 | } 58 | } 59 | 60 | export function toVSCode(messages: RawChatMessage): LanguageModelChatMessage; 61 | export function toVSCode(messages: readonly RawChatMessage[]): LanguageModelChatMessage[]; 62 | export function toVSCode( 63 | messages: readonly RawChatMessage[] | RawChatMessage 64 | ): LanguageModelChatMessage | LanguageModelChatMessage[] { 65 | return toMode(OutputMode.VSCode, messages as any); 66 | } 67 | 68 | export function toOpenAI(messages: RawChatMessage): OpenAIChatMessage; 69 | export function toOpenAI(messages: readonly RawChatMessage[]): OpenAIChatMessage[]; 70 | export function toOpenAI( 71 | messages: readonly RawChatMessage[] | RawChatMessage 72 | ): OpenAIChatMessage | OpenAIChatMessage[] { 73 | return toMode(OutputMode.OpenAI, messages as any); 74 | } 75 | -------------------------------------------------------------------------------- /src/base/output/openaiConvert.ts: 
/** Flattens only the text parts of the content into a single string; image/opaque parts are dropped. */
function onlyStringContent(content: Raw.ChatCompletionContentPart[]): string {
	return content
		.filter(part => part.type === Raw.ChatCompletionContentPartKind.Text)
		.map(part => part.text)
		.join('');
}

/**
 * Converts raw content parts to OpenAI content. Returns a plain string when
 * every part is text; otherwise returns the structured part array.
 * Opaque parts are passed through only when usable in OpenAI mode; cache
 * breakpoints and other unknown kinds map to undefined and are filtered out.
 */
function stringAndImageContent(
	content: Raw.ChatCompletionContentPart[]
): string | OpenAI.ChatCompletionContentPart[] {

	const parts = content
		.map((part): OpenAI.ChatCompletionContentPart | undefined => {
			if (part.type === Raw.ChatCompletionContentPartKind.Text) {
				return {
					type: 'text',
					text: part.text,
				};
			} else if (part.type === Raw.ChatCompletionContentPartKind.Image) {
				return {
					image_url: part.imageUrl,
					type: 'image_url',
				};
			} else if (
				part.type === Raw.ChatCompletionContentPartKind.Opaque &&
				Raw.ChatCompletionContentPartOpaque.usableIn(part, OutputMode.OpenAI)
			) {
				// Opaque values are assumed to already be in OpenAI part shape.
				return part.value as any;
			}
		})
		.filter(r => !!r);


	// Prefer the simpler string form when no non-text parts survived.
	if (parts.every(part => part.type === 'text')) {
		return parts.map(p=> (p as OpenAI.ChatCompletionContentPartText).text).join('');
	}

	return parts;
}

/**
 * Converts a single raw chat message to an OpenAI chat message.
 * Returns undefined for roles that have no OpenAI equivalent.
 * Note: system and assistant content is text-only; user and tool content may
 * carry images/structured parts.
 */
export function toOpenAiChatMessage(message: Raw.ChatMessage): OpenAI.ChatMessage | undefined {
	switch (message.role) {
		case Raw.ChatRole.System:
			return {
				role: OpenAI.ChatRole.System,
				content: onlyStringContent(message.content),
				name: message.name,
			};
		case Raw.ChatRole.User:
			return {
				role: OpenAI.ChatRole.User,
				content: stringAndImageContent(message.content),
				name: message.name,
			};
		case Raw.ChatRole.Assistant:
			return {
				role: OpenAI.ChatRole.Assistant,
				content: onlyStringContent(message.content),
				name: message.name,
				// Tool calls are re-shaped to the OpenAI wire format.
				tool_calls: message.toolCalls?.map(toolCall => ({
					id: toolCall.id,
					function: toolCall.function,
					type: 'function',
				})),
			};
		case Raw.ChatRole.Tool:
			return {
				role: OpenAI.ChatRole.Tool,
				content: stringAndImageContent(message.content),
				tool_call_id: message.toolCallId,
			};
		default:
			return undefined;
	}
}

/** Converts a list of raw messages, dropping any with no OpenAI equivalent. */
export function toOpenAIChatMessages(messages: readonly Raw.ChatMessage[]): OpenAI.ChatMessage[] {
	return messages.map(toOpenAiChatMessage).filter(r => !!r);
}
27 | */ 28 | name?: string; 29 | } 30 | 31 | export interface UserChatMessage { 32 | role: ChatRole.User; 33 | 34 | /** 35 | * The content of the chat message. 36 | */ 37 | content: string | Array; 38 | 39 | /** 40 | * An optional name for the participant. Provides the model information to differentiate between participants of the same role. 41 | */ 42 | name?: string; 43 | } 44 | 45 | export type ChatCompletionContentPart = ChatCompletionContentPartImage | ChatCompletionContentPartText; 46 | 47 | export interface ChatCompletionContentPartImage { 48 | image_url: ChatCompletionContentPartImage.ImageURL; 49 | 50 | /** 51 | * The type of the content part. 52 | */ 53 | type: 'image_url'; 54 | } 55 | 56 | export namespace ChatCompletionContentPartImage { 57 | export interface ImageURL { 58 | /** 59 | * Either a URL of the image or the base64 encoded image data. 60 | */ 61 | url: string; 62 | 63 | /** 64 | * Specifies the detail level of the image. Learn more in the 65 | * [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). 66 | */ 67 | detail?: 'low' | 'high'; 68 | } 69 | } 70 | 71 | export interface ChatCompletionContentPartText { 72 | /** 73 | * The text content. 74 | */ 75 | text: string; 76 | 77 | /** 78 | * The type of the content part. 79 | */ 80 | type: 'text'; 81 | } 82 | 83 | export interface ChatMessageToolCall { 84 | /** 85 | * The ID of the tool call. 86 | */ 87 | id: string; 88 | 89 | /** 90 | * The function that the model called. 91 | */ 92 | function: ChatMessageFunction; 93 | 94 | /** 95 | * The type of the tool. Currently, only `function` is supported. 96 | */ 97 | type: 'function'; 98 | } 99 | 100 | export interface AssistantChatMessage { 101 | role: ChatRole.Assistant; 102 | 103 | /** 104 | * The content of the chat message. 105 | */ 106 | content: string; 107 | 108 | /** 109 | * An optional name for the participant. 
Provides the model information to differentiate between participants of the same role. 110 | */ 111 | name?: string; 112 | 113 | /** 114 | * The tool calls generated by the model. 115 | */ 116 | tool_calls?: Array; 117 | } 118 | 119 | export interface ToolChatMessage { 120 | role: ChatRole.Tool; 121 | 122 | /** 123 | * Tool call that this message is responding to. 124 | */ 125 | tool_call_id?: string; 126 | 127 | /** 128 | * The content of the chat message. 129 | */ 130 | content: string | Array; 131 | } 132 | 133 | /** 134 | * @deprecated Use {@link ToolChatMessage} instead. 135 | */ 136 | export interface FunctionChatMessage { 137 | role: ChatRole.Function; 138 | 139 | /** 140 | * The content of the chat message. 141 | */ 142 | content: string; 143 | 144 | /** 145 | * The name of the function that was called 146 | */ 147 | name: string; 148 | } 149 | 150 | /** 151 | * The function that the model called. 152 | */ 153 | export interface ChatMessageFunction { 154 | /** 155 | * The arguments to call the function with, as generated by the model in JSON 156 | * format. Note that the model does not always generate valid JSON, and may 157 | * hallucinate parameters not defined by your function schema. Validate the 158 | * arguments in your code before calling your function. 159 | */ 160 | arguments: string; 161 | 162 | /** 163 | * The name of the function to call. 164 | */ 165 | name: string; 166 | } 167 | 168 | /** 169 | * The role of a message in an OpenAI completions request. 170 | */ 171 | export enum ChatRole { 172 | System = 'system', 173 | User = 'user', 174 | Assistant = 'assistant', 175 | Function = 'function', 176 | Tool = 'tool', 177 | } 178 | 179 | /** 180 | * BaseTokensPerCompletion is the minimum tokens for a completion request. 181 | * Replies are primed with <|im_start|>assistant<|message|>, so these tokens represent the 182 | * special token and the role name. 
183 | */ 184 | export const BaseTokensPerCompletion = 3; 185 | /* 186 | * Each GPT 3.5 / GPT 4 message comes with 3 tokens per message due to special characters 187 | */ 188 | export const BaseTokensPerMessage = 3; 189 | /* 190 | * Since gpt-3.5-turbo-0613 each name costs 1 token 191 | */ 192 | export const BaseTokensPerName = 1; 193 | -------------------------------------------------------------------------------- /src/base/output/rawTypes.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import { assertNever } from '../util/assert'; 6 | import type { OutputMode, toMode } from './mode'; 7 | 8 | /** 9 | * A chat message emitted by this library. This can be mapped to other APIs 10 | * easily using {@link toMode}. 11 | * 12 | * Please note: 13 | * - Enumerations and union types are non-exhaustive. More types may be added 14 | * in the future. 15 | * - Data in this representation is very permissible and converting to API 16 | * representations may be lossy. 17 | */ 18 | export type ChatMessage = 19 | | AssistantChatMessage 20 | | SystemChatMessage 21 | | UserChatMessage 22 | | ToolChatMessage; 23 | 24 | /** 25 | * The role of a message in an OpenAI completions request. 
26 | */ 27 | export enum ChatRole { 28 | System, 29 | User, 30 | Assistant, 31 | Tool, 32 | } 33 | 34 | export namespace ChatRole { 35 | export function display(role: ChatRole): string { 36 | switch (role) { 37 | case ChatRole.System: 38 | return 'system'; 39 | case ChatRole.User: 40 | return 'user'; 41 | case ChatRole.Assistant: 42 | return 'assistant'; 43 | case ChatRole.Tool: 44 | return 'tool'; 45 | default: 46 | assertNever(role, `unknown chat role ${role}}`); 47 | } 48 | } 49 | } 50 | 51 | export interface BaseChatMessage { 52 | role: ChatRole; 53 | content: ChatCompletionContentPart[]; 54 | /** 55 | * An optional name for the participant. Provides the model information to differentiate between participants of the same role. 56 | */ 57 | name?: string; 58 | } 59 | 60 | export interface SystemChatMessage extends BaseChatMessage { 61 | role: ChatRole.System; 62 | } 63 | 64 | export interface UserChatMessage extends BaseChatMessage { 65 | role: ChatRole.User; 66 | } 67 | 68 | export type ChatCompletionContentPart = 69 | | ChatCompletionContentPartImage 70 | | ChatCompletionContentPartText 71 | | ChatCompletionContentPartOpaque 72 | | ChatCompletionContentPartCacheBreakpoint; 73 | 74 | export enum ChatCompletionContentPartKind { 75 | Image, 76 | Text, 77 | Opaque, 78 | CacheBreakpoint, 79 | } 80 | 81 | /** An image completion */ 82 | export interface ChatCompletionContentPartImage { 83 | imageUrl: ImageURLReference; 84 | type: ChatCompletionContentPartKind.Image; 85 | } 86 | 87 | export interface ChatCompletionContentPartCacheBreakpoint { 88 | type: ChatCompletionContentPartKind.CacheBreakpoint; 89 | /** 90 | * Optional implementation-specific type of the breakpoint. 91 | */ 92 | cacheType?: string; 93 | } 94 | 95 | export interface ImageURLReference { 96 | /** 97 | * Either a URL of the image or the base64 encoded image data. 98 | */ 99 | url: string; 100 | 101 | /** 102 | * Specifies the detail level of the image. 
Learn more in the 103 | * [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). 104 | */ 105 | detail?: 'low' | 'high'; 106 | } 107 | 108 | export interface ChatCompletionContentPartText { 109 | /** 110 | * The text content. 111 | */ 112 | text: string; 113 | 114 | /** 115 | * The type of the content part. 116 | */ 117 | type: ChatCompletionContentPartKind.Text; 118 | } 119 | 120 | export interface ChatCompletionContentPartOpaque { 121 | /** 122 | * A JSON-stringifiable value 123 | */ 124 | value: unknown; 125 | 126 | /** 127 | * Constant-value token usage of this content part. If undefined, it will 128 | * be assumed 0. 129 | */ 130 | tokenUsage?: number; 131 | 132 | /** 133 | * A bitset of output modes where this content part will be omitted. 134 | * E.g. `scope: OutputMode.Anthropic | OutputMode.VSCode`. Not all outputs 135 | * will support opaque parts everywhere. 136 | */ 137 | scope?: number; 138 | 139 | /** 140 | * The type of the content part. 141 | */ 142 | type: ChatCompletionContentPartKind.Opaque; 143 | } 144 | 145 | export namespace ChatCompletionContentPartOpaque { 146 | export function usableIn(part: ChatCompletionContentPartOpaque, mode: OutputMode) { 147 | return !part.scope || (part.scope & mode) !== 0; 148 | } 149 | } 150 | 151 | export interface ChatMessageToolCall { 152 | /** 153 | * The ID of the tool call. 154 | */ 155 | id: string; 156 | 157 | /** 158 | * The function that the model called. 159 | */ 160 | function: ChatMessageFunction; 161 | 162 | /** 163 | * The type of the tool. Currently, only `function` is supported. 164 | */ 165 | type: 'function'; 166 | } 167 | 168 | export interface AssistantChatMessage extends BaseChatMessage { 169 | role: ChatRole.Assistant; 170 | /** 171 | * An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
172 | */ 173 | name?: string; 174 | 175 | /** 176 | * The tool calls generated by the model. 177 | */ 178 | toolCalls?: ChatMessageToolCall[]; 179 | } 180 | 181 | export interface ToolChatMessage extends BaseChatMessage { 182 | role: ChatRole.Tool; 183 | 184 | /** 185 | * Tool call that this message is responding to. 186 | */ 187 | toolCallId: string; 188 | } 189 | 190 | /** 191 | * The function that the model called. 192 | */ 193 | export interface ChatMessageFunction { 194 | /** 195 | * The arguments to call the function with, as generated by the model in JSON 196 | * format. 197 | */ 198 | arguments: string; 199 | 200 | /** 201 | * The name of the function to call. 202 | */ 203 | name: string; 204 | } 205 | -------------------------------------------------------------------------------- /src/base/output/vscode.ts: -------------------------------------------------------------------------------- 1 | import type * as vscodeType from 'vscode'; 2 | import * as Raw from './rawTypes'; 3 | 4 | function onlyStringContent(content: Raw.ChatCompletionContentPart[]): string { 5 | return content 6 | .filter(part => part.type === Raw.ChatCompletionContentPartKind.Text) 7 | .map(part => (part as Raw.ChatCompletionContentPartText).text) 8 | .join(''); 9 | } 10 | 11 | let vscode: typeof vscodeType; 12 | 13 | export function toVsCodeChatMessage( 14 | m: Raw.ChatMessage 15 | ): vscodeType.LanguageModelChatMessage | undefined { 16 | vscode ??= require('vscode'); 17 | 18 | switch (m.role) { 19 | case Raw.ChatRole.Assistant: 20 | const message: vscodeType.LanguageModelChatMessage = 21 | vscode.LanguageModelChatMessage.Assistant(onlyStringContent(m.content), m.name); 22 | if (m.toolCalls) { 23 | message.content = [ 24 | new vscode.LanguageModelTextPart(onlyStringContent(m.content)), 25 | ...m.toolCalls.map(tc => { 26 | // prompt-tsx got args passed as a string, here we assume they are JSON because the vscode-type wants an object 27 | let parsedArgs: object; 28 | try { 29 | parsedArgs = 
JSON.parse(tc.function.arguments); 30 | } catch (err) { 31 | throw new Error('Invalid JSON in tool call arguments for tool call: ' + tc.id); 32 | } 33 | 34 | return new vscode.LanguageModelToolCallPart(tc.id, tc.function.name, parsedArgs); 35 | }), 36 | ]; 37 | } 38 | return message; 39 | case Raw.ChatRole.User: 40 | return vscode.LanguageModelChatMessage.User(onlyStringContent(m.content), m.name); 41 | case Raw.ChatRole.Tool: { 42 | const message: vscodeType.LanguageModelChatMessage = vscode.LanguageModelChatMessage.User(''); 43 | message.content = [ 44 | new vscode.LanguageModelToolResultPart(m.toolCallId, [ 45 | new vscode.LanguageModelTextPart(onlyStringContent(m.content)), 46 | ]), 47 | ]; 48 | return message; 49 | } 50 | default: 51 | return undefined; 52 | } 53 | } 54 | /** 55 | * Converts an array of {@link ChatMessage} objects to an array of corresponding {@link LanguageModelChatMessage VS Code chat messages}. 56 | * @param messages - The array of {@link ChatMessage} objects to convert. 57 | * @returns An array of {@link LanguageModelChatMessage VS Code chat messages}. 58 | */ 59 | export function toVsCodeChatMessages( 60 | messages: readonly Raw.ChatMessage[] 61 | ): vscodeType.LanguageModelChatMessage[] { 62 | return messages.map(toVsCodeChatMessage).filter(r => !!r); 63 | } 64 | -------------------------------------------------------------------------------- /src/base/promptElement.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 
3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import type { CancellationToken, Progress } from 'vscode'; 6 | import './tsx'; 7 | import { BasePromptElementProps, PromptElementProps, PromptPiece, PromptSizing } from './types'; 8 | import { ChatResponsePart } from './vscodeTypes'; 9 | 10 | /** 11 | * `PromptElement` represents a single element of a prompt. 12 | * A prompt element can be rendered by the {@link PromptRenderer} to produce {@link ChatMessage} chat messages. 13 | * 14 | * @remarks Newlines are not preserved in string literals when rendered, and must be explicitly declared with the builtin `
` attribute. 15 | * 16 | * @template P - The type of the properties for the prompt element. It extends `BasePromptElementProps`. 17 | * @template S - The type of the state for the prompt element. It defaults to `void`. 18 | * 19 | * @property props - The properties of the prompt element. 20 | * @property priority - The priority of the prompt element. If not provided, defaults to 0. 21 | * 22 | * @method prepare - Optionally prepares asynchronous state before the prompt element is rendered. 23 | * @method render - Renders the prompt element. This method is abstract and must be implemented by subclasses. 24 | */ 25 | export abstract class PromptElement< 26 | P extends BasePromptElementProps = BasePromptElementProps, 27 | S = void 28 | > { 29 | public readonly props: PromptElementProps

; 30 | 31 | get priority(): number { 32 | return this.props.priority ?? Number.MAX_SAFE_INTEGER; 33 | } 34 | 35 | get insertLineBreakBefore(): boolean { 36 | return true; 37 | } 38 | 39 | constructor(props: PromptElementProps

) { 40 | this.props = props; 41 | } 42 | 43 | /** 44 | * Optionally prepare asynchronous state before the prompt element is rendered. 45 | * @param progress - Optionally report progress to the user for long-running state preparation. 46 | * @param token - A cancellation token that can be used to signal cancellation to the prompt element. 47 | * 48 | * @returns A promise that resolves to the prompt element's state. 49 | */ 50 | prepare?( 51 | sizing: PromptSizing, 52 | progress?: Progress, 53 | token?: CancellationToken 54 | ): Promise; 55 | 56 | /** 57 | * Renders the prompt element. 58 | * 59 | * @param state - The state of the prompt element. 60 | * @param sizing - The sizing information for the prompt. 61 | * @param progress - Optionally report progress to the user for long-running state preparation. 62 | * @param token - A cancellation token that can be used to signal cancellation to the prompt element. 63 | * @returns The rendered prompt piece or undefined if the element does not want to render anything. 64 | */ 65 | abstract render( 66 | state: S, 67 | sizing: PromptSizing, 68 | progress?: Progress, 69 | token?: CancellationToken 70 | ): Promise | PromptPiece | undefined; 71 | } 72 | -------------------------------------------------------------------------------- /src/base/promptElements.tsx: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 
3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import type { 6 | CancellationToken, 7 | LanguageModelPromptTsxPart, 8 | LanguageModelTextPart, 9 | LanguageModelToolResult, 10 | } from 'vscode'; 11 | import { contentType, Raw } from '.'; 12 | import { PromptElement } from './promptElement'; 13 | import { 14 | BasePromptElementProps, 15 | PromptElementCtor, 16 | PromptElementProps, 17 | PromptPiece, 18 | PromptPieceChild, 19 | PromptSizing, 20 | } from './types'; 21 | import { PromptElementJSON } from './jsonTypes'; 22 | 23 | export type ChatMessagePromptElement = SystemMessage | UserMessage | AssistantMessage; 24 | 25 | export function isChatMessagePromptElement(element: unknown): element is ChatMessagePromptElement { 26 | return ( 27 | element instanceof SystemMessage || 28 | element instanceof UserMessage || 29 | element instanceof AssistantMessage 30 | ); 31 | } 32 | 33 | export interface ChatMessageProps extends BasePromptElementProps { 34 | role?: Raw.ChatRole; 35 | name?: string; 36 | } 37 | 38 | export class BaseChatMessage< 39 | T extends ChatMessageProps = ChatMessageProps 40 | > extends PromptElement { 41 | render() { 42 | return <>{this.props.children}; 43 | } 44 | } 45 | 46 | /** 47 | * A {@link PromptElement} which can be rendered to an OpenAI system chat message. 48 | * 49 | * See {@link https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages} 50 | */ 51 | export class SystemMessage extends BaseChatMessage { 52 | constructor(props: ChatMessageProps) { 53 | props.role = Raw.ChatRole.System; 54 | super(props); 55 | } 56 | } 57 | 58 | /** 59 | * A {@link PromptElement} which can be rendered to an OpenAI user chat message. 
60 | * 61 | * See {@link https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages} 62 | */ 63 | export class UserMessage extends BaseChatMessage { 64 | constructor(props: ChatMessageProps) { 65 | props.role = Raw.ChatRole.User; 66 | super(props); 67 | } 68 | } 69 | 70 | export interface ToolCall { 71 | id: string; 72 | function: ToolFunction; 73 | type: 'function'; 74 | /** 75 | * A `` element, created from {@link useKeepWith}, that wraps 76 | * the tool result. This will ensure that if the tool result is pruned, 77 | * the tool call is also pruned to avoid errors. 78 | */ 79 | keepWith?: KeepWithCtor; 80 | } 81 | 82 | export interface ToolFunction { 83 | arguments: string; 84 | name: string; 85 | } 86 | 87 | export interface AssistantMessageProps extends ChatMessageProps { 88 | toolCalls?: ToolCall[]; 89 | } 90 | 91 | /** 92 | * A {@link PromptElement} which can be rendered to an OpenAI assistant chat message. 93 | * 94 | * See {@link https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages} 95 | */ 96 | export class AssistantMessage extends BaseChatMessage { 97 | constructor(props: AssistantMessageProps) { 98 | props.role = Raw.ChatRole.Assistant; 99 | super(props); 100 | } 101 | } 102 | 103 | const WHITESPACE_RE = /\s+/g; 104 | 105 | export interface ToolMessageProps extends ChatMessageProps { 106 | toolCallId: string; 107 | } 108 | 109 | /** 110 | * A {@link PromptElement} which can be rendered to an OpenAI tool chat message. 
111 | * 112 | * See {@link https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages} 113 | */ 114 | export class ToolMessage extends BaseChatMessage { 115 | constructor(props: ToolMessageProps) { 116 | props.role = Raw.ChatRole.Tool; 117 | super(props); 118 | } 119 | } 120 | 121 | export interface TextChunkProps extends BasePromptElementProps { 122 | /** 123 | * If defined, the text chunk will potentially truncate its contents at the 124 | * last occurrence of the string or regular expression to ensure its content 125 | * fits within in token budget. 126 | * 127 | * {@see BasePromptElementProps} for options to control how the token budget 128 | * is allocated. 129 | */ 130 | breakOn?: RegExp | string; 131 | 132 | /** A shortcut for setting {@link breakOn} to `/\s+/g` */ 133 | breakOnWhitespace?: boolean; 134 | } 135 | 136 | /** 137 | * @property {string} src - The source of the image. This should be a raw base64 string. 138 | * @property {'low' | 'high'} [detail] - Optional. The detail level of the image. Can be either 'low' or 'high'. If not specified, `high` is used. 139 | */ 140 | export interface ImageProps extends BasePromptElementProps { 141 | src: string; 142 | detail?: 'low' | 'high'; 143 | } 144 | 145 | /** 146 | * A chunk of single-line or multi-line text that is a direct child of a {@link ChatMessagePromptElement}. 147 | * 148 | * TextChunks can only have text literals or intrinsic attributes as children. 149 | * It supports truncating text to fix the token budget if passed a {@link TextChunkProps.tokenizer} and {@link TextChunkProps.breakOn} behavior. 150 | * Like other {@link PromptElement}s, it can specify `priority` to determine how it should be prioritized. 151 | */ 152 | export class TextChunk extends PromptElement { 153 | async prepare( 154 | sizing: PromptSizing, 155 | _progress?: unknown, 156 | token?: CancellationToken 157 | ): Promise { 158 | const breakOn = this.props.breakOnWhitespace ? 
WHITESPACE_RE : this.props.breakOn; 159 | if (!breakOn) { 160 | return <>{this.props.children}; 161 | } 162 | 163 | let fullText = ''; 164 | const intrinsics: PromptPiece[] = []; 165 | for (const child of this.props.children || []) { 166 | if (child && typeof child === 'object') { 167 | if (typeof child.ctor !== 'string') { 168 | throw new Error('TextChunk children must be text literals or intrinsic attributes.'); 169 | } else if (child.ctor === 'br') { 170 | fullText += '\n'; 171 | } else { 172 | intrinsics.push(child); 173 | } 174 | } else if (child != null) { 175 | fullText += child; 176 | } 177 | } 178 | 179 | const text = await getTextContentBelowBudget(sizing, breakOn, fullText, token); 180 | return ( 181 | <> 182 | {intrinsics} 183 | {text} 184 | 185 | ); 186 | } 187 | 188 | render(piece: PromptPiece) { 189 | return piece; 190 | } 191 | } 192 | 193 | async function getTextContentBelowBudget( 194 | sizing: PromptSizing, 195 | breakOn: string | RegExp, 196 | fullText: string, 197 | cancellation: CancellationToken | undefined 198 | ) { 199 | if (breakOn instanceof RegExp) { 200 | if (!breakOn.global) { 201 | throw new Error(`\`breakOn\` expression must have the global flag set (got ${breakOn})`); 202 | } 203 | 204 | breakOn.lastIndex = 0; 205 | } 206 | 207 | let outputText = ''; 208 | let lastIndex = -1; 209 | while (lastIndex < fullText.length) { 210 | let index: number; 211 | if (typeof breakOn === 'string') { 212 | index = fullText.indexOf(breakOn, lastIndex === -1 ? 0 : lastIndex + breakOn.length); 213 | } else { 214 | index = breakOn.exec(fullText)?.index ?? 
-1; 215 | } 216 | 217 | if (index === -1) { 218 | index = fullText.length; 219 | } 220 | 221 | const next = outputText + fullText.slice(Math.max(0, lastIndex), index); 222 | if ( 223 | (await sizing.countTokens( 224 | { type: Raw.ChatCompletionContentPartKind.Text, text: next }, 225 | cancellation 226 | )) > sizing.tokenBudget 227 | ) { 228 | return outputText; 229 | } 230 | 231 | outputText = next; 232 | lastIndex = index; 233 | } 234 | 235 | return outputText; 236 | } 237 | 238 | export class Image extends PromptElement { 239 | constructor(props: ImageProps) { 240 | super(props); 241 | } 242 | 243 | render() { 244 | return <>{this.props.children}; 245 | } 246 | } 247 | 248 | export interface PrioritizedListProps extends BasePromptElementProps { 249 | /** 250 | * Priority of the list element. 251 | * All rendered elements in this list receive a priority that is offset from this value. 252 | */ 253 | priority?: number; 254 | /** 255 | * If `true`, assign higher priority to elements declared earlier in this list. 256 | */ 257 | descending: boolean; 258 | } 259 | 260 | /** 261 | * A utility for assigning priorities to a list of prompt elements. 262 | */ 263 | export class PrioritizedList extends PromptElement { 264 | override render() { 265 | const { children, priority = 0, descending } = this.props; 266 | if (!children) { 267 | return; 268 | } 269 | 270 | return ( 271 | <> 272 | {children.map((child, i) => { 273 | if (!child) { 274 | return; 275 | } 276 | 277 | const thisPriority = descending 278 | ? 
// First element in array of children has highest priority 279 | priority - i 280 | : // Last element in array of children has highest priority 281 | priority - children.length + i; 282 | 283 | if (typeof child !== 'object') { 284 | return {child}; 285 | } 286 | 287 | child.props ??= {}; 288 | child.props.priority = thisPriority; 289 | return child; 290 | })} 291 | 292 | ); 293 | } 294 | } 295 | 296 | export interface IToolResultProps extends BasePromptElementProps { 297 | /** 298 | * Base priority of the tool data. All tool data will be scoped to this priority. 299 | */ 300 | priority?: number; 301 | 302 | /** 303 | * Tool result from VS Code. 304 | */ 305 | data: LanguageModelToolResult; 306 | } 307 | 308 | /** 309 | * A utility to include the result of a tool called using the `vscode.lm.invokeTool` API. 310 | */ 311 | export class ToolResult extends PromptElement { 312 | render(): Promise | PromptPiece | undefined { 313 | // note: future updates to content types should be handled here for backwards compatibility 314 | return ( 315 | <> 316 | {this.props.data.content.map(part => { 317 | if (part && typeof (part as LanguageModelTextPart).value === 'string') { 318 | return (part as LanguageModelTextPart).value; 319 | } else if ( 320 | part && 321 | (part as LanguageModelPromptTsxPart).value && 322 | typeof (part as { value: PromptElementJSON }).value.node === 'object' 323 | ) { 324 | return ( 325 | 326 | ); 327 | } 328 | })} 329 | 330 | ); 331 | } 332 | } 333 | 334 | /** 335 | * Marker element that uses the legacy global prioritization algorithm (0.2.x 336 | * if this library) for pruning child elements. This will be removed in 337 | * the future. 338 | * 339 | * @deprecated 340 | */ 341 | export class LegacyPrioritization extends PromptElement { 342 | render() { 343 | return <>{this.props.children}; 344 | } 345 | } 346 | 347 | /** 348 | * Marker element that ensures all of its children are either included, or 349 | * not included. 
This is similar to the `` element, but it is more 350 | * basic and can contain extrinsic children. 351 | */ 352 | export class Chunk extends PromptElement { 353 | render() { 354 | return <>{this.props.children}; 355 | } 356 | } 357 | 358 | export interface ExpandableProps extends BasePromptElementProps { 359 | value: (sizing: PromptSizing) => string | Promise; 360 | } 361 | 362 | /** 363 | * An element that can expand to fill the remaining token budget. Takes 364 | * a `value` function that is initially called with the element's token budget, 365 | * and may be called multiple times with the new token budget as the prompt 366 | * is resized. 367 | */ 368 | export class Expandable extends PromptElement { 369 | async render(_state: void, sizing: PromptSizing): Promise { 370 | return <>{await this.props.value(sizing)}; 371 | } 372 | } 373 | 374 | export interface TokenLimitProps extends BasePromptElementProps { 375 | max: number; 376 | } 377 | 378 | /** 379 | * An element that ensures its children don't exceed a certain number of 380 | * `maxTokens`. Its contents are pruned to fit within the budget before 381 | * the overall prompt pruning is run. 382 | */ 383 | export class TokenLimit extends PromptElement { 384 | render(): PromptPiece { 385 | return <>{this.props.children}; 386 | } 387 | } 388 | 389 | export abstract class AbstractKeepWith extends PromptElement { 390 | public abstract readonly id: number; 391 | } 392 | 393 | let keepWidthId = 0; 394 | 395 | export type KeepWithCtor = { 396 | new (props: PromptElementProps): AbstractKeepWith; 397 | id: number; 398 | }; 399 | 400 | /** 401 | * Returns a PromptElement that ensures each wrapped element is retained only 402 | * so long as each other wrapped is not empty. 403 | * 404 | * This is useful when dealing with tool calls, for example. In that case, 405 | * your tool call request should only be rendered if the tool call response 406 | * survived prioritization. 
In that case, you implement a `render` function 407 | * like so: 408 | * 409 | * ``` 410 | * render() { 411 | * const KeepWith = useKeepWith(); 412 | * return <> 413 | * ... 414 | * ... 415 | * ; 416 | * } 417 | * ``` 418 | * 419 | * Unlike ``, which blocks pruning of any child elements and simply 420 | * removes them as a block, `` in this case will allow the 421 | * `ToolCallResponse` to be pruned, and if it's fully pruned it will also 422 | * remove the `ToolCallRequest`. 423 | */ 424 | export function useKeepWith(): KeepWithCtor { 425 | const id = keepWidthId++; 426 | return class KeepWith extends AbstractKeepWith { 427 | public static readonly id = id; 428 | 429 | public readonly id = id; 430 | 431 | render(): PromptPiece { 432 | return <>{this.props.children}; 433 | } 434 | }; 435 | } 436 | 437 | export interface IfEmptyProps extends BasePromptElementProps { 438 | alt: PromptPieceChild; 439 | } 440 | 441 | /** 442 | * An element that returns its `alt` prop if its children are empty at the 443 | * time when it's rendered. This is especially useful when you require 444 | * fallback logic for opaque child data, such as tool calls. 445 | */ 446 | export class IfEmpty extends PromptElement { 447 | render(): PromptPiece { 448 | return ( 449 | <> 450 | {this.props.alt} 451 | {this.props.children} 452 | 453 | ); 454 | } 455 | } 456 | 457 | export class LogicalWrapper extends PromptElement { 458 | render(): PromptPiece { 459 | return <>{this.props.children}; 460 | } 461 | } 462 | -------------------------------------------------------------------------------- /src/base/results.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 
3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import type { Location, ThemeIcon, Uri } from 'vscode'; 6 | import * as JSON from './jsonTypes'; 7 | import { URI } from './util/vs/common/uri'; 8 | 9 | /** 10 | * Arbitrary metadata which can be retrieved after the prompt is rendered. 11 | */ 12 | export abstract class PromptMetadata { 13 | readonly _marker: undefined; 14 | toString(): string { 15 | return Object.getPrototypeOf(this).constructor.name; 16 | } 17 | } 18 | 19 | export enum ChatResponseReferencePartStatusKind { 20 | Complete = 1, 21 | Partial = 2, 22 | Omitted = 3, 23 | } 24 | 25 | /** 26 | * A reference used for creating the prompt. 27 | */ 28 | export class PromptReference { 29 | public static fromJSON(json: JSON.PromptReferenceJSON): PromptReference { 30 | // todo@connor4312: do we need to create concrete Location/Range types? 31 | const uriOrLocation = (v: JSON.UriOrLocationJSON): Uri | Location => 32 | 'scheme' in v ? URI.from(v) : { uri: URI.from(v.uri), range: v.range }; 33 | 34 | return new PromptReference( 35 | 'variableName' in json.anchor 36 | ? { 37 | variableName: json.anchor.variableName, 38 | value: json.anchor.value && uriOrLocation(json.anchor.value), 39 | } 40 | : uriOrLocation(json.anchor), 41 | json.iconPath && 42 | ('scheme' in json.iconPath 43 | ? URI.from(json.iconPath) 44 | : 'light' in json.iconPath 45 | ? 
{ light: URI.from(json.iconPath.light), dark: URI.from(json.iconPath.dark) } 46 | : json.iconPath), 47 | json.options 48 | ); 49 | } 50 | 51 | constructor( 52 | readonly anchor: Uri | Location | { variableName: string; value?: Uri | Location }, 53 | readonly iconPath?: Uri | ThemeIcon | { light: Uri; dark: Uri }, 54 | readonly options?: { 55 | status?: { description: string; kind: ChatResponseReferencePartStatusKind }; 56 | isFromTool?: boolean; 57 | } 58 | ) {} 59 | 60 | public toJSON(): JSON.PromptReferenceJSON { 61 | return { 62 | anchor: this.anchor, 63 | iconPath: this.iconPath, 64 | options: this.options, 65 | }; 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/base/test/elements.test.tsx: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 
3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import * as assert from 'assert'; 6 | import { PromptElement } from '../promptElement'; 7 | import { TextChunk, UserMessage } from '../promptElements'; 8 | import { PromptRenderer } from '../promptRenderer'; 9 | import { ITokenizer } from '../tokenizer/tokenizer'; 10 | import { IChatEndpointInfo } from '../types'; 11 | import { OutputMode, Raw } from '../output/mode'; 12 | 13 | suite('PromptElements', () => { 14 | suite('TextChunk', () => { 15 | const tokenizer = new (class TokenPerWordTokenizer implements ITokenizer { 16 | readonly mode = OutputMode.Raw; 17 | baseTokensPerMessage = 0; 18 | baseTokensPerName = 0; 19 | baseTokensPerCompletion = 0; 20 | 21 | tokenLength(part: Raw.ChatCompletionContentPart): number { 22 | if (part.type !== Raw.ChatCompletionContentPartKind.Text) { 23 | return 0; 24 | } 25 | return this.strToken(part.text); 26 | } 27 | 28 | countMessageTokens(message: Raw.ChatMessage): number { 29 | return this.strToken( 30 | message.content 31 | .filter(p => p.type === Raw.ChatCompletionContentPartKind.Text) 32 | .map(p => p.text) 33 | .join('') 34 | ); 35 | } 36 | 37 | private strToken(s: string) { 38 | return s.trim() === '' ? 1 : s.split(/\s+/g).length; 39 | } 40 | })(); 41 | 42 | const assertThrows = async (message: RegExp, fn: () => Promise) => { 43 | let thrown = false; 44 | try { 45 | await fn(); 46 | } catch (e) { 47 | thrown = true; 48 | assert.ok(message.test((e as Error).message)); 49 | } 50 | assert.ok(thrown, 'expected to throw'); 51 | }; 52 | 53 | test('split behavior', async () => { 54 | const inst = new PromptRenderer( 55 | { modelMaxPromptTokens: 11 } satisfies Partial as IChatEndpointInfo, 56 | class extends PromptElement { 57 | render() { 58 | return ( 59 | 60 | 61 | 1a 62 |
63 | 1b 1c 1d 1e 1f 1g 1h 1i 1j 1k 1l 1m 1n 1o 1p 1q 1r 1s 1t 1u 1v 1w 1x 1y 1z 64 |
65 | 66 | 2a 2b 2c 2d 2e 2f 2g 2h 2i 2j 2k 2l 2m 2n 2o 2p 2q 2r 2s 2t 2u 2v 2w 2x 2y 2z 67 | 68 |
69 | ); 70 | } 71 | }, 72 | {}, 73 | tokenizer 74 | ); 75 | const res = await inst.render(undefined, undefined); 76 | assert.deepStrictEqual(res.messages, [ 77 | { 78 | content: [{ 79 | type: Raw.ChatCompletionContentPartKind.Text, 80 | text: '1a\n1b 1c 1d 1e\n2a 2b 2c 2d 2e', 81 | }], 82 | role: Raw.ChatRole.User, 83 | }, 84 | ]); 85 | }); 86 | 87 | test('throws on extrinsic', async () => { 88 | await assertThrows(/must be text literals/, async () => { 89 | const inst = new PromptRenderer( 90 | { modelMaxPromptTokens: 11 } satisfies Partial as IChatEndpointInfo, 91 | class Foo extends PromptElement { 92 | render() { 93 | return ( 94 | 95 | 96 | 97 | 98 | 99 | ); 100 | } 101 | }, 102 | {}, 103 | tokenizer 104 | ); 105 | await inst.render(undefined, undefined); 106 | }); 107 | }); 108 | }); 109 | }); 110 | -------------------------------------------------------------------------------- /src/base/test/materialized.test.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 
3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import * as assert from 'assert'; 6 | import { 7 | LineBreakBefore, 8 | MaterializedChatMessage, 9 | MaterializedChatMessageTextChunk, 10 | GenericMaterializedContainer, 11 | } from '../materialized'; 12 | import { OutputMode, Raw } from '../output/mode'; 13 | import { ITokenizer } from '../tokenizer/tokenizer'; 14 | import { strFrom } from './testUtils'; 15 | 16 | class MockTokenizer implements ITokenizer { 17 | readonly mode = OutputMode.Raw; 18 | tokenLength(part: Raw.ChatCompletionContentPart): number { 19 | return strFrom(part).length; 20 | } 21 | countMessageTokens(message: Raw.ChatMessage): number { 22 | return strFrom(message).length + 3; 23 | } 24 | } 25 | suite('Materialized', () => { 26 | test('should calculate token count correctly', async () => { 27 | const tokenizer = new MockTokenizer(); 28 | const container = new GenericMaterializedContainer( 29 | undefined, 30 | 1, 31 | undefined, 32 | 1, 33 | parent => [ 34 | new MaterializedChatMessage( 35 | parent, 36 | 0, 37 | Raw.ChatRole.User, 38 | 'user', 39 | undefined, 40 | undefined, 41 | 1, 42 | [], 43 | parent => [ 44 | new MaterializedChatMessageTextChunk(parent, 'Hello', 1, [], LineBreakBefore.None), 45 | new MaterializedChatMessageTextChunk(parent, 'World', 1, [], LineBreakBefore.None), 46 | ] 47 | ), 48 | ], 49 | [], 50 | 0 51 | ); 52 | 53 | assert.deepStrictEqual(await container.tokenCount(tokenizer), 13); 54 | container.removeLowestPriorityChild(); 55 | assert.deepStrictEqual(await container.tokenCount(tokenizer), 8); 56 | }); 57 | 58 | test('should calculate lower bound token count correctly', async () => { 59 | const tokenizer = new MockTokenizer(); 60 | const container = new GenericMaterializedContainer( 61 | undefined, 62 | 1, 63 | undefined, 64 | 1, 65 | parent => [ 66 | new MaterializedChatMessage( 67 | parent, 68 | 0, 69 | Raw.ChatRole.User, 70 | 'user', 71 | undefined, 72 | 
undefined, 73 | 1, 74 | [], 75 | parent => [ 76 | new MaterializedChatMessageTextChunk(parent, 'Hello', 1, [], LineBreakBefore.None), 77 | new MaterializedChatMessageTextChunk(parent, 'World', 1, [], LineBreakBefore.None), 78 | ] 79 | ), 80 | ], 81 | [], 82 | 0 83 | ); 84 | 85 | assert.deepStrictEqual(await container.upperBoundTokenCount(tokenizer), 13); 86 | container.removeLowestPriorityChild(); 87 | assert.deepStrictEqual(await container.upperBoundTokenCount(tokenizer), 8); 88 | }); 89 | }); 90 | -------------------------------------------------------------------------------- /src/base/test/renderer.bench.tsx: -------------------------------------------------------------------------------- 1 | import { existsSync, readFileSync } from 'fs'; 2 | import { Bench } from 'tinybench'; 3 | import { Cl100KBaseTokenizer } from '../tokenizer/cl100kBaseTokenizer'; 4 | import type * as promptTsx from '..'; 5 | import assert = require('assert'); 6 | 7 | const comparePathVar = 'PROMPT_TSX_COMPARE_PATH'; 8 | const tsxComparePath = 9 | process.env[comparePathVar] || 10 | `${__dirname}/../../../../vscode-copilot/node_modules/@vscode/prompt-tsx`; 11 | const canCompare = existsSync(tsxComparePath); 12 | if (!canCompare) { 13 | console.error( 14 | `$${comparePathVar} was not set / ${tsxComparePath} doesn't exist, so the benchmark will not compare to past behavior` 15 | ); 16 | process.exit(1); 17 | } 18 | 19 | const numberOfRepeats = 1; 20 | const sampleText = readFileSync(`${__dirname}/renderer.test.tsx`, 'utf-8'); 21 | const sampleTextLines = readFileSync(`${__dirname}/renderer.test.tsx`, 'utf-8').split('\n'); 22 | const tokenizer = new Cl100KBaseTokenizer(); 23 | const bench = new Bench({ 24 | name: `trim ${tokenizer.tokenLength({ type: 1, text: sampleText }) * numberOfRepeats}->1k tokens`, 25 | time: 100, 26 | }); 27 | 28 | async function benchTokenizationTrim({ 29 | PromptRenderer, 30 | PromptElement, 31 | UserMessage, 32 | TextChunk, 33 | }: typeof promptTsx) { 34 | const r = 
await new PromptRenderer( 35 | { modelMaxPromptTokens: 1000 }, 36 | class extends PromptElement { 37 | render() { 38 | return ( 39 | <> 40 | {Array.from({ length: numberOfRepeats }, () => ( 41 | 42 | {sampleTextLines.map(l => ( 43 | {l} 44 | ))} 45 | 46 | ))} 47 | 48 | ); 49 | } 50 | }, 51 | {}, 52 | tokenizer 53 | ).render(); 54 | assert(r.tokenCount <= 1000); 55 | assert(r.tokenCount > 100); 56 | } 57 | 58 | bench.add('current', () => benchTokenizationTrim(require('..'))); 59 | if (canCompare) { 60 | const fn = require(tsxComparePath); 61 | bench.add('previous', () => benchTokenizationTrim(fn)); 62 | } 63 | 64 | bench.run().then(() => { 65 | console.log(bench.name); 66 | console.table(bench.table()); 67 | }); 68 | -------------------------------------------------------------------------------- /src/base/test/testUtils.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import { Raw } from '../output/mode'; 6 | 7 | export const strFrom = (message: Raw.ChatMessage | Raw.ChatCompletionContentPart): string => { 8 | if ('role' in message) { 9 | return message.content.map(strFrom).join(''); 10 | } else if (message.type === Raw.ChatCompletionContentPartKind.Text) { 11 | return message.text; 12 | } else { 13 | return ''; 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /src/base/tokenizer/cl100kBaseTokenizer.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 
3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import { 6 | createTokenizer, 7 | getRegexByEncoder, 8 | getSpecialTokensByEncoder, 9 | TikTokenizer, 10 | } from '@microsoft/tiktokenizer'; 11 | import { join } from 'path'; 12 | import { ITokenizer } from './tokenizer'; 13 | import { OutputMode, Raw, OpenAI } from '../output/mode'; 14 | 15 | /** 16 | * The Cl100K BPE tokenizer for the `gpt-4`, `gpt-3.5-turbo`, and `text-embedding-ada-002` models. 17 | * 18 | * See https://github.com/microsoft/Tokenizer 19 | */ 20 | export class Cl100KBaseTokenizer implements ITokenizer { 21 | private _cl100kTokenizer: TikTokenizer | undefined; 22 | 23 | public readonly mode = OutputMode.OpenAI; 24 | public readonly models = ['gpt-4', 'gpt-3.5-turbo', 'text-embedding-ada-002']; 25 | 26 | private readonly baseTokensPerMessage = OpenAI.BaseTokensPerMessage; 27 | private readonly baseTokensPerName = OpenAI.BaseTokensPerName; 28 | 29 | constructor() {} 30 | 31 | /** 32 | * Tokenizes the given text using the Cl100K tokenizer. 33 | * @param text The text to tokenize. 34 | * @returns The tokenized text. 35 | */ 36 | private tokenize(text: string): number[] { 37 | if (!this._cl100kTokenizer) { 38 | this._cl100kTokenizer = this.initTokenizer(); 39 | } 40 | return this._cl100kTokenizer.encode(text); 41 | } 42 | 43 | /** 44 | * Calculates the token length of the given text. 45 | * @param text The text to calculate the token length for. 46 | * @returns The number of tokens in the text. 47 | */ 48 | tokenLength(part: Raw.ChatCompletionContentPart): number { 49 | if (part.type === Raw.ChatCompletionContentPartKind.Text) { 50 | return part.text ? this.tokenize(part.text).length : 0; 51 | } 52 | 53 | return 0; 54 | } 55 | 56 | /** 57 | * Counts tokens for a single chat message within a completion request. 
58 | * 59 | * Follows https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb for GPT 3.5/4 models. 60 | * 61 | * **Note**: The result does not include base tokens for the completion itself. 62 | */ 63 | countMessageTokens(message: OpenAI.ChatMessage): number { 64 | return this.baseTokensPerMessage + this.countObjectTokens(message); 65 | } 66 | 67 | protected countObjectTokens(obj: any): number { 68 | let numTokens = 0; 69 | for (const [key, value] of Object.entries(obj)) { 70 | if (!value) { 71 | continue; 72 | } 73 | 74 | if (typeof value === 'string') { 75 | numTokens += this.tokenize(value).length; 76 | } else if (value) { 77 | // TODO@roblourens - count tokens for tool_calls correctly 78 | // TODO@roblourens - tool_call_id is always 1 token 79 | numTokens += this.countObjectTokens(value); 80 | } 81 | 82 | if (key === 'name') { 83 | numTokens += this.baseTokensPerName; 84 | } 85 | } 86 | 87 | return numTokens; 88 | } 89 | 90 | private initTokenizer(): TikTokenizer { 91 | return createTokenizer( 92 | // This file is copied to `dist` via the `build/postinstall.ts` script 93 | join(__dirname, './cl100k_base.tiktoken'), 94 | getSpecialTokensByEncoder('cl100k_base'), 95 | getRegexByEncoder('cl100k_base'), 96 | 64000 97 | ); 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /src/base/tokenizer/tokenizer.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 
3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import type { CancellationToken, LanguageModelChatMessage } from 'vscode'; 6 | import { ModeToChatMessageType, OutputMode, Raw } from '../output/mode'; 7 | 8 | /** 9 | * Represents a tokenizer that can be used to tokenize text in chat messages. 10 | */ 11 | export interface ITokenizer { 12 | /** 13 | * This mode this tokenizer operates on. 14 | */ 15 | readonly mode: M; 16 | 17 | /** 18 | * Return the length of `part` in number of tokens. If the model does not 19 | * support the given kind of part, it may return 0. 20 | * 21 | * @param {str} text - The input text 22 | * @returns {number} 23 | */ 24 | tokenLength( 25 | part: Raw.ChatCompletionContentPart, 26 | token?: CancellationToken 27 | ): Promise | number; 28 | 29 | /** 30 | * Returns the token length of the given message. 31 | */ 32 | countMessageTokens(message: ModeToChatMessageType[M]): Promise | number; 33 | } 34 | 35 | export class VSCodeTokenizer implements ITokenizer { 36 | public readonly mode = OutputMode.VSCode; 37 | 38 | constructor( 39 | private countTokens: ( 40 | text: string | LanguageModelChatMessage, 41 | token?: CancellationToken 42 | ) => Thenable, 43 | mode: OutputMode 44 | ) { 45 | if (mode !== OutputMode.VSCode) { 46 | throw new Error( 47 | '`mode` must be set to vscode when using vscode.LanguageModelChat as the tokenizer' 48 | ); 49 | } 50 | } 51 | 52 | async tokenLength( 53 | part: Raw.ChatCompletionContentPart, 54 | token?: CancellationToken 55 | ): Promise { 56 | if (part.type === Raw.ChatCompletionContentPartKind.Text) { 57 | return this.countTokens(part.text, token); 58 | } 59 | 60 | return Promise.resolve(0); 61 | } 62 | 63 | async countMessageTokens(message: LanguageModelChatMessage): Promise { 64 | return this.countTokens(message); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/base/tracer.ts: 
-------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import { GenericMaterializedContainer } from './materialized'; 6 | import { ITokenizer } from './tokenizer/tokenizer'; 7 | 8 | export interface ITraceRenderData { 9 | budget: number; 10 | container: GenericMaterializedContainer; 11 | removed: number; 12 | } 13 | 14 | export interface ITraceData { 15 | /** Budget the tree was rendered with initially. */ 16 | budget: number; 17 | 18 | /** Tree returned from the prompt. */ 19 | renderedTree: ITraceRenderData; 20 | 21 | /** Tokenizer that was used. */ 22 | tokenizer: ITokenizer; 23 | 24 | /** Callback the tracer and use to re-render the tree at the given budget. */ 25 | renderTree(tokenBudget: number): Promise; 26 | } 27 | 28 | export interface IElementEpochData { 29 | id: number; 30 | tokenBudget: number; 31 | } 32 | 33 | export interface ITraceEpoch { 34 | inNode: number | undefined; 35 | flexValue: number; 36 | tokenBudget: number; 37 | reservedTokens: number; 38 | elements: IElementEpochData[]; 39 | } 40 | 41 | /** 42 | * Handler that can trace rendering internals. 43 | */ 44 | export interface ITracer { 45 | /** 46 | * Called when a group of elements is rendered. 47 | */ 48 | addRenderEpoch?(epoch: ITraceEpoch): void; 49 | 50 | /** 51 | * Adds an element into the current epoch. 52 | */ 53 | includeInEpoch?(data: IElementEpochData): void; 54 | 55 | /** 56 | * Called when the elements have been processed into their final tree form. 
57 | */ 58 | didMaterializeTree?(traceData: ITraceData): void; 59 | } 60 | -------------------------------------------------------------------------------- /src/base/tsx-globals.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import { PromptElementJSON } from './jsonTypes'; 6 | import { PromptMetadata, PromptReference } from './results'; 7 | import { URI } from './util/vs/common/uri'; 8 | import { ChatDocumentContext } from './vscodeTypes'; 9 | 10 | declare global { 11 | namespace JSX { 12 | interface IntrinsicElements { 13 | /** 14 | * Add meta data which can be retrieved after the prompt is rendered. 15 | */ 16 | meta: { 17 | value: PromptMetadata; 18 | /** 19 | * If set, the metadata will only be included in the rendered result 20 | * if the chunk it's in survives prioritization. 21 | */ 22 | local?: boolean; 23 | }; 24 | /** 25 | * `\n` character. 26 | */ 27 | br: {}; 28 | /** 29 | * Expose context used for creating the prompt. 30 | */ 31 | usedContext: { 32 | value: ChatDocumentContext[]; 33 | }; 34 | /** 35 | * Expose the references used for creating the prompt. 36 | * Will be displayed to the user. 37 | */ 38 | references: { 39 | value: PromptReference[]; 40 | }; 41 | /** 42 | * Files that were excluded from the prompt. 43 | */ 44 | ignoredFiles: { 45 | value: URI[]; 46 | }; 47 | /** 48 | * A JSON element previously rendered in {@link renderElementJSON}. 49 | */ 50 | elementJSON: { 51 | data: PromptElementJSON; 52 | }; 53 | 54 | /** 55 | * A data object that is emitted directly in the output. You as the 56 | * consumer are responsible for ensuring this data works. This element 57 | * has SHARP EDGES and should be used with great care. 
58 | */ 59 | opaque: { 60 | /** Value to be inserted in the output */ 61 | value: unknown; 62 | /** 63 | * The number of tokens consumed by this fragment. This must be AT 64 | * LEAST the number of tokens this part represents in your tokenizer's 65 | * `countMessageTokens` method, or you will get obscure errors. 66 | */ 67 | tokenUsage?: number; 68 | /** Usual priority value. */ 69 | priority?: number; 70 | }; 71 | 72 | /** 73 | * Adds a 'cache breakpoint' to the output. This is exclusively valid 74 | * as a direct child of message types (UserMessage, SystemMessage, etc.) 75 | */ 76 | cacheBreakpoint: { 77 | /** Optional implementation-specific cache type */ 78 | type?: string; 79 | }; 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/base/tsx.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | interface _InternalPromptPiece

{ 6 | ctor: string | any; 7 | props: P; 8 | children: string | (_InternalPromptPiece | undefined)[]; 9 | } 10 | 11 | /** 12 | * Visual Studio Code Prompt Piece 13 | */ 14 | function _vscpp(ctor: any, props: any, ...children: any[]): _InternalPromptPiece { 15 | return { ctor, props, children: children.flat() }; 16 | } 17 | 18 | /** 19 | * Visual Studio Code Prompt Piece Fragment 20 | */ 21 | function _vscppf() { 22 | throw new Error(`This should not be invoked!`); 23 | } 24 | _vscppf.isFragment = true; 25 | 26 | declare const vscpp: typeof _vscpp; 27 | declare const vscppf: typeof _vscppf; 28 | 29 | (globalThis).vscpp = _vscpp; 30 | (globalThis).vscppf = _vscppf; 31 | -------------------------------------------------------------------------------- /src/base/types.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import { CancellationToken } from 'vscode'; 6 | import { PromptElement } from './promptElement'; 7 | import { Raw } from './output/mode'; 8 | 9 | /** 10 | * Represents information about a chat endpoint. 11 | */ 12 | export interface IChatEndpointInfo { 13 | /** 14 | * The maximum number of tokens allowed in the model prompt. 15 | */ 16 | readonly modelMaxPromptTokens: number; 17 | } 18 | 19 | /** 20 | * The sizing hint for the prompt element. Prompt elements should take this into account when rendering. 21 | */ 22 | export interface PromptSizing { 23 | /** 24 | * The computed token allocation for this prompt element to adhere to when rendering, 25 | * if it specified {@link BasePromptElementProps.flexBasis}. 26 | */ 27 | readonly tokenBudget: number; 28 | /** 29 | * Metadata about the endpoint being used. 
30 | */ 31 | readonly endpoint: IChatEndpointInfo; 32 | 33 | /** 34 | * Counts the number of tokens the text consumes. 35 | */ 36 | countTokens(text: Raw.ChatCompletionContentPart | string, token?: CancellationToken): Promise | number; 37 | } 38 | 39 | export interface BasePromptElementProps { 40 | /** 41 | * The absolute priority of the prompt element. 42 | * 43 | * If the messages to be sent exceed the available token budget, prompt elements will be removed from the rendered result, starting with the element with the lowest priority. 44 | * 45 | * If unset, defaults to `Number.MAX_SAFE_INTEGER`, such that elements with no explicit priority take the highest-priority position. 46 | */ 47 | priority?: number; 48 | /** 49 | * If set, the children of the prompt element will be considered children of the parent during pruning. This allows you to create logical wrapper elements, for example: 50 | * 51 | * ``` 52 | * 53 | * 54 | * 55 | * 56 | * 57 | * 58 | * 59 | * ``` 60 | * 61 | * In this case where we have a wrapper element, the prune order would be `ChildA`, `ChildC`, then `ChildB`. 62 | */ 63 | passPriority?: boolean; 64 | /** 65 | * The proportion of the container's {@link PromptSizing.tokenBudget token budget} that is assigned to this prompt element, based on the total weight requested by the prompt element and all its siblings. 66 | * 67 | * This is used to compute the {@link PromptSizing.tokenBudget token budget} hint that the prompt element receives. 68 | * 69 | * If set on a child element, the token budget is calculated with respect to all children under the element's parent, such that a child can never consume more tokens than its parent was allocated. 70 | * 71 | * Defaults to 1. 72 | */ 73 | flexBasis?: number; 74 | 75 | /** 76 | * If set, sibling elements will be rendered first, followed by this element. The remaining {@link PromptSizing.tokenBudget token budget} from the container will be distributed among the elements with `flexGrow` set. 
77 | * 78 | * If multiple elements are present with different values of `flexGrow` set, this process is repeated for each value of `flexGrow` in descending order. 79 | */ 80 | flexGrow?: number; 81 | 82 | /** 83 | * If set with {@link flexGrow}, this defines the number of tokens this element 84 | * will reserve of the container {@link PromptSizing.tokenBudget token budget} 85 | * for sizing purposes in elements rendered before it. 86 | * 87 | * This can be set to a constant number of tokens, or a proportion of the 88 | * container's budget. For example, `/3` would reserve a third of the 89 | * container's budget. 90 | */ 91 | flexReserve?: number | `/${number}`; 92 | } 93 | 94 | export interface PromptElementCtor

{ 95 | isFragment?: boolean; 96 | new (props: P, ...args: any[]): PromptElement; 97 | } 98 | 99 | export interface RuntimePromptElementProps { 100 | children?: PromptPieceChild[]; 101 | } 102 | 103 | export type PromptElementProps = T & BasePromptElementProps & RuntimePromptElementProps; 104 | 105 | export interface PromptPiece

{ 106 | ctor: string | PromptElementCtor; 107 | props: P; 108 | children: PromptPieceChild[]; 109 | } 110 | 111 | export type PromptPieceChild = number | string | PromptPiece | undefined; 112 | -------------------------------------------------------------------------------- /src/base/util/arrays.ts: -------------------------------------------------------------------------------- 1 | // !!! DO NOT modify, this file was COPIED from 'microsoft/vscode' 2 | 3 | /*--------------------------------------------------------------------------------------------- 4 | * Copyright (c) Microsoft Corporation. All rights reserved. 5 | * Licensed under the MIT License. See LICENSE in the project root for license information. 6 | *--------------------------------------------------------------------------------------------*/ 7 | 8 | /** 9 | * @returns New array with all falsy values removed. The original array IS NOT modified. 10 | */ 11 | export function coalesce(array: ReadonlyArray): T[] { 12 | return array.filter(e => !!e); 13 | } 14 | -------------------------------------------------------------------------------- /src/base/util/assert.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | export function assertNever(value: never, msg = `unexpected value ${value}`): never { 6 | throw new Error(`Unreachable: ${msg}`); 7 | } 8 | -------------------------------------------------------------------------------- /src/base/util/vs/common/charCode.ts: -------------------------------------------------------------------------------- 1 | //!!! 
DO NOT modify, this file was COPIED from 'microsoft/vscode' 2 | 3 | /*--------------------------------------------------------------------------------------------- 4 | * Copyright (c) Microsoft Corporation. All rights reserved. 5 | * Licensed under the MIT License. See LICENSE in the project root for license information. 6 | *--------------------------------------------------------------------------------------------*/ 7 | 8 | // Names from https://blog.codinghorror.com/ascii-pronunciation-rules-for-programmers/ 9 | 10 | /** 11 | * An inlined enum containing useful character codes (to be used with String.charCodeAt). 12 | * Please leave the const keyword such that it gets inlined when compiled to JavaScript! 13 | */ 14 | export const enum CharCode { 15 | Null = 0, 16 | /** 17 | * The `\b` character. 18 | */ 19 | Backspace = 8, 20 | /** 21 | * The `\t` character. 22 | */ 23 | Tab = 9, 24 | /** 25 | * The `\n` character. 26 | */ 27 | LineFeed = 10, 28 | /** 29 | * The `\r` character. 30 | */ 31 | CarriageReturn = 13, 32 | Space = 32, 33 | /** 34 | * The `!` character. 35 | */ 36 | ExclamationMark = 33, 37 | /** 38 | * The `"` character. 39 | */ 40 | DoubleQuote = 34, 41 | /** 42 | * The `#` character. 43 | */ 44 | Hash = 35, 45 | /** 46 | * The `$` character. 47 | */ 48 | DollarSign = 36, 49 | /** 50 | * The `%` character. 51 | */ 52 | PercentSign = 37, 53 | /** 54 | * The `&` character. 55 | */ 56 | Ampersand = 38, 57 | /** 58 | * The `'` character. 59 | */ 60 | SingleQuote = 39, 61 | /** 62 | * The `(` character. 63 | */ 64 | OpenParen = 40, 65 | /** 66 | * The `)` character. 67 | */ 68 | CloseParen = 41, 69 | /** 70 | * The `*` character. 71 | */ 72 | Asterisk = 42, 73 | /** 74 | * The `+` character. 75 | */ 76 | Plus = 43, 77 | /** 78 | * The `,` character. 79 | */ 80 | Comma = 44, 81 | /** 82 | * The `-` character. 83 | */ 84 | Dash = 45, 85 | /** 86 | * The `.` character. 87 | */ 88 | Period = 46, 89 | /** 90 | * The `/` character. 
91 | */ 92 | Slash = 47, 93 | 94 | Digit0 = 48, 95 | Digit1 = 49, 96 | Digit2 = 50, 97 | Digit3 = 51, 98 | Digit4 = 52, 99 | Digit5 = 53, 100 | Digit6 = 54, 101 | Digit7 = 55, 102 | Digit8 = 56, 103 | Digit9 = 57, 104 | 105 | /** 106 | * The `:` character. 107 | */ 108 | Colon = 58, 109 | /** 110 | * The `;` character. 111 | */ 112 | Semicolon = 59, 113 | /** 114 | * The `<` character. 115 | */ 116 | LessThan = 60, 117 | /** 118 | * The `=` character. 119 | */ 120 | Equals = 61, 121 | /** 122 | * The `>` character. 123 | */ 124 | GreaterThan = 62, 125 | /** 126 | * The `?` character. 127 | */ 128 | QuestionMark = 63, 129 | /** 130 | * The `@` character. 131 | */ 132 | AtSign = 64, 133 | 134 | A = 65, 135 | B = 66, 136 | C = 67, 137 | D = 68, 138 | E = 69, 139 | F = 70, 140 | G = 71, 141 | H = 72, 142 | I = 73, 143 | J = 74, 144 | K = 75, 145 | L = 76, 146 | M = 77, 147 | N = 78, 148 | O = 79, 149 | P = 80, 150 | Q = 81, 151 | R = 82, 152 | S = 83, 153 | T = 84, 154 | U = 85, 155 | V = 86, 156 | W = 87, 157 | X = 88, 158 | Y = 89, 159 | Z = 90, 160 | 161 | /** 162 | * The `[` character. 163 | */ 164 | OpenSquareBracket = 91, 165 | /** 166 | * The `\` character. 167 | */ 168 | Backslash = 92, 169 | /** 170 | * The `]` character. 171 | */ 172 | CloseSquareBracket = 93, 173 | /** 174 | * The `^` character. 175 | */ 176 | Caret = 94, 177 | /** 178 | * The `_` character. 179 | */ 180 | Underline = 95, 181 | /** 182 | * The ``(`)`` character. 183 | */ 184 | BackTick = 96, 185 | 186 | a = 97, 187 | b = 98, 188 | c = 99, 189 | d = 100, 190 | e = 101, 191 | f = 102, 192 | g = 103, 193 | h = 104, 194 | i = 105, 195 | j = 106, 196 | k = 107, 197 | l = 108, 198 | m = 109, 199 | n = 110, 200 | o = 111, 201 | p = 112, 202 | q = 113, 203 | r = 114, 204 | s = 115, 205 | t = 116, 206 | u = 117, 207 | v = 118, 208 | w = 119, 209 | x = 120, 210 | y = 121, 211 | z = 122, 212 | 213 | /** 214 | * The `{` character. 215 | */ 216 | OpenCurlyBrace = 123, 217 | /** 218 | * The `|` character. 
219 | */ 220 | Pipe = 124, 221 | /** 222 | * The `}` character. 223 | */ 224 | CloseCurlyBrace = 125, 225 | /** 226 | * The `~` character. 227 | */ 228 | Tilde = 126, 229 | 230 | /** 231 | * The   (no-break space) character. 232 | * Unicode Character 'NO-BREAK SPACE' (U+00A0) 233 | */ 234 | NoBreakSpace = 160, 235 | 236 | U_Combining_Grave_Accent = 0x0300, // U+0300 Combining Grave Accent 237 | U_Combining_Acute_Accent = 0x0301, // U+0301 Combining Acute Accent 238 | U_Combining_Circumflex_Accent = 0x0302, // U+0302 Combining Circumflex Accent 239 | U_Combining_Tilde = 0x0303, // U+0303 Combining Tilde 240 | U_Combining_Macron = 0x0304, // U+0304 Combining Macron 241 | U_Combining_Overline = 0x0305, // U+0305 Combining Overline 242 | U_Combining_Breve = 0x0306, // U+0306 Combining Breve 243 | U_Combining_Dot_Above = 0x0307, // U+0307 Combining Dot Above 244 | U_Combining_Diaeresis = 0x0308, // U+0308 Combining Diaeresis 245 | U_Combining_Hook_Above = 0x0309, // U+0309 Combining Hook Above 246 | U_Combining_Ring_Above = 0x030A, // U+030A Combining Ring Above 247 | U_Combining_Double_Acute_Accent = 0x030B, // U+030B Combining Double Acute Accent 248 | U_Combining_Caron = 0x030C, // U+030C Combining Caron 249 | U_Combining_Vertical_Line_Above = 0x030D, // U+030D Combining Vertical Line Above 250 | U_Combining_Double_Vertical_Line_Above = 0x030E, // U+030E Combining Double Vertical Line Above 251 | U_Combining_Double_Grave_Accent = 0x030F, // U+030F Combining Double Grave Accent 252 | U_Combining_Candrabindu = 0x0310, // U+0310 Combining Candrabindu 253 | U_Combining_Inverted_Breve = 0x0311, // U+0311 Combining Inverted Breve 254 | U_Combining_Turned_Comma_Above = 0x0312, // U+0312 Combining Turned Comma Above 255 | U_Combining_Comma_Above = 0x0313, // U+0313 Combining Comma Above 256 | U_Combining_Reversed_Comma_Above = 0x0314, // U+0314 Combining Reversed Comma Above 257 | U_Combining_Comma_Above_Right = 0x0315, // U+0315 Combining Comma Above Right 258 | 
U_Combining_Grave_Accent_Below = 0x0316, // U+0316 Combining Grave Accent Below 259 | U_Combining_Acute_Accent_Below = 0x0317, // U+0317 Combining Acute Accent Below 260 | U_Combining_Left_Tack_Below = 0x0318, // U+0318 Combining Left Tack Below 261 | U_Combining_Right_Tack_Below = 0x0319, // U+0319 Combining Right Tack Below 262 | U_Combining_Left_Angle_Above = 0x031A, // U+031A Combining Left Angle Above 263 | U_Combining_Horn = 0x031B, // U+031B Combining Horn 264 | U_Combining_Left_Half_Ring_Below = 0x031C, // U+031C Combining Left Half Ring Below 265 | U_Combining_Up_Tack_Below = 0x031D, // U+031D Combining Up Tack Below 266 | U_Combining_Down_Tack_Below = 0x031E, // U+031E Combining Down Tack Below 267 | U_Combining_Plus_Sign_Below = 0x031F, // U+031F Combining Plus Sign Below 268 | U_Combining_Minus_Sign_Below = 0x0320, // U+0320 Combining Minus Sign Below 269 | U_Combining_Palatalized_Hook_Below = 0x0321, // U+0321 Combining Palatalized Hook Below 270 | U_Combining_Retroflex_Hook_Below = 0x0322, // U+0322 Combining Retroflex Hook Below 271 | U_Combining_Dot_Below = 0x0323, // U+0323 Combining Dot Below 272 | U_Combining_Diaeresis_Below = 0x0324, // U+0324 Combining Diaeresis Below 273 | U_Combining_Ring_Below = 0x0325, // U+0325 Combining Ring Below 274 | U_Combining_Comma_Below = 0x0326, // U+0326 Combining Comma Below 275 | U_Combining_Cedilla = 0x0327, // U+0327 Combining Cedilla 276 | U_Combining_Ogonek = 0x0328, // U+0328 Combining Ogonek 277 | U_Combining_Vertical_Line_Below = 0x0329, // U+0329 Combining Vertical Line Below 278 | U_Combining_Bridge_Below = 0x032A, // U+032A Combining Bridge Below 279 | U_Combining_Inverted_Double_Arch_Below = 0x032B, // U+032B Combining Inverted Double Arch Below 280 | U_Combining_Caron_Below = 0x032C, // U+032C Combining Caron Below 281 | U_Combining_Circumflex_Accent_Below = 0x032D, // U+032D Combining Circumflex Accent Below 282 | U_Combining_Breve_Below = 0x032E, // U+032E Combining Breve Below 283 | 
U_Combining_Inverted_Breve_Below = 0x032F, // U+032F Combining Inverted Breve Below 284 | U_Combining_Tilde_Below = 0x0330, // U+0330 Combining Tilde Below 285 | U_Combining_Macron_Below = 0x0331, // U+0331 Combining Macron Below 286 | U_Combining_Low_Line = 0x0332, // U+0332 Combining Low Line 287 | U_Combining_Double_Low_Line = 0x0333, // U+0333 Combining Double Low Line 288 | U_Combining_Tilde_Overlay = 0x0334, // U+0334 Combining Tilde Overlay 289 | U_Combining_Short_Stroke_Overlay = 0x0335, // U+0335 Combining Short Stroke Overlay 290 | U_Combining_Long_Stroke_Overlay = 0x0336, // U+0336 Combining Long Stroke Overlay 291 | U_Combining_Short_Solidus_Overlay = 0x0337, // U+0337 Combining Short Solidus Overlay 292 | U_Combining_Long_Solidus_Overlay = 0x0338, // U+0338 Combining Long Solidus Overlay 293 | U_Combining_Right_Half_Ring_Below = 0x0339, // U+0339 Combining Right Half Ring Below 294 | U_Combining_Inverted_Bridge_Below = 0x033A, // U+033A Combining Inverted Bridge Below 295 | U_Combining_Square_Below = 0x033B, // U+033B Combining Square Below 296 | U_Combining_Seagull_Below = 0x033C, // U+033C Combining Seagull Below 297 | U_Combining_X_Above = 0x033D, // U+033D Combining X Above 298 | U_Combining_Vertical_Tilde = 0x033E, // U+033E Combining Vertical Tilde 299 | U_Combining_Double_Overline = 0x033F, // U+033F Combining Double Overline 300 | U_Combining_Grave_Tone_Mark = 0x0340, // U+0340 Combining Grave Tone Mark 301 | U_Combining_Acute_Tone_Mark = 0x0341, // U+0341 Combining Acute Tone Mark 302 | U_Combining_Greek_Perispomeni = 0x0342, // U+0342 Combining Greek Perispomeni 303 | U_Combining_Greek_Koronis = 0x0343, // U+0343 Combining Greek Koronis 304 | U_Combining_Greek_Dialytika_Tonos = 0x0344, // U+0344 Combining Greek Dialytika Tonos 305 | U_Combining_Greek_Ypogegrammeni = 0x0345, // U+0345 Combining Greek Ypogegrammeni 306 | U_Combining_Bridge_Above = 0x0346, // U+0346 Combining Bridge Above 307 | U_Combining_Equals_Sign_Below = 0x0347, // U+0347 
Combining Equals Sign Below 308 | U_Combining_Double_Vertical_Line_Below = 0x0348, // U+0348 Combining Double Vertical Line Below 309 | U_Combining_Left_Angle_Below = 0x0349, // U+0349 Combining Left Angle Below 310 | U_Combining_Not_Tilde_Above = 0x034A, // U+034A Combining Not Tilde Above 311 | U_Combining_Homothetic_Above = 0x034B, // U+034B Combining Homothetic Above 312 | U_Combining_Almost_Equal_To_Above = 0x034C, // U+034C Combining Almost Equal To Above 313 | U_Combining_Left_Right_Arrow_Below = 0x034D, // U+034D Combining Left Right Arrow Below 314 | U_Combining_Upwards_Arrow_Below = 0x034E, // U+034E Combining Upwards Arrow Below 315 | U_Combining_Grapheme_Joiner = 0x034F, // U+034F Combining Grapheme Joiner 316 | U_Combining_Right_Arrowhead_Above = 0x0350, // U+0350 Combining Right Arrowhead Above 317 | U_Combining_Left_Half_Ring_Above = 0x0351, // U+0351 Combining Left Half Ring Above 318 | U_Combining_Fermata = 0x0352, // U+0352 Combining Fermata 319 | U_Combining_X_Below = 0x0353, // U+0353 Combining X Below 320 | U_Combining_Left_Arrowhead_Below = 0x0354, // U+0354 Combining Left Arrowhead Below 321 | U_Combining_Right_Arrowhead_Below = 0x0355, // U+0355 Combining Right Arrowhead Below 322 | U_Combining_Right_Arrowhead_And_Up_Arrowhead_Below = 0x0356, // U+0356 Combining Right Arrowhead And Up Arrowhead Below 323 | U_Combining_Right_Half_Ring_Above = 0x0357, // U+0357 Combining Right Half Ring Above 324 | U_Combining_Dot_Above_Right = 0x0358, // U+0358 Combining Dot Above Right 325 | U_Combining_Asterisk_Below = 0x0359, // U+0359 Combining Asterisk Below 326 | U_Combining_Double_Ring_Below = 0x035A, // U+035A Combining Double Ring Below 327 | U_Combining_Zigzag_Above = 0x035B, // U+035B Combining Zigzag Above 328 | U_Combining_Double_Breve_Below = 0x035C, // U+035C Combining Double Breve Below 329 | U_Combining_Double_Breve = 0x035D, // U+035D Combining Double Breve 330 | U_Combining_Double_Macron = 0x035E, // U+035E Combining Double Macron 331 | 
U_Combining_Double_Macron_Below = 0x035F, // U+035F Combining Double Macron Below 332 | U_Combining_Double_Tilde = 0x0360, // U+0360 Combining Double Tilde 333 | U_Combining_Double_Inverted_Breve = 0x0361, // U+0361 Combining Double Inverted Breve 334 | U_Combining_Double_Rightwards_Arrow_Below = 0x0362, // U+0362 Combining Double Rightwards Arrow Below 335 | U_Combining_Latin_Small_Letter_A = 0x0363, // U+0363 Combining Latin Small Letter A 336 | U_Combining_Latin_Small_Letter_E = 0x0364, // U+0364 Combining Latin Small Letter E 337 | U_Combining_Latin_Small_Letter_I = 0x0365, // U+0365 Combining Latin Small Letter I 338 | U_Combining_Latin_Small_Letter_O = 0x0366, // U+0366 Combining Latin Small Letter O 339 | U_Combining_Latin_Small_Letter_U = 0x0367, // U+0367 Combining Latin Small Letter U 340 | U_Combining_Latin_Small_Letter_C = 0x0368, // U+0368 Combining Latin Small Letter C 341 | U_Combining_Latin_Small_Letter_D = 0x0369, // U+0369 Combining Latin Small Letter D 342 | U_Combining_Latin_Small_Letter_H = 0x036A, // U+036A Combining Latin Small Letter H 343 | U_Combining_Latin_Small_Letter_M = 0x036B, // U+036B Combining Latin Small Letter M 344 | U_Combining_Latin_Small_Letter_R = 0x036C, // U+036C Combining Latin Small Letter R 345 | U_Combining_Latin_Small_Letter_T = 0x036D, // U+036D Combining Latin Small Letter T 346 | U_Combining_Latin_Small_Letter_V = 0x036E, // U+036E Combining Latin Small Letter V 347 | U_Combining_Latin_Small_Letter_X = 0x036F, // U+036F Combining Latin Small Letter X 348 | 349 | /** 350 | * Unicode Character 'LINE SEPARATOR' (U+2028) 351 | * http://www.fileformat.info/info/unicode/char/2028/index.htm 352 | */ 353 | LINE_SEPARATOR = 0x2028, 354 | /** 355 | * Unicode Character 'PARAGRAPH SEPARATOR' (U+2029) 356 | * http://www.fileformat.info/info/unicode/char/2029/index.htm 357 | */ 358 | PARAGRAPH_SEPARATOR = 0x2029, 359 | /** 360 | * Unicode Character 'NEXT LINE' (U+0085) 361 | * 
http://www.fileformat.info/info/unicode/char/0085/index.htm 362 | */ 363 | NEXT_LINE = 0x0085, 364 | 365 | // http://www.fileformat.info/info/unicode/category/Sk/list.htm 366 | U_CIRCUMFLEX = 0x005E, // U+005E CIRCUMFLEX 367 | U_GRAVE_ACCENT = 0x0060, // U+0060 GRAVE ACCENT 368 | U_DIAERESIS = 0x00A8, // U+00A8 DIAERESIS 369 | U_MACRON = 0x00AF, // U+00AF MACRON 370 | U_ACUTE_ACCENT = 0x00B4, // U+00B4 ACUTE ACCENT 371 | U_CEDILLA = 0x00B8, // U+00B8 CEDILLA 372 | U_MODIFIER_LETTER_LEFT_ARROWHEAD = 0x02C2, // U+02C2 MODIFIER LETTER LEFT ARROWHEAD 373 | U_MODIFIER_LETTER_RIGHT_ARROWHEAD = 0x02C3, // U+02C3 MODIFIER LETTER RIGHT ARROWHEAD 374 | U_MODIFIER_LETTER_UP_ARROWHEAD = 0x02C4, // U+02C4 MODIFIER LETTER UP ARROWHEAD 375 | U_MODIFIER_LETTER_DOWN_ARROWHEAD = 0x02C5, // U+02C5 MODIFIER LETTER DOWN ARROWHEAD 376 | U_MODIFIER_LETTER_CENTRED_RIGHT_HALF_RING = 0x02D2, // U+02D2 MODIFIER LETTER CENTRED RIGHT HALF RING 377 | U_MODIFIER_LETTER_CENTRED_LEFT_HALF_RING = 0x02D3, // U+02D3 MODIFIER LETTER CENTRED LEFT HALF RING 378 | U_MODIFIER_LETTER_UP_TACK = 0x02D4, // U+02D4 MODIFIER LETTER UP TACK 379 | U_MODIFIER_LETTER_DOWN_TACK = 0x02D5, // U+02D5 MODIFIER LETTER DOWN TACK 380 | U_MODIFIER_LETTER_PLUS_SIGN = 0x02D6, // U+02D6 MODIFIER LETTER PLUS SIGN 381 | U_MODIFIER_LETTER_MINUS_SIGN = 0x02D7, // U+02D7 MODIFIER LETTER MINUS SIGN 382 | U_BREVE = 0x02D8, // U+02D8 BREVE 383 | U_DOT_ABOVE = 0x02D9, // U+02D9 DOT ABOVE 384 | U_RING_ABOVE = 0x02DA, // U+02DA RING ABOVE 385 | U_OGONEK = 0x02DB, // U+02DB OGONEK 386 | U_SMALL_TILDE = 0x02DC, // U+02DC SMALL TILDE 387 | U_DOUBLE_ACUTE_ACCENT = 0x02DD, // U+02DD DOUBLE ACUTE ACCENT 388 | U_MODIFIER_LETTER_RHOTIC_HOOK = 0x02DE, // U+02DE MODIFIER LETTER RHOTIC HOOK 389 | U_MODIFIER_LETTER_CROSS_ACCENT = 0x02DF, // U+02DF MODIFIER LETTER CROSS ACCENT 390 | U_MODIFIER_LETTER_EXTRA_HIGH_TONE_BAR = 0x02E5, // U+02E5 MODIFIER LETTER EXTRA-HIGH TONE BAR 391 | U_MODIFIER_LETTER_HIGH_TONE_BAR = 0x02E6, // U+02E6 MODIFIER LETTER 
HIGH TONE BAR 392 | U_MODIFIER_LETTER_MID_TONE_BAR = 0x02E7, // U+02E7 MODIFIER LETTER MID TONE BAR 393 | U_MODIFIER_LETTER_LOW_TONE_BAR = 0x02E8, // U+02E8 MODIFIER LETTER LOW TONE BAR 394 | U_MODIFIER_LETTER_EXTRA_LOW_TONE_BAR = 0x02E9, // U+02E9 MODIFIER LETTER EXTRA-LOW TONE BAR 395 | U_MODIFIER_LETTER_YIN_DEPARTING_TONE_MARK = 0x02EA, // U+02EA MODIFIER LETTER YIN DEPARTING TONE MARK 396 | U_MODIFIER_LETTER_YANG_DEPARTING_TONE_MARK = 0x02EB, // U+02EB MODIFIER LETTER YANG DEPARTING TONE MARK 397 | U_MODIFIER_LETTER_UNASPIRATED = 0x02ED, // U+02ED MODIFIER LETTER UNASPIRATED 398 | U_MODIFIER_LETTER_LOW_DOWN_ARROWHEAD = 0x02EF, // U+02EF MODIFIER LETTER LOW DOWN ARROWHEAD 399 | U_MODIFIER_LETTER_LOW_UP_ARROWHEAD = 0x02F0, // U+02F0 MODIFIER LETTER LOW UP ARROWHEAD 400 | U_MODIFIER_LETTER_LOW_LEFT_ARROWHEAD = 0x02F1, // U+02F1 MODIFIER LETTER LOW LEFT ARROWHEAD 401 | U_MODIFIER_LETTER_LOW_RIGHT_ARROWHEAD = 0x02F2, // U+02F2 MODIFIER LETTER LOW RIGHT ARROWHEAD 402 | U_MODIFIER_LETTER_LOW_RING = 0x02F3, // U+02F3 MODIFIER LETTER LOW RING 403 | U_MODIFIER_LETTER_MIDDLE_GRAVE_ACCENT = 0x02F4, // U+02F4 MODIFIER LETTER MIDDLE GRAVE ACCENT 404 | U_MODIFIER_LETTER_MIDDLE_DOUBLE_GRAVE_ACCENT = 0x02F5, // U+02F5 MODIFIER LETTER MIDDLE DOUBLE GRAVE ACCENT 405 | U_MODIFIER_LETTER_MIDDLE_DOUBLE_ACUTE_ACCENT = 0x02F6, // U+02F6 MODIFIER LETTER MIDDLE DOUBLE ACUTE ACCENT 406 | U_MODIFIER_LETTER_LOW_TILDE = 0x02F7, // U+02F7 MODIFIER LETTER LOW TILDE 407 | U_MODIFIER_LETTER_RAISED_COLON = 0x02F8, // U+02F8 MODIFIER LETTER RAISED COLON 408 | U_MODIFIER_LETTER_BEGIN_HIGH_TONE = 0x02F9, // U+02F9 MODIFIER LETTER BEGIN HIGH TONE 409 | U_MODIFIER_LETTER_END_HIGH_TONE = 0x02FA, // U+02FA MODIFIER LETTER END HIGH TONE 410 | U_MODIFIER_LETTER_BEGIN_LOW_TONE = 0x02FB, // U+02FB MODIFIER LETTER BEGIN LOW TONE 411 | U_MODIFIER_LETTER_END_LOW_TONE = 0x02FC, // U+02FC MODIFIER LETTER END LOW TONE 412 | U_MODIFIER_LETTER_SHELF = 0x02FD, // U+02FD MODIFIER LETTER SHELF 413 | 
U_MODIFIER_LETTER_OPEN_SHELF = 0x02FE, // U+02FE MODIFIER LETTER OPEN SHELF 414 | U_MODIFIER_LETTER_LOW_LEFT_ARROW = 0x02FF, // U+02FF MODIFIER LETTER LOW LEFT ARROW 415 | U_GREEK_LOWER_NUMERAL_SIGN = 0x0375, // U+0375 GREEK LOWER NUMERAL SIGN 416 | U_GREEK_TONOS = 0x0384, // U+0384 GREEK TONOS 417 | U_GREEK_DIALYTIKA_TONOS = 0x0385, // U+0385 GREEK DIALYTIKA TONOS 418 | U_GREEK_KORONIS = 0x1FBD, // U+1FBD GREEK KORONIS 419 | U_GREEK_PSILI = 0x1FBF, // U+1FBF GREEK PSILI 420 | U_GREEK_PERISPOMENI = 0x1FC0, // U+1FC0 GREEK PERISPOMENI 421 | U_GREEK_DIALYTIKA_AND_PERISPOMENI = 0x1FC1, // U+1FC1 GREEK DIALYTIKA AND PERISPOMENI 422 | U_GREEK_PSILI_AND_VARIA = 0x1FCD, // U+1FCD GREEK PSILI AND VARIA 423 | U_GREEK_PSILI_AND_OXIA = 0x1FCE, // U+1FCE GREEK PSILI AND OXIA 424 | U_GREEK_PSILI_AND_PERISPOMENI = 0x1FCF, // U+1FCF GREEK PSILI AND PERISPOMENI 425 | U_GREEK_DASIA_AND_VARIA = 0x1FDD, // U+1FDD GREEK DASIA AND VARIA 426 | U_GREEK_DASIA_AND_OXIA = 0x1FDE, // U+1FDE GREEK DASIA AND OXIA 427 | U_GREEK_DASIA_AND_PERISPOMENI = 0x1FDF, // U+1FDF GREEK DASIA AND PERISPOMENI 428 | U_GREEK_DIALYTIKA_AND_VARIA = 0x1FED, // U+1FED GREEK DIALYTIKA AND VARIA 429 | U_GREEK_DIALYTIKA_AND_OXIA = 0x1FEE, // U+1FEE GREEK DIALYTIKA AND OXIA 430 | U_GREEK_VARIA = 0x1FEF, // U+1FEF GREEK VARIA 431 | U_GREEK_OXIA = 0x1FFD, // U+1FFD GREEK OXIA 432 | U_GREEK_DASIA = 0x1FFE, // U+1FFE GREEK DASIA 433 | 434 | U_IDEOGRAPHIC_FULL_STOP = 0x3002, // U+3002 IDEOGRAPHIC FULL STOP 435 | U_LEFT_CORNER_BRACKET = 0x300C, // U+300C LEFT CORNER BRACKET 436 | U_RIGHT_CORNER_BRACKET = 0x300D, // U+300D RIGHT CORNER BRACKET 437 | U_LEFT_BLACK_LENTICULAR_BRACKET = 0x3010, // U+3010 LEFT BLACK LENTICULAR BRACKET 438 | U_RIGHT_BLACK_LENTICULAR_BRACKET = 0x3011, // U+3011 RIGHT BLACK LENTICULAR BRACKET 439 | 440 | 441 | U_OVERLINE = 0x203E, // Unicode Character 'OVERLINE' 442 | 443 | /** 444 | * UTF-8 BOM 445 | * Unicode Character 'ZERO WIDTH NO-BREAK SPACE' (U+FEFF) 446 | * 
http://www.fileformat.info/info/unicode/char/feff/index.htm 447 | */ 448 | UTF8_BOM = 65279, 449 | 450 | U_FULLWIDTH_SEMICOLON = 0xFF1B, // U+FF1B FULLWIDTH SEMICOLON 451 | U_FULLWIDTH_COMMA = 0xFF0C, // U+FF0C FULLWIDTH COMMA 452 | } 453 | -------------------------------------------------------------------------------- /src/base/util/vs/common/marshallingIds.ts: -------------------------------------------------------------------------------- 1 | //!!! DO NOT modify, this file was COPIED from 'microsoft/vscode' 2 | 3 | /*--------------------------------------------------------------------------------------------- 4 | * Copyright (c) Microsoft Corporation. All rights reserved. 5 | * Licensed under the MIT License. See LICENSE in the project root for license information. 6 | *--------------------------------------------------------------------------------------------*/ 7 | 8 | export const enum MarshalledId { 9 | Uri = 1, 10 | Regexp, 11 | ScmResource, 12 | ScmResourceGroup, 13 | ScmProvider, 14 | CommentController, 15 | CommentThread, 16 | CommentThreadInstance, 17 | CommentThreadReply, 18 | CommentNode, 19 | CommentThreadNode, 20 | TimelineActionContext, 21 | NotebookCellActionContext, 22 | NotebookActionContext, 23 | TerminalContext, 24 | TestItemContext, 25 | Date, 26 | TestMessageMenuArgs, 27 | } 28 | -------------------------------------------------------------------------------- /src/base/util/vs/common/platform.ts: -------------------------------------------------------------------------------- 1 | //!!! DO NOT modify, this file was COPIED from 'microsoft/vscode' 2 | 3 | /*--------------------------------------------------------------------------------------------- 4 | * Copyright (c) Microsoft Corporation. All rights reserved. 5 | * Licensed under the MIT License. See LICENSE in the project root for license information. 
6 | *--------------------------------------------------------------------------------------------*/ 7 | import * as nls from '../nls'; 8 | 9 | export const LANGUAGE_DEFAULT = 'en'; 10 | 11 | let _isWindows = false; 12 | let _isMacintosh = false; 13 | let _isLinux = false; 14 | let _isLinuxSnap = false; 15 | let _isNative = false; 16 | let _isWeb = false; 17 | let _isElectron = false; 18 | let _isIOS = false; 19 | let _isCI = false; 20 | let _isMobile = false; 21 | let _locale: string | undefined = undefined; 22 | let _language: string = LANGUAGE_DEFAULT; 23 | let _platformLocale: string = LANGUAGE_DEFAULT; 24 | let _translationsConfigFile: string | undefined = undefined; 25 | let _userAgent: string | undefined = undefined; 26 | 27 | interface NLSConfig { 28 | locale: string; 29 | osLocale: string; 30 | availableLanguages: { [key: string]: string }; 31 | _translationsConfigFile: string; 32 | } 33 | 34 | export interface IProcessEnvironment { 35 | [key: string]: string | undefined; 36 | } 37 | 38 | /** 39 | * This interface is intentionally not identical to node.js 40 | * process because it also works in sandboxed environments 41 | * where the process object is implemented differently. We 42 | * define the properties here that we need for `platform` 43 | * to work and nothing else. 
44 | */ 45 | export interface INodeProcess { 46 | platform: string; 47 | arch: string; 48 | env: IProcessEnvironment; 49 | versions?: { 50 | electron?: string; 51 | chrome?: string; 52 | }; 53 | type?: string; 54 | cwd: () => string; 55 | } 56 | 57 | declare const process: INodeProcess; 58 | 59 | const $globalThis: any = globalThis; 60 | 61 | let nodeProcess: INodeProcess | undefined = undefined; 62 | if (typeof $globalThis.vscode !== 'undefined' && typeof $globalThis.vscode.process !== 'undefined') { 63 | // Native environment (sandboxed) 64 | nodeProcess = $globalThis.vscode.process; 65 | } else if (typeof process !== 'undefined') { 66 | // Native environment (non-sandboxed) 67 | nodeProcess = process; 68 | } 69 | 70 | const isElectronProcess = typeof nodeProcess?.versions?.electron === 'string'; 71 | const isElectronRenderer = isElectronProcess && nodeProcess?.type === 'renderer'; 72 | 73 | interface INavigator { 74 | userAgent: string; 75 | maxTouchPoints?: number; 76 | language: string; 77 | } 78 | declare const navigator: INavigator; 79 | 80 | // Native environment 81 | if (typeof nodeProcess === 'object') { 82 | _isWindows = (nodeProcess.platform === 'win32'); 83 | _isMacintosh = (nodeProcess.platform === 'darwin'); 84 | _isLinux = (nodeProcess.platform === 'linux'); 85 | _isLinuxSnap = _isLinux && !!nodeProcess.env['SNAP'] && !!nodeProcess.env['SNAP_REVISION']; 86 | _isElectron = isElectronProcess; 87 | _isCI = !!nodeProcess.env['CI'] || !!nodeProcess.env['BUILD_ARTIFACTSTAGINGDIRECTORY']; 88 | _locale = LANGUAGE_DEFAULT; 89 | _language = LANGUAGE_DEFAULT; 90 | const rawNlsConfig = nodeProcess.env['VSCODE_NLS_CONFIG']; 91 | if (rawNlsConfig) { 92 | try { 93 | const nlsConfig: NLSConfig = JSON.parse(rawNlsConfig); 94 | const resolved = nlsConfig.availableLanguages['*']; 95 | _locale = nlsConfig.locale; 96 | _platformLocale = nlsConfig.osLocale; 97 | // VSCode's default language is 'en' 98 | _language = resolved ? 
resolved : LANGUAGE_DEFAULT; 99 | _translationsConfigFile = nlsConfig._translationsConfigFile; 100 | } catch (e) { 101 | } 102 | } 103 | _isNative = true; 104 | } 105 | 106 | // Web environment 107 | else if (typeof navigator === 'object' && !isElectronRenderer) { 108 | _userAgent = navigator.userAgent; 109 | _isWindows = _userAgent.indexOf('Windows') >= 0; 110 | _isMacintosh = _userAgent.indexOf('Macintosh') >= 0; 111 | _isIOS = (_userAgent.indexOf('Macintosh') >= 0 || _userAgent.indexOf('iPad') >= 0 || _userAgent.indexOf('iPhone') >= 0) && !!navigator.maxTouchPoints && navigator.maxTouchPoints > 0; 112 | _isLinux = _userAgent.indexOf('Linux') >= 0; 113 | _isMobile = _userAgent?.indexOf('Mobi') >= 0; 114 | _isWeb = true; 115 | 116 | const configuredLocale = nls.getConfiguredDefaultLocale( 117 | // This call _must_ be done in the file that calls `nls.getConfiguredDefaultLocale` 118 | // to ensure that the NLS AMD Loader plugin has been loaded and configured. 119 | // This is because the loader plugin decides what the default locale is based on 120 | // how it's able to resolve the strings. 
121 | nls.localize({ key: 'ensureLoaderPluginIsLoaded', comment: ['{Locked}'] }, '_') 122 | ); 123 | 124 | _locale = configuredLocale || LANGUAGE_DEFAULT; 125 | _language = _locale; 126 | _platformLocale = navigator.language; 127 | } 128 | 129 | // Unknown environment 130 | else { 131 | console.error('Unable to resolve platform.'); 132 | } 133 | 134 | export const enum Platform { 135 | Web, 136 | Mac, 137 | Linux, 138 | Windows 139 | } 140 | export type PlatformName = 'Web' | 'Windows' | 'Mac' | 'Linux'; 141 | 142 | export function PlatformToString(platform: Platform): PlatformName { 143 | switch (platform) { 144 | case Platform.Web: return 'Web'; 145 | case Platform.Mac: return 'Mac'; 146 | case Platform.Linux: return 'Linux'; 147 | case Platform.Windows: return 'Windows'; 148 | } 149 | } 150 | 151 | let _platform: Platform = Platform.Web; 152 | if (_isMacintosh) { 153 | _platform = Platform.Mac; 154 | } else if (_isWindows) { 155 | _platform = Platform.Windows; 156 | } else if (_isLinux) { 157 | _platform = Platform.Linux; 158 | } 159 | 160 | export const isWindows = _isWindows; 161 | export const isMacintosh = _isMacintosh; 162 | export const isLinux = _isLinux; 163 | export const isLinuxSnap = _isLinuxSnap; 164 | export const isNative = _isNative; 165 | export const isElectron = _isElectron; 166 | export const isWeb = _isWeb; 167 | export const isWebWorker = (_isWeb && typeof $globalThis.importScripts === 'function'); 168 | export const webWorkerOrigin = isWebWorker ? $globalThis.origin : undefined; 169 | export const isIOS = _isIOS; 170 | export const isMobile = _isMobile; 171 | /** 172 | * Whether we run inside a CI environment, such as 173 | * GH actions or Azure Pipelines. 174 | */ 175 | export const isCI = _isCI; 176 | export const platform = _platform; 177 | export const userAgent = _userAgent; 178 | 179 | /** 180 | * The language used for the user interface. The format of 181 | * the string is all lower case (e.g. 
zh-tw for Traditional 182 | * Chinese) 183 | */ 184 | export const language = _language; 185 | 186 | export namespace Language { 187 | 188 | export function value(): string { 189 | return language; 190 | } 191 | 192 | export function isDefaultVariant(): boolean { 193 | if (language.length === 2) { 194 | return language === 'en'; 195 | } else if (language.length >= 3) { 196 | return language[0] === 'e' && language[1] === 'n' && language[2] === '-'; 197 | } else { 198 | return false; 199 | } 200 | } 201 | 202 | export function isDefault(): boolean { 203 | return language === 'en'; 204 | } 205 | } 206 | 207 | /** 208 | * The OS locale or the locale specified by --locale. The format of 209 | * the string is all lower case (e.g. zh-tw for Traditional 210 | * Chinese). The UI is not necessarily shown in the provided locale. 211 | */ 212 | export const locale = _locale; 213 | 214 | /** 215 | * This will always be set to the OS/browser's locale regardless of 216 | * what was specified by --locale. The format of the string is all 217 | * lower case (e.g. zh-tw for Traditional Chinese). The UI is not 218 | * necessarily shown in the provided locale. 219 | */ 220 | export const platformLocale = _platformLocale; 221 | 222 | /** 223 | * The translations that are available through language packs. 224 | */ 225 | export const translationsConfigFile = _translationsConfigFile; 226 | 227 | export const setTimeout0IsFaster = (typeof $globalThis.postMessage === 'function' && !$globalThis.importScripts); 228 | 229 | /** 230 | * See https://html.spec.whatwg.org/multipage/timers-and-user-prompts.html#:~:text=than%204%2C%20then-,set%20timeout%20to%204,-. 231 | * 232 | * Works similarly to `setTimeout(0)` but doesn't suffer from the 4ms artificial delay 233 | * that browsers set when the nesting level is > 5. 
234 | */ 235 | export const setTimeout0 = (() => { 236 | if (setTimeout0IsFaster) { 237 | interface IQueueElement { 238 | id: number; 239 | callback: () => void; 240 | } 241 | const pending: IQueueElement[] = []; 242 | 243 | $globalThis.addEventListener('message', (e: any) => { 244 | if (e.data && e.data.vscodeScheduleAsyncWork) { 245 | for (let i = 0, len = pending.length; i < len; i++) { 246 | const candidate = pending[i]; 247 | if (candidate.id === e.data.vscodeScheduleAsyncWork) { 248 | pending.splice(i, 1); 249 | candidate.callback(); 250 | return; 251 | } 252 | } 253 | } 254 | }); 255 | let lastId = 0; 256 | return (callback: () => void) => { 257 | const myId = ++lastId; 258 | pending.push({ 259 | id: myId, 260 | callback: callback 261 | }); 262 | $globalThis.postMessage({ vscodeScheduleAsyncWork: myId }, '*'); 263 | }; 264 | } 265 | return (callback: () => void) => setTimeout(callback); 266 | })(); 267 | 268 | export const enum OperatingSystem { 269 | Windows = 1, 270 | Macintosh = 2, 271 | Linux = 3 272 | } 273 | export const OS = (_isMacintosh || _isIOS ? OperatingSystem.Macintosh : (_isWindows ? 
OperatingSystem.Windows : OperatingSystem.Linux)); 274 | 275 | let _isLittleEndian = true; 276 | let _isLittleEndianComputed = false; 277 | export function isLittleEndian(): boolean { 278 | if (!_isLittleEndianComputed) { 279 | _isLittleEndianComputed = true; 280 | const test = new Uint8Array(2); 281 | test[0] = 1; 282 | test[1] = 2; 283 | const view = new Uint16Array(test.buffer); 284 | _isLittleEndian = (view[0] === (2 << 8) + 1); 285 | } 286 | return _isLittleEndian; 287 | } 288 | 289 | export const isChrome = !!(userAgent && userAgent.indexOf('Chrome') >= 0); 290 | export const isFirefox = !!(userAgent && userAgent.indexOf('Firefox') >= 0); 291 | export const isSafari = !!(!isChrome && (userAgent && userAgent.indexOf('Safari') >= 0)); 292 | export const isEdge = !!(userAgent && userAgent.indexOf('Edg/') >= 0); 293 | export const isAndroid = !!(userAgent && userAgent.indexOf('Android') >= 0); 294 | 295 | export function isBigSurOrNewer(osVersion: string): boolean { 296 | return parseFloat(osVersion) >= 20; 297 | } 298 | -------------------------------------------------------------------------------- /src/base/util/vs/common/process.ts: -------------------------------------------------------------------------------- 1 | //!!! DO NOT modify, this file was COPIED from 'microsoft/vscode' 2 | 3 | /*--------------------------------------------------------------------------------------------- 4 | * Copyright (c) Microsoft Corporation. All rights reserved. 5 | * Licensed under the MIT License. See LICENSE in the project root for license information. 
6 | *--------------------------------------------------------------------------------------------*/ 7 | 8 | import { INodeProcess, isMacintosh, isWindows } from './platform'; 9 | 10 | let safeProcess: Omit & { arch: string | undefined }; 11 | declare const process: INodeProcess; 12 | 13 | // Native sandbox environment 14 | const vscodeGlobal = (globalThis as any).vscode; 15 | if (typeof vscodeGlobal !== 'undefined' && typeof vscodeGlobal.process !== 'undefined') { 16 | const sandboxProcess: INodeProcess = vscodeGlobal.process; 17 | safeProcess = { 18 | get platform() { return sandboxProcess.platform; }, 19 | get arch() { return sandboxProcess.arch; }, 20 | get env() { return sandboxProcess.env; }, 21 | cwd() { return sandboxProcess.cwd(); } 22 | }; 23 | } 24 | 25 | // Native node.js environment 26 | else if (typeof process !== 'undefined') { 27 | safeProcess = { 28 | get platform() { return process.platform; }, 29 | get arch() { return process.arch; }, 30 | get env() { return process.env; }, 31 | cwd() { return process.env['VSCODE_CWD'] || process.cwd(); } 32 | }; 33 | } 34 | 35 | // Web environment 36 | else { 37 | safeProcess = { 38 | 39 | // Supported 40 | get platform() { return isWindows ? 'win32' : isMacintosh ? 'darwin' : 'linux'; }, 41 | get arch() { return undefined; /* arch is undefined in web */ }, 42 | 43 | // Unsupported 44 | get env() { return {}; }, 45 | cwd() { return '/'; } 46 | }; 47 | } 48 | 49 | /** 50 | * Provides safe access to the `cwd` property in node.js, sandboxed or web 51 | * environments. 52 | * 53 | * Note: in web, this property is hardcoded to be `/`. 54 | * 55 | * @skipMangle 56 | */ 57 | export const cwd = safeProcess.cwd; 58 | 59 | /** 60 | * Provides safe access to the `env` property in node.js, sandboxed or web 61 | * environments. 62 | * 63 | * Note: in web, this property is hardcoded to be `{}`. 
64 | */ 65 | export const env = safeProcess.env; 66 | 67 | /** 68 | * Provides safe access to the `platform` property in node.js, sandboxed or web 69 | * environments. 70 | */ 71 | export const platform = safeProcess.platform; 72 | 73 | /** 74 | * Provides safe access to the `arch` method in node.js, sandboxed or web 75 | * environments. 76 | * Note: `arch` is `undefined` in web 77 | */ 78 | export const arch = safeProcess.arch; 79 | -------------------------------------------------------------------------------- /src/base/util/vs/nls.ts: -------------------------------------------------------------------------------- 1 | //!!! DO NOT modify, this file was COPIED from 'microsoft/vscode' 2 | 3 | /*--------------------------------------------------------------------------------------------- 4 | * Copyright (c) Microsoft Corporation. All rights reserved. 5 | * Licensed under the MIT License. See LICENSE in the project root for license information. 6 | *--------------------------------------------------------------------------------------------*/ 7 | 8 | export interface ILocalizeInfo { 9 | key: string; 10 | comment: string[]; 11 | } 12 | 13 | interface ILocalizedString { 14 | original: string; 15 | value: string; 16 | } 17 | 18 | function _format(message: string, args: any[]): string { 19 | let result: string; 20 | if (args.length === 0) { 21 | result = message; 22 | } else { 23 | result = message.replace(/\{(\d+)\}/g, function (match, rest) { 24 | const index = rest[0]; 25 | return typeof args[index] !== 'undefined' ? 
args[index] : match; 26 | }); 27 | } 28 | return result; 29 | } 30 | 31 | export function localize(data: ILocalizeInfo | string, message: string, ...args: any[]): string { 32 | return _format(message, args); 33 | } 34 | 35 | export function localize2(data: ILocalizeInfo | string, message: string, ...args: any[]): ILocalizedString { 36 | const res = _format(message, args); 37 | return { 38 | original: res, 39 | value: res 40 | }; 41 | } 42 | 43 | export function getConfiguredDefaultLocale(_: string) { 44 | return undefined; 45 | } 46 | -------------------------------------------------------------------------------- /src/base/vscodeTypes.d.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import type { 6 | CancellationToken, 7 | Command, 8 | Location, 9 | MarkdownString, 10 | ProviderResult, 11 | Range, 12 | ThemeIcon, 13 | Uri, 14 | } from 'vscode'; 15 | 16 | /** 17 | * Represents a part of a chat response that is formatted as Markdown. 18 | */ 19 | export class ChatResponseMarkdownPart { 20 | /** 21 | * A markdown string or a string that should be interpreted as markdown. 22 | */ 23 | value: MarkdownString; 24 | 25 | /** 26 | * Create a new ChatResponseMarkdownPart. 27 | * 28 | * @param value A markdown string or a string that should be interpreted as markdown. The boolean form of {@link MarkdownString.isTrusted} is NOT supported. 29 | */ 30 | constructor(value: string | MarkdownString); 31 | } 32 | 33 | /** 34 | * Represents a file tree structure in a chat response. 35 | */ 36 | export interface ChatResponseFileTree { 37 | /** 38 | * The name of the file or directory. 
39 | */ 40 | name: string; 41 | 42 | /** 43 | * An array of child file trees, if the current file tree is a directory. 44 | */ 45 | children?: ChatResponseFileTree[]; 46 | } 47 | 48 | /** 49 | * Represents a part of a chat response that is a file tree. 50 | */ 51 | export class ChatResponseFileTreePart { 52 | /** 53 | * File tree data. 54 | */ 55 | value: ChatResponseFileTree[]; 56 | 57 | /** 58 | * The base uri to which this file tree is relative 59 | */ 60 | baseUri: Uri; 61 | 62 | /** 63 | * Create a new ChatResponseFileTreePart. 64 | * @param value File tree data. 65 | * @param baseUri The base uri to which this file tree is relative. 66 | */ 67 | constructor(value: ChatResponseFileTree[], baseUri: Uri); 68 | } 69 | 70 | /** 71 | * Represents a part of a chat response that is an anchor, that is rendered as a link to a target. 72 | */ 73 | export class ChatResponseAnchorPart { 74 | /** 75 | * The target of this anchor. 76 | */ 77 | value: Uri | Location; 78 | 79 | /** 80 | * An optional title that is rendered with value. 81 | */ 82 | title?: string; 83 | 84 | /** 85 | * Create a new ChatResponseAnchorPart. 86 | * @param value A uri or location. 87 | * @param title An optional title that is rendered with value. 88 | */ 89 | constructor(value: Uri | Location, title?: string); 90 | } 91 | 92 | /** 93 | * Represents a part of a chat response that is a progress message. 94 | */ 95 | export class ChatResponseProgressPart { 96 | /** 97 | * The progress message 98 | */ 99 | value: string; 100 | 101 | /** 102 | * Create a new ChatResponseProgressPart. 103 | * @param value A progress message 104 | */ 105 | constructor(value: string); 106 | } 107 | 108 | /** 109 | * Represents a part of a chat response that is a reference, rendered separately from the content. 110 | */ 111 | export class ChatResponseReferencePart { 112 | /** 113 | * The reference target. 114 | */ 115 | value: Uri | Location; 116 | 117 | /** 118 | * The icon for the reference. 
119 | */ 120 | iconPath?: 121 | | Uri 122 | | ThemeIcon 123 | | { 124 | /** 125 | * The icon path for the light theme. 126 | */ 127 | light: Uri; 128 | /** 129 | * The icon path for the dark theme. 130 | */ 131 | dark: Uri; 132 | }; 133 | 134 | /** 135 | * Create a new ChatResponseReferencePart. 136 | * @param value A uri or location 137 | * @param iconPath Icon for the reference shown in UI 138 | */ 139 | constructor( 140 | value: Uri | Location, 141 | iconPath?: 142 | | Uri 143 | | ThemeIcon 144 | | { 145 | /** 146 | * The icon path for the light theme. 147 | */ 148 | light: Uri; 149 | /** 150 | * The icon path for the dark theme. 151 | */ 152 | dark: Uri; 153 | } 154 | ); 155 | } 156 | 157 | /** 158 | * Represents a part of a chat response that is a button that executes a command. 159 | */ 160 | export class ChatResponseCommandButtonPart { 161 | /** 162 | * The command that will be executed when the button is clicked. 163 | */ 164 | value: Command; 165 | 166 | /** 167 | * Create a new ChatResponseCommandButtonPart. 168 | * @param value A Command that will be executed when the button is clicked. 169 | */ 170 | constructor(value: Command); 171 | } 172 | 173 | /** 174 | * Represents the different chat response types. 175 | */ 176 | export type ChatResponsePart = 177 | | ChatResponseMarkdownPart 178 | | ChatResponseFileTreePart 179 | | ChatResponseAnchorPart 180 | | ChatResponseProgressPart 181 | | ChatResponseReferencePart 182 | | ChatResponseCommandButtonPart; 183 | 184 | export interface ChatDocumentContext { 185 | uri: Uri; 186 | version: number; 187 | ranges: Range[]; 188 | } 189 | 190 | /** 191 | * Represents the role of a chat message. This is either the user or the assistant. 192 | */ 193 | export enum LanguageModelChatMessageRole { 194 | /** 195 | * The user role, e.g the human interacting with a language model. 196 | */ 197 | User = 1, 198 | 199 | /** 200 | * The assistant role, e.g. the language model generating responses. 
201 | */ 202 | Assistant = 2, 203 | } 204 | -------------------------------------------------------------------------------- /src/tracer/hooks.ts: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import { useEffect, useRef } from 'preact/hooks'; 6 | 7 | export function useDebouncedCallback any>( 8 | callback: T, 9 | delay: number 10 | ) { 11 | const timeoutIdRef = useRef(undefined); 12 | 13 | const debouncedCallback = (...args: Parameters) => { 14 | if (timeoutIdRef.current) { 15 | clearTimeout(timeoutIdRef.current); 16 | } 17 | timeoutIdRef.current = window.setTimeout(() => { 18 | callback(...args); 19 | }, delay); 20 | }; 21 | 22 | useEffect(() => { 23 | return () => { 24 | if (timeoutIdRef.current) { 25 | clearTimeout(timeoutIdRef.current); 26 | } 27 | }; 28 | }, []); 29 | 30 | return debouncedCallback; 31 | } 32 | -------------------------------------------------------------------------------- /src/tracer/i18n.tsx: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 
3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import { Fragment, FunctionComponent, h } from 'preact'; 6 | 7 | const numberFormat = new Intl.NumberFormat('en-US'); 8 | 9 | export const Integer: FunctionComponent<{ value: number }> = ({ value }) => ( 10 | <>{numberFormat.format(value)} 11 | ); 12 | -------------------------------------------------------------------------------- /src/tracer/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe WPC', 'Segoe UI', system-ui, 'Ubuntu', 3 | 'Droid Sans', sans-serif; 4 | background: #fff; 5 | margin: 0; 6 | } 7 | 8 | /** Old flex display */ 9 | .render-pass { 10 | border-left: 2px solid #ccc; 11 | 12 | &:hover { 13 | border-left-color: #000; 14 | } 15 | } 16 | 17 | .literals li { 18 | white-space: pre; 19 | font-family: 'SF Mono', Monaco, Menlo, Consolas, 'Ubuntu Mono', 'Liberation Mono', 20 | 'DejaVu Sans Mono', 'Courier New', monospace; 21 | } 22 | 23 | .render-flex, 24 | .render-element { 25 | padding-left: 10px; 26 | } 27 | 28 | /** TSX Components */ 29 | 30 | .node { 31 | border: 1px solid rgba(255, 255, 255, 0.5); 32 | margin: 3px 10px; 33 | padding: 3px 10px; 34 | border-radius: 4px; 35 | width: fit-content; 36 | 37 | &.new-in-epoch { 38 | box-shadow: 0 0 3px 2px #ff0000; 39 | } 40 | 41 | &.before-epoch { 42 | pointer-events: none; 43 | filter: grayscale(1); 44 | color: #777 !important; 45 | 46 | .node { 47 | color: #777 !important; 48 | } 49 | } 50 | 51 | &:last-child { 52 | margin-bottom: 0; 53 | } 54 | } 55 | 56 | .node-content { 57 | font-weight: bold; 58 | } 59 | 60 | .node-children { 61 | margin-left: 20px; 62 | border-left: 2px dashed rgba(255, 255, 255, 0.5); 63 | padding-left: 10px; 64 | } 65 | 66 | .node-toggler { 67 | cursor: pointer; 68 | display: flex; 69 | align-items: center; 70 | justify-content: space-between; 71 | 72 | .indicator { 
73 | font-size: 0.7em; 74 | } 75 | } 76 | 77 | .node-text { 78 | width: 400px; 79 | 80 | &:focus, 81 | &:focus-within { 82 | outline: 1px solid orange; 83 | 84 | .node-content { 85 | white-space: normal; 86 | } 87 | } 88 | 89 | .node-content { 90 | font-weight: normal; 91 | font-size: 0.8em; 92 | 93 | white-space: nowrap; 94 | text-overflow: ellipsis; 95 | overflow: hidden; 96 | } 97 | } 98 | 99 | .node-stats { 100 | font-family: 'SF Mono', Monaco, Menlo, Consolas, 'Ubuntu Mono', 'Liberation Mono', 101 | 'DejaVu Sans Mono', 'Courier New', monospace; 102 | font-size: 0.8em; 103 | } 104 | 105 | .control-description { 106 | padding: 10px; 107 | 108 | p { 109 | font-size: 0.9em; 110 | max-width: 500px; 111 | margin-top: 0; 112 | } 113 | } 114 | 115 | .controls { 116 | display: flex; 117 | flex-direction: column; 118 | gap: 10px; 119 | position: sticky; 120 | top: 0; 121 | padding: 10px; 122 | background: #fff; 123 | border-bottom: 1px solid #ccc; 124 | z-index: 1; 125 | } 126 | 127 | .controls-slider { 128 | display: flex; 129 | align-items: center; 130 | gap: 10px; 131 | } 132 | 133 | .controls-stats { 134 | display: flex; 135 | gap: 20px; 136 | list-style: none; 137 | padding: 0; 138 | margin-top: 0; 139 | } 140 | 141 | .controls-scoreby { 142 | display: flex; 143 | gap: 10px; 144 | } 145 | 146 | .tabs { 147 | display: flex; 148 | border-bottom: 1px solid #ccc; 149 | margin-bottom: 10px; 150 | } 151 | 152 | .tab { 153 | padding: 10px; 154 | cursor: pointer; 155 | border: 1px solid transparent; 156 | border-bottom: none; 157 | } 158 | 159 | .tab.active { 160 | border-color: #ccc; 161 | border-bottom: 1px solid #fff; 162 | background-color: #f9f9f9; 163 | } 164 | 165 | .tab-content { 166 | display: none; 167 | } 168 | 169 | .tab-content.active { 170 | display: block; 171 | } 172 | -------------------------------------------------------------------------------- /src/tracer/index.tsx: -------------------------------------------------------------------------------- 1 | 
/*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import { FunctionComponent, h, render } from 'preact'; 6 | import { useState } from 'preact/hooks'; 7 | import type { HTMLTraceEpoch, IHTMLTraceRenderData } from '../base/htmlTracerTypes'; 8 | import { useDebouncedCallback } from './hooks'; 9 | import { Integer } from './i18n'; 10 | import './index.css'; 11 | import { Root } from './node'; 12 | 13 | /* Globals injected into the page by the HTML tracer before this script runs. */ declare const DEFAULT_MODEL: IHTMLTraceRenderData; 14 | declare const EPOCHS: HTMLTraceEpoch[]; 15 | declare const DEFAULT_TOKENS: number; 16 | /* Base URL of the regen server; only ever interpolated into fetch() URLs, so it is a string (was misdeclared as number). */ declare const SERVER_ADDRESS: string; 17 | 18 | const SliderWithInputControl: FunctionComponent<{ 19 | label: string; 20 | value: number; 21 | onChange: (newTokens: number) => void; 22 | min: number; 23 | max: number; 24 | }> = ({ label, value, onChange, min, max }) => { 25 | const handleSliderChange = (event: Event) => { 26 | onChange((event.target as HTMLInputElement).valueAsNumber); 27 | }; 28 | const id = `number-slider-${Math.random()}`; 29 | 30 | return ( 31 |

32 | 33 | 34 | 41 |
42 | ); 43 | }; 44 | 45 | const ScoreByControl: FunctionComponent<{ 46 | scoreBy: 'priority' | 'tokens'; 47 | onScoreByChange: (newScoreBy: 'priority' | 'tokens') => void; 48 | }> = ({ scoreBy, onScoreByChange }) => { 49 | const handleScoreByChange = (event: Event) => { 50 | const newScoreBy = (event.target as HTMLInputElement).value as 'priority' | 'tokens'; 51 | onScoreByChange(newScoreBy); 52 | }; 53 | 54 | return ( 55 |
56 | Visualize by 57 | 67 | 77 |
78 | ); 79 | }; 80 | 81 | const App = () => { 82 | const [tokens, setTokens] = useState(DEFAULT_TOKENS); 83 | const [epoch, setEpoch] = useState(EPOCHS.length); 84 | const [model, setModel] = useState(DEFAULT_MODEL); 85 | const [scoreBy, setScoreBy] = useState<'priority' | 'tokens'>('tokens'); 86 | const [activeTab, setActiveTab] = useState<'epoch' | 'tokens'>('epoch'); 87 | 88 | const regenModel = useDebouncedCallback(async (tokens: number) => { 89 | if (tokens === DEFAULT_TOKENS) { 90 | /* Restore the pre-rendered model without a server round-trip. The debounced callback's return value is discarded, so the previous `return DEFAULT_MODEL` was a no-op and the view never reset. */ setModel(DEFAULT_MODEL); return; 91 | } 92 | const response = await fetch(`${SERVER_ADDRESS}regen?n=${tokens}`); 93 | const newModel: IHTMLTraceRenderData = await response.json(); 94 | setModel(newModel); 95 | }, 100); 96 | 97 | const handleTokensChange = (newTokens: number) => { 98 | setTokens(newTokens); 99 | regenModel(newTokens); 100 | setEpoch(EPOCHS.length); 101 | }; 102 | 103 | return ( 104 |
105 |
106 |
107 |
setActiveTab('epoch')} 110 | > 111 | View Order 112 |
113 |
setActiveTab('tokens')} 116 | > 117 | Change Token Budget 118 |
119 |
120 |
121 | 128 |
129 |
130 | 137 |
138 |
139 |
140 | {activeTab === 'tokens' ? ( 141 |

142 | Token changes here will prune elements and re-render Expandable ones, but the entire 143 | prompt is not being re-rendered 144 |

145 | ) : ( 146 |

147 | Changing the render epoch lets you see the order in which elements are rendered and how 148 | the token budget is allocated. 149 |

150 | )} 151 |
152 | 153 | Used / tokens 154 | 155 | 156 | Removed nodes 157 | 158 | 159 |
160 |
161 | 162 |
163 | ); 164 | }; 165 | 166 | render(, document.body); 167 | -------------------------------------------------------------------------------- /src/tracer/node.tsx: -------------------------------------------------------------------------------- 1 | /*--------------------------------------------------------------------------------------------- 2 | * Copyright (c) Microsoft Corporation and GitHub. All rights reserved. 3 | *--------------------------------------------------------------------------------------------*/ 4 | 5 | import { FunctionComponent, h } from 'preact'; 6 | import { useState } from 'preact/hooks'; 7 | import { 8 | HTMLTraceEpoch, 9 | ITraceMaterializedChatMessage, 10 | ITraceMaterializedChatMessageImage, 11 | ITraceMaterializedChatMessageTextChunk, 12 | ITraceMaterializedContainer, 13 | ITraceMaterializedNode, 14 | TraceMaterializedNodeType, 15 | } from '../base/htmlTracerTypes'; 16 | import { Integer } from './i18n'; 17 | 18 | declare const EPOCHS: HTMLTraceEpoch[]; 19 | 20 | const RANGE_COLORS = [ 21 | { bg: '#c1e7ff', fg: '#000' }, 22 | { bg: '#abd2ec', fg: '#000' }, 23 | { bg: '#94bed9', fg: '#000' }, 24 | { bg: '#7faac6', fg: '#000' }, 25 | { bg: '#6996b3', fg: '#fff' }, 26 | { bg: '#5383a1', fg: '#fff' }, 27 | { bg: '#3d708f', fg: '#fff' }, 28 | { bg: '#255e7e', fg: '#fff' }, 29 | ]; 30 | 31 | type ScoreField = { field: 'priority' | 'tokens'; min: number; max: number }; 32 | 33 | const Children: FunctionComponent<{ 34 | scoreBy: ScoreField; 35 | nodes: ITraceMaterializedNode[]; 36 | epoch: number; 37 | }> = ({ scoreBy, nodes, epoch }) => { 38 | if (nodes.length === 0) { 39 | return null; 40 | } 41 | 42 | let nextScoreBy = scoreBy; 43 | // priority is always scored relative to the container, while tokens are global 44 | if (scoreBy.field !== 'tokens') { 45 | let max = nodes[0][scoreBy.field]; 46 | let min = nodes[0][scoreBy.field]; 47 | for (let i = 1; i < nodes.length; i++) { 48 | max = Math.max(max, nodes[i][scoreBy.field]); 49 | min = 
Math.min(min, nodes[i][scoreBy.field]); /* fix: was Math.max — the running minimum could never decrease, so the per-container priority color scale was computed from a wrong [min, max] range */ 50 | } 51 | nextScoreBy = { field: scoreBy.field, max, min }; 52 | } 53 | 54 | return ( 55 |
56 | {nodes.map((child, index) => 57 | child.type === TraceMaterializedNodeType.TextChunk ? ( 58 | 59 | ) : ( 60 | 61 | ) 62 | )} 63 |
64 | ); 65 | }; 66 | 67 | const LNNodeStats: FunctionComponent<{ node: ITraceMaterializedNode }> = ({ node }) => ( 68 |
69 | Used Tokens: 70 | {' / '} 71 | Priority:{' '} 72 | {node.priority === Number.MAX_SAFE_INTEGER ? 'MAX' : } 73 |
74 | ); 75 | 76 | const LMNode: FunctionComponent< 77 | { scoreBy: ScoreField; node: ITraceMaterializedNode } & h.JSX.HTMLAttributes 78 | > = ({ scoreBy, node, children, ...attrs }) => { 79 | let step = 0; 80 | if (scoreBy.max !== scoreBy.min) { 81 | const pct = (node[scoreBy.field] - scoreBy.min) / (scoreBy.max - scoreBy.min); 82 | step = Math.round((RANGE_COLORS.length - 1) * pct); 83 | } 84 | 85 | return ( 86 |
91 | {children} 92 |
93 | ); 94 | }; 95 | 96 | const TextNode: FunctionComponent<{ 97 | scoreBy: ScoreField; 98 | node: ITraceMaterializedChatMessageTextChunk; 99 | }> = ({ scoreBy, node }) => { 100 | return ( 101 | 102 | 103 |
{node.value}
104 |
105 | ); 106 | }; 107 | 108 | const WrapperNode: FunctionComponent<{ 109 | scoreBy: ScoreField; 110 | node: ITraceMaterializedContainer | ITraceMaterializedChatMessage | ITraceMaterializedChatMessageImage; 111 | epoch: number; 112 | }> = ({ scoreBy, node, epoch }) => { 113 | const [collapsed, setCollapsed] = useState(false); 114 | /* Locate the render epoch that produced this node (inner param renamed to avoid shadowing). */ const epochIndex = EPOCHS.findIndex(ep => ep.elements.some(el => el.id === node.id)); 115 | /* fix: findIndex signals a miss with -1, never undefined — the old check was dead code and a missing epoch would have crashed below on EPOCHS[-1].elements */ if (epochIndex === -1) { 116 | throw new Error(`epoch not found for ${node.id}`); 117 | } 118 | const myEpoch = EPOCHS[epochIndex]; 119 | const thisEpoch = EPOCHS.at(epoch); 120 | const tokenBudget = myEpoch.elements.find(e => e.id === node.id)!.tokenBudget; 121 | const tag = 122 | node.type === TraceMaterializedNodeType.ChatMessage 123 | ? node.name || node.role.slice(0, 1).toUpperCase() + node.role.slice(1) + 'Message' 124 | : node.name; 125 | 126 | const className = 127 | epochIndex === epoch ? 'new-in-epoch' : epoch < epochIndex ? 'before-epoch' : ''; 128 | 129 | return ( 130 | 131 | 132 |
setCollapsed(v => !v)}> 133 | 134 | {thisEpoch?.inNode === node.id ? '🏃 ' : ''} 135 | {`<${tag}>`} 136 | 137 | {collapsed ? '[+]' : '[-]'} 138 |
139 | {epoch === epochIndex && ( 140 |
141 | Token Budget: 142 |
143 | )} 144 | {thisEpoch?.inNode === node.id && ( 145 |
146 | Rendering flexGrow={thisEpoch.flexValue} 147 |
148 |
149 | Splitting{' '} 150 | {thisEpoch.reservedTokens 151 | ? `${thisEpoch.tokenBudget} - ${thisEpoch.reservedTokens} (reserved) = ` 152 | : ''} 153 | tokens among {thisEpoch.elements.length}{' '} 154 | elements 155 |
156 | )} 157 | {!collapsed && } 158 |
159 | ); 160 | }; 161 | 162 | export const Root: FunctionComponent<{ 163 | scoreBy: 'priority' | 'tokens'; 164 | node: ITraceMaterializedContainer; 165 | epoch: number; 166 | }> = ({ scoreBy, node, epoch }) => { 167 | let score: ScoreField; 168 | if (scoreBy === 'tokens') { 169 | score = { field: 'tokens', max: node.tokens, min: 0 }; 170 | } else { 171 | score = { field: 'priority', max: node.priority, min: node.priority }; 172 | } 173 | 174 | return ; 175 | }; 176 | -------------------------------------------------------------------------------- /src/tracer/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "include": ["./"], 3 | "compilerOptions": { 4 | "module": "ESNext", 5 | "noEmit": true, 6 | "strict": true, 7 | "declaration": false, 8 | "jsx": "react", 9 | "jsxFactory": "h", 10 | "jsxFragmentFactory": "Fragment", 11 | "moduleResolution": "Bundler" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "exclude": ["build", "src/tracer"], 3 | "include": ["src"], 4 | "compilerOptions": { 5 | "module": "commonjs", 6 | "target": "ES2022", 7 | "outDir": "dist", 8 | "rootDir": "src", 9 | "strict": true, 10 | "jsx": "react", 11 | "jsxFactory": "vscpp", 12 | "declaration": true, 13 | "jsxFragmentFactory": "vscppf", 14 | "moduleResolution": "node", 15 | "types": ["node", "mocha"] 16 | } 17 | } 18 | --------------------------------------------------------------------------------