├── .nvmrc
├── .npmrc
├── examples
├── .eslintignore
├── priompt
│ ├── ArvidStory
│ │ └── example01.yaml
│ ├── examplePrompt
│ │ └── example01.yaml
│ ├── SimplePrompt
│ │ └── example01.yaml
│ ├── functionCallingPrompt
│ │ └── example01.yaml
│ └── SimpleFunction
│ │ └── example01.yaml
├── .gitignore
├── .eslintrc.json
├── .env.example
├── vitest.config.ts
├── tsconfig.json
├── README.md
├── package.json
└── src
│ ├── priompt-preview-handlers.ts
│ ├── prompt.tsx
│ ├── function-calling-prompt.tsx
│ └── index.ts
├── priompt-preview
├── src
│ ├── load_remote.ts
│ ├── vite-env.d.ts
│ ├── index.css
│ ├── lib
│ │ └── utils.ts
│ ├── main.tsx
│ ├── components
│ │ └── ui
│ │ │ ├── textarea.tsx
│ │ │ ├── button.tsx
│ │ │ ├── dialog.tsx
│ │ │ └── command.tsx
│ └── openai.ts
├── README.md
├── vite.config.d.ts
├── postcss.config.js
├── tsconfig.node.json
├── vite.config.ts
├── vite.config.js
├── .gitignore
├── components.json
├── index.html
├── .eslintrc.cjs
├── tsconfig.json
├── tailwind.config.js
├── scripts
│ └── serve.cjs
└── package.json
├── priompt
├── .gitignore
├── .eslintignore
├── README.md
├── .eslintrc.json
├── vitest.config.ts
├── src
│ ├── index.ts
│ ├── outputCatcher.ai.impl.ts
│ ├── outputCatcher.ai.ts
│ ├── tokenizer.ts
│ ├── components.tsx
│ ├── base.test.tsx
│ ├── openai.ts
│ ├── types.d.ts
│ ├── components.test.tsx
│ ├── outputCatcher.ai.test.ts
│ └── preview.ts
├── tsconfig.json
└── package.json
├── tiktoken-node
├── build.rs
├── npm
│ ├── darwin-x64
│ │ ├── README.md
│ │ └── package.json
│ ├── darwin-arm64
│ │ ├── README.md
│ │ └── package.json
│ ├── linux-x64-gnu
│ │ ├── README.md
│ │ └── package.json
│ ├── win32-x64-msvc
│ │ ├── README.md
│ │ └── package.json
│ ├── linux-arm64-gnu
│ │ ├── README.md
│ │ └── package.json
│ └── win32-arm64-msvc
│ │ ├── README.md
│ │ └── package.json
├── .npmignore
├── README.md
├── Cargo.toml
├── package.json
├── index.d.ts
├── .gitignore
├── index.js
└── src
│ └── lib.rs
├── rustfmt.toml
├── .gitignore
├── init.sh
├── .vscode
└── settings.json
├── LICENSE
├── publish.sh
├── .eslintrc.base.json
├── pull-from-open-source.sh
├── push-to-open-source.sh
├── .github
└── workflows
│ └── publish.yml
└── README.md
/.nvmrc:
--------------------------------------------------------------------------------
1 | 20.6.1
--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------
1 | publish-branch=publish
--------------------------------------------------------------------------------
/examples/.eslintignore:
--------------------------------------------------------------------------------
1 | vitest.config.ts
--------------------------------------------------------------------------------
/priompt-preview/src/load_remote.ts:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/priompt/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 | node_modules
--------------------------------------------------------------------------------
/examples/priompt/ArvidStory/example01.yaml:
--------------------------------------------------------------------------------
1 | {}
2 |
--------------------------------------------------------------------------------
/priompt/.eslintignore:
--------------------------------------------------------------------------------
1 | dist
2 | esbuild.ts
3 | vitest.config.ts
--------------------------------------------------------------------------------
/examples/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 | node_modules
3 | .env
4 | priompt/*/dumps/**/*
--------------------------------------------------------------------------------
/priompt-preview/src/vite-env.d.ts:
--------------------------------------------------------------------------------
1 | /// <reference types="vite/client" />
2 |
--------------------------------------------------------------------------------
/priompt/README.md:
--------------------------------------------------------------------------------
1 | Run `pnpm build` or `pnpm build-watch` to build the library.
2 |
--------------------------------------------------------------------------------
/priompt-preview/README.md:
--------------------------------------------------------------------------------
1 | Run `pnpm build` or `pnpm build-watch` to build the library.
2 |
--------------------------------------------------------------------------------
/priompt-preview/src/index.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
--------------------------------------------------------------------------------
/tiktoken-node/build.rs:
--------------------------------------------------------------------------------
1 | extern crate napi_build;
2 |
3 | fn main() {
4 | napi_build::setup();
5 | }
6 |
--------------------------------------------------------------------------------
/examples/priompt/examplePrompt/example01.yaml:
--------------------------------------------------------------------------------
1 | message: what is the advantage of rust over c
2 | name: arvid
3 |
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | use_small_heuristics = "Max"
2 | newline_style = "Unix"
3 | edition = "2021"
4 | tab_spaces = 2
5 |
--------------------------------------------------------------------------------
/priompt-preview/vite.config.d.ts:
--------------------------------------------------------------------------------
1 | declare const _default: import("vite").UserConfig;
2 | export default _default;
3 |
--------------------------------------------------------------------------------
/examples/priompt/SimplePrompt/example01.yaml:
--------------------------------------------------------------------------------
1 | text: Cursor är den bästa plattformen för att skriva kod.
2 | language: swahili
3 |
--------------------------------------------------------------------------------
/priompt-preview/postcss.config.js:
--------------------------------------------------------------------------------
1 | export default {
2 | plugins: {
3 | tailwindcss: {},
4 | autoprefixer: {},
5 | },
6 | }
7 |
--------------------------------------------------------------------------------
/examples/priompt/functionCallingPrompt/example01.yaml:
--------------------------------------------------------------------------------
1 | message: bad the prompt buton not work
2 | includeFunctions:
3 | - insert_sql_row
4 |
--------------------------------------------------------------------------------
/examples/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": ["../.eslintrc.base"],
3 | "rules": {
4 | // additional rules specific to this config
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/priompt/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": ["../.eslintrc.base"],
3 | "rules": {
4 | // additional rules specific to this config
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/tiktoken-node/npm/darwin-x64/README.md:
--------------------------------------------------------------------------------
1 | # `@anysphere/tiktoken-node-darwin-x64`
2 |
3 | This is the **x86_64-apple-darwin** binary for `@anysphere/tiktoken-node`
4 |
--------------------------------------------------------------------------------
/tiktoken-node/npm/darwin-arm64/README.md:
--------------------------------------------------------------------------------
1 | # `@anysphere/tiktoken-node-darwin-arm64`
2 |
3 | This is the **aarch64-apple-darwin** binary for `@anysphere/tiktoken-node`
4 |
--------------------------------------------------------------------------------
/tiktoken-node/npm/linux-x64-gnu/README.md:
--------------------------------------------------------------------------------
1 | # `@anysphere/tiktoken-node-linux-x64-gnu`
2 |
3 | This is the **x86_64-unknown-linux-gnu** binary for `@anysphere/tiktoken-node`
4 |
--------------------------------------------------------------------------------
/tiktoken-node/npm/win32-x64-msvc/README.md:
--------------------------------------------------------------------------------
1 | # `@anysphere/tiktoken-node-win32-x64-msvc`
2 |
3 | This is the **x86_64-pc-windows-msvc** binary for `@anysphere/tiktoken-node`
4 |
--------------------------------------------------------------------------------
/tiktoken-node/npm/linux-arm64-gnu/README.md:
--------------------------------------------------------------------------------
1 | # `@anysphere/tiktoken-node-linux-arm64-gnu`
2 |
3 | This is the **aarch64-unknown-linux-gnu** binary for `@anysphere/tiktoken-node`
4 |
--------------------------------------------------------------------------------
/tiktoken-node/npm/win32-arm64-msvc/README.md:
--------------------------------------------------------------------------------
1 | # `@anysphere/tiktoken-node-win32-arm64-msvc`
2 |
3 | This is the **aarch64-pc-windows-msvc** binary for `@anysphere/tiktoken-node`
4 |
--------------------------------------------------------------------------------
/examples/priompt/SimpleFunction/example01.yaml:
--------------------------------------------------------------------------------
1 | code: |-
2 | function x() {
3 | return z.object({
4 | a: z.string(),
5 | b: z.number(),
6 | });
7 | }
8 | error: '''z'' is not defined'
9 |
--------------------------------------------------------------------------------
/tiktoken-node/.npmignore:
--------------------------------------------------------------------------------
1 | target
2 | Cargo.lock
3 | .cargo
4 | .github
5 | npm
6 | .eslintrc
7 | .prettierignore
8 | rustfmt.toml
9 | yarn.lock
10 | *.node
11 | .yarn
12 | __test__
13 | renovate.json
14 |
--------------------------------------------------------------------------------
/priompt-preview/src/lib/utils.ts:
--------------------------------------------------------------------------------
1 | import { type ClassValue, clsx } from "clsx"
2 | import { twMerge } from "tailwind-merge"
3 |
// Merge arbitrary clsx-style class inputs (strings, arrays, conditional
// objects) into one className string, then let tailwind-merge drop
// conflicting Tailwind utility classes so the last-specified one wins.
4 | export function cn(...inputs: ClassValue[]) {
5 |   return twMerge(clsx(inputs))
6 | }
7 |
--------------------------------------------------------------------------------
/examples/.env.example:
--------------------------------------------------------------------------------
1 | SERVER_PORT=8008
2 | NODE_ENV=development
3 | OPENAI_API_KEY=sk-your-openai-secret-key
4 |
5 | PRIOMPT_PREVIEW_PORT=6284
6 | PRIOMPT_PREVIEW_SERVER_PORT=$SERVER_PORT
7 | PRIOMPT_PREVIEW_OPENAI_KEY=$OPENAI_API_KEY
--------------------------------------------------------------------------------
/examples/vitest.config.ts:
--------------------------------------------------------------------------------
1 | export default {
2 | test: {
3 | include: [
4 | 'src/**/*.{test,spec}.{js,ts,jsx,tsx}',
5 | // Also include top level files
6 | 'src/*.{test,spec}.{js,ts,jsx,tsx}'
7 | ],
8 | exclude: ['build/**/*'],
9 | },
10 | };
--------------------------------------------------------------------------------
/priompt-preview/tsconfig.node.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "composite": true,
4 | "skipLibCheck": true,
5 | "module": "ESNext",
6 | "moduleResolution": "node",
7 | "allowSyntheticDefaultImports": true
8 | },
9 | "include": ["vite.config.ts"]
10 | }
11 |
--------------------------------------------------------------------------------
/priompt/vitest.config.ts:
--------------------------------------------------------------------------------
1 | export default {
2 | test: {
3 | include: [
4 | 'src/**/*.{test,spec}.{js,ts,jsx,tsx}',
5 | // Also include top level files
6 | 'src/*.{test,spec}.{js,ts,jsx,tsx}'
7 | ],
8 | exclude: ['build/**/*'],
9 | // setupFiles: ['dotenv/config']
10 | },
11 | };
--------------------------------------------------------------------------------
/tiktoken-node/README.md:
--------------------------------------------------------------------------------
1 | # @anysphere/tiktoken-node
2 |
3 | We use our own fork for now because we are making changes that may not be useful to everyone. For example, we add support for special tokens, and we also add support for running tokenization asynchronously with the computation happening on a different thread.
4 |
--------------------------------------------------------------------------------
/priompt-preview/src/main.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react";
2 | import * as ReactDOM from "react-dom/client";
3 | import './index.css';
4 | import App from "./App";
5 |
6 | ReactDOM.createRoot(document.getElementById("root") as HTMLElement).render(
7 |
8 |
9 |
10 | );
11 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 | node_modules
3 | priompt-opensource
4 | commit.patch
5 | commit.template
6 | .wireit
7 | target/
8 | *.tsbuildinfo
9 |
10 | # todo: we should figure out a good way to sync these with the internal repo instead of having to generate them with the init script
11 | Cargo.lock
12 | Cargo.toml
13 | pnpm-workspace.yaml
14 | pnpm-lock.yaml
--------------------------------------------------------------------------------
/priompt-preview/vite.config.ts:
--------------------------------------------------------------------------------
1 | import path from "path"
2 | import react from "@vitejs/plugin-react"
3 | import { defineConfig } from "vite"
4 |
5 | // https://vitejs.dev/config/
6 | export default defineConfig({
7 | plugins: [react()],
8 | resolve: {
9 | alias: {
10 | "@": path.resolve(__dirname, "./src"),
11 | },
12 | },
13 | })
14 |
--------------------------------------------------------------------------------
/priompt-preview/vite.config.js:
--------------------------------------------------------------------------------
1 | import path from "path";
2 | import react from "@vitejs/plugin-react";
3 | import { defineConfig } from "vite";
4 | // https://vitejs.dev/config/
5 | export default defineConfig({
6 | plugins: [react()],
7 | resolve: {
8 | alias: {
9 | "@": path.resolve(__dirname, "./src"),
10 | },
11 | },
12 | });
13 |
--------------------------------------------------------------------------------
/priompt-preview/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | pnpm-debug.log*
8 | lerna-debug.log*
9 |
10 | node_modules
11 | dist
12 | dist-ssr
13 | *.local
14 |
15 | # Editor directories and files
16 | .vscode/*
17 | !.vscode/extensions.json
18 | .idea
19 | .DS_Store
20 | *.suo
21 | *.ntvs*
22 | *.njsproj
23 | *.sln
24 | *.sw?
25 |
--------------------------------------------------------------------------------
/tiktoken-node/npm/darwin-x64/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@anysphere/tiktoken-node-darwin-x64",
3 | "version": "0.1.27",
4 | "os": [
5 | "darwin"
6 | ],
7 | "cpu": [
8 | "x64"
9 | ],
10 | "main": "tiktoken-node.darwin-x64.node",
11 | "files": [
12 | "tiktoken-node.darwin-x64.node"
13 | ],
14 | "license": "MIT",
15 | "engines": {
16 | "node": ">= 10"
17 | }
18 | }
--------------------------------------------------------------------------------
/tiktoken-node/npm/darwin-arm64/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@anysphere/tiktoken-node-darwin-arm64",
3 | "version": "0.1.27",
4 | "os": [
5 | "darwin"
6 | ],
7 | "cpu": [
8 | "arm64"
9 | ],
10 | "main": "tiktoken-node.darwin-arm64.node",
11 | "files": [
12 | "tiktoken-node.darwin-arm64.node"
13 | ],
14 | "license": "MIT",
15 | "engines": {
16 | "node": ">= 10"
17 | }
18 | }
--------------------------------------------------------------------------------
/tiktoken-node/npm/win32-x64-msvc/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@anysphere/tiktoken-node-win32-x64-msvc",
3 | "version": "0.1.27",
4 | "os": [
5 | "win32"
6 | ],
7 | "cpu": [
8 | "x64"
9 | ],
10 | "main": "tiktoken-node.win32-x64-msvc.node",
11 | "files": [
12 | "tiktoken-node.win32-x64-msvc.node"
13 | ],
14 | "license": "MIT",
15 | "engines": {
16 | "node": ">= 10"
17 | }
18 | }
--------------------------------------------------------------------------------
/priompt-preview/components.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://ui.shadcn.com/schema.json",
3 | "style": "new-york",
4 | "rsc": false,
5 | "tsx": true,
6 | "tailwind": {
7 | "config": "tailwind.config.js",
8 | "css": "src/index.css",
9 | "baseColor": "stone",
10 | "cssVariables": false
11 | },
12 | "aliases": {
13 | "components": "@/components",
14 | "utils": "@/lib/utils"
15 | }
16 | }
--------------------------------------------------------------------------------
/tiktoken-node/npm/win32-arm64-msvc/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@anysphere/tiktoken-node-win32-arm64-msvc",
3 | "version": "0.1.27",
4 | "os": [
5 | "win32"
6 | ],
7 | "cpu": [
8 | "arm64"
9 | ],
10 | "main": "tiktoken-node.win32-arm64-msvc.node",
11 | "files": [
12 | "tiktoken-node.win32-arm64-msvc.node"
13 | ],
14 | "license": "MIT",
15 | "engines": {
16 | "node": ">= 10"
17 | }
18 | }
--------------------------------------------------------------------------------
/priompt-preview/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Priompt Preview
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/tiktoken-node/npm/linux-x64-gnu/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@anysphere/tiktoken-node-linux-x64-gnu",
3 | "version": "0.1.27",
4 | "os": [
5 | "linux"
6 | ],
7 | "cpu": [
8 | "x64"
9 | ],
10 | "main": "tiktoken-node.linux-x64-gnu.node",
11 | "files": [
12 | "tiktoken-node.linux-x64-gnu.node"
13 | ],
14 | "license": "MIT",
15 | "engines": {
16 | "node": ">= 10"
17 | },
18 | "libc": [
19 | "glibc"
20 | ]
21 | }
--------------------------------------------------------------------------------
/tiktoken-node/npm/linux-arm64-gnu/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@anysphere/tiktoken-node-linux-arm64-gnu",
3 | "version": "0.1.27",
4 | "os": [
5 | "linux"
6 | ],
7 | "cpu": [
8 | "arm64"
9 | ],
10 | "main": "tiktoken-node.linux-arm64-gnu.node",
11 | "files": [
12 | "tiktoken-node.linux-arm64-gnu.node"
13 | ],
14 | "license": "MIT",
15 | "engines": {
16 | "node": ">= 10"
17 | },
18 | "libc": [
19 | "glibc"
20 | ]
21 | }
--------------------------------------------------------------------------------
/examples/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "esModuleInterop": true,
4 | "outDir": "./dist",
5 | "strictNullChecks": true,
6 | "noImplicitAny": true,
7 | "declaration": true,
8 | "isolatedModules": true,
9 | "target": "es2022",
10 | "moduleResolution": "node",
11 | "jsx": "react",
12 | "jsxFactory": "Priompt.createElement",
13 | "jsxFragmentFactory": "Priompt.Fragment",
14 | "strictPropertyInitialization": true
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/priompt/src/index.ts:
--------------------------------------------------------------------------------
1 | export * from './lib';
2 |
3 | export * from './components';
4 |
5 | export { PreviewManager, dumpProps, register } from './preview';
6 | export type { PreviewManagerGetPromptQuery, PreviewManagerLiveModeQuery, PreviewManagerLiveModeResultQuery, PreviewConfig, SynchronousPreviewConfig } from './preview';
7 |
8 | export type { RenderOptions, RenderOutput, JSX, RenderedPrompt, Prompt, PromptElement, BaseProps, PromptProps, ChatAndFunctionPromptFunction, ChatPrompt } from './types';
9 |
--------------------------------------------------------------------------------
/init.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
4 |
5 | echo '{
6 | "packages": ["priompt", "priompt-preview", "examples", "tiktoken-node"]
7 | }' > "$SCRIPT_DIR"/pnpm-workspace.yaml
8 |
9 | echo '[workspace]
10 | members = ["tiktoken-node"]
11 | ' > "$SCRIPT_DIR"/Cargo.toml
12 |
13 | # copy over the examples/.env.example to examples/.env
14 | cp -f "$SCRIPT_DIR"/examples/.env.example "$SCRIPT_DIR"/examples/.env
15 |
16 | pnpm i -r
17 |
--------------------------------------------------------------------------------
/priompt-preview/.eslintrc.cjs:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | env: { browser: true, es2020: true, node: true },
3 | extends: [
4 | 'eslint:recommended',
5 | 'plugin:@typescript-eslint/recommended',
6 | 'plugin:react-hooks/recommended',
7 | ],
8 | parser: '@typescript-eslint/parser',
9 | parserOptions: { ecmaVersion: 'latest', sourceType: 'module' },
10 | plugins: ['react-refresh'],
11 | rules: {
12 | 'react-refresh/only-export-components': 'warn',
13 | // disable unused vars check
14 | '@typescript-eslint/no-unused-vars': 'off',
15 | '@typescript-eslint/no-empty-interface': 'off',
16 | '@typescript-eslint/no-inferrable-types': 'off',
17 | },
18 | }
19 |
--------------------------------------------------------------------------------
/priompt-preview/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ESNext",
4 | "lib": ["DOM", "DOM.Iterable", "ESNext"],
5 | "module": "ESNext",
6 | "skipLibCheck": true,
7 |
8 | /* Bundler mode */
9 | "moduleResolution": "node",
10 | "resolveJsonModule": true,
11 | "isolatedModules": true,
12 | "noEmit": true,
13 | "jsx": "react-jsx",
14 | "incremental": true,
15 |
16 | /* Linting */
17 | "strict": true,
18 | "noUnusedLocals": true,
19 | "noUnusedParameters": true,
20 | "noFallthroughCasesInSwitch": true,
21 | "baseUrl": ".",
22 | "paths": {
23 | "@/*": ["./src/*"]
24 | }
25 | },
26 | "include": ["src"],
27 | "references": [{ "path": "./tsconfig.node.json" }]
28 | }
29 |
--------------------------------------------------------------------------------
/tiktoken-node/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | edition = "2021"
3 | name = "anysphere_tiktoken-node"
4 | version = "0.0.1"
5 |
6 | [lib]
7 | crate-type = ["cdylib"]
8 |
9 | [dependencies]
10 | # Default enable napi4 feature, see https://nodejs.org/api/n-api.html#node-api-version-matrix
11 | napi = { version = "2.12.2", default-features = false, features = [
12 | "napi4",
13 | "async",
14 | ] }
15 | napi-derive = "2.12.2"
16 | tiktoken = { git = "https://github.com/anysphere/tiktoken-rs", rev = "6b66cac8738428a4c695d7479d881aaddaf926dd" }
17 | rayon = "1.7.0"
18 | anyhow = "1.0.69"
19 | tokio = { version = "1.13.0", features = [
20 | "rt-multi-thread",
21 | "sync",
22 | "rt",
23 | "macros",
24 | ] }
25 |
26 | [build-dependencies]
27 | napi-build = "2.0.1"
28 |
29 | [profile.release]
30 | lto = true
31 |
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # Priompt examples
2 |
3 | An example showing how to use `priompt` and `priompt-preview`; somewhat useful for testing random prompts.
4 |
5 | This example uses `fastify` for the server, but any server library or framework should work.
6 |
7 | ## Running
8 |
9 | First run:
10 |
11 | ```bash
12 | cd .. && ./init.sh
13 | ```
14 |
15 | Then configure your OpenAI key in `.env`.
16 |
17 | In one terminal:
18 |
19 | ```bash
20 | pnpm priompt
21 | ```
22 |
23 | In another:
24 |
25 | ```bash
26 | pnpm watch
27 | ```
28 |
29 | In a third:
30 |
31 | ```bash
32 | curl 'localhost:8008/message?message=what%20is%20the%20advantage%20of%20rust%20over%20c&name=a%20curious%20explorer'
33 | ```
34 |
35 | You should get a response within a few seconds.
36 |
37 | Go to [localhost:6284](http://localhost:6284) to see the prompt in the priompt preview.
38 |
--------------------------------------------------------------------------------
/priompt/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "esModuleInterop": true,
4 | "outDir": "./dist",
5 | "strictNullChecks": true,
6 | "noImplicitAny": true,
7 | "declaration": true,
8 | "target": "ES2019",
9 | "module": "NodeNext",
10 | // we need this because vitest 1 requires nodenext, and vitest 0.33 and vitest 1 cannot coexist
11 | "moduleResolution": "nodenext",
12 | "jsx": "react",
13 | "jsxFactory": "Priompt.createElement",
14 | "jsxFragmentFactory": "Priompt.Fragment",
15 | "sourceMap": true,
16 | "inlineSources": true,
17 | // we need this to fix this weird vitest problem: https://github.com/vitejs/vite/issues/11552
18 | "skipLibCheck": true,
19 | "strictPropertyInitialization": true,
20 | "declarationMap": true
21 | },
22 | "include": ["./src/**/*.ts", "./src/**/*.tsx"]
23 | }
24 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "eslint.workingDirectories": [
3 | {
4 | "directory": "examples",
5 | "changeProcessCWD": true
6 | },
7 | {
8 | "directory": "priompt",
9 | "changeProcessCWD": true
10 | },
11 | {
12 | "directory": "priompt-preview",
13 | "changeProcessCWD": true
14 | }
15 | ],
16 | "[javascript]": {
17 | "editor.defaultFormatter": "vscode.typescript-language-features",
18 | "editor.insertSpaces": false,
19 | "editor.formatOnSave": true
20 | },
21 | "[typescript]": {
22 | "editor.defaultFormatter": "vscode.typescript-language-features",
23 | "editor.insertSpaces": false,
24 | "editor.formatOnSave": true
25 | },
26 | "[jsonc]": {
27 | "editor.defaultFormatter": "esbenp.prettier-vscode",
28 | "editor.formatOnSave": true
29 | },
30 | "files.associations": {
31 | "*.env*": "properties"
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/priompt-preview/src/components/ui/textarea.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react";
2 |
3 | import { cn } from "@/lib/utils";
4 |
5 | export interface TextareaProps
6 | extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {}
7 |
8 | const Textarea = React.forwardRef(
9 | ({ className, ...props }, ref) => {
10 | return (
11 |
19 | );
20 | }
21 | );
22 | Textarea.displayName = "Textarea";
23 |
24 | export { Textarea };
25 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright 2023 Anysphere, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/priompt/src/outputCatcher.ai.impl.ts:
--------------------------------------------------------------------------------
1 | import { OutputCatcher } from './outputCatcher.ai';
2 |
3 | export class OutputCatcherImpl implements OutputCatcher {
4 | private outputs: { output: T, priority: number | null }[] = [];
5 | private noPriorityOutputs: { output: T, priority: null }[] = [];
6 |
7 | async onOutput(output: T, options?: { p?: number }): Promise {
8 | if (options?.p !== undefined) {
9 | this.outputs.push({ output, priority: options.p });
10 | this.outputs.sort((a, b) => (b.priority as number) - (a.priority as number));
11 | } else {
12 | this.noPriorityOutputs.push({ output, priority: null });
13 | }
14 | }
15 |
16 | getOutputs(): T[] {
17 | return [...this.outputs, ...this.noPriorityOutputs].map(o => o.output);
18 | }
19 |
20 | getOutput(): T | undefined {
21 | return this.outputs.length > 0 ? this.outputs[0].output : this.noPriorityOutputs.length > 0 ? this.noPriorityOutputs[0].output : undefined;
22 | }
23 | }
24 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
25 |
--------------------------------------------------------------------------------
/priompt-preview/tailwind.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('tailwindcss').Config} */
2 | module.exports = {
3 | corePlugins: {
4 | preflight: false,
5 | },
6 | darkMode: ["class"],
7 | content: [
8 | './pages/**/*.{ts,tsx}',
9 | './components/**/*.{ts,tsx}',
10 | './app/**/*.{ts,tsx}',
11 | './src/**/*.{ts,tsx}',
12 | ],
13 | theme: {
14 | container: {
15 | center: true,
16 | padding: "2rem",
17 | screens: {
18 | "2xl": "1400px",
19 | },
20 | },
21 | extend: {
22 | keyframes: {
23 | "accordion-down": {
24 | from: { height: 0 },
25 | to: { height: "var(--radix-accordion-content-height)" },
26 | },
27 | "accordion-up": {
28 | from: { height: "var(--radix-accordion-content-height)" },
29 | to: { height: 0 },
30 | },
31 | },
32 | animation: {
33 | "accordion-down": "accordion-down 0.2s ease-out",
34 | "accordion-up": "accordion-up 0.2s ease-out",
35 | },
36 | },
37 | },
38 | plugins: [require("tailwindcss-animate")],
39 | }
--------------------------------------------------------------------------------
/priompt/src/outputCatcher.ai.ts:
--------------------------------------------------------------------------------
1 | import { OutputCatcherImpl } from './outputCatcher.ai.impl';
2 | export interface OutputCatcher {
3 | // p is a priority
4 | onOutput(output: T, options?: { p?: number }): Promise;
5 |
6 | // get a sorted list of the outputs, with the highest priority first
7 | // then come all the ones with no priority assigned, in the order they were added
8 | getOutputs(): T[];
9 |
10 | // get the first output
11 | getOutput(): T | undefined;
12 | }
13 |
14 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
15 | // @cursor-agent {"dependsOn": "implementation", "hash": "083f9244af4f56b541391df75e2a6bfe7e352f5ee6ed3ffe2eabd36dc06cdcf8"}
16 | export function NewOutputCatcher(): OutputCatcher {
17 | return new OutputCatcherImpl();
18 | }
19 |
20 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
21 | // @cursor-agent {"passedInitialVerification": true}
22 |
23 |
24 | // @cursor-agent {"dependsOn": "allFiles", "hash": "bd574f28bd5ebd5a493044d7bdaf54e43529f4b849ae540d9a5df45b9ad44ad1"}
25 | // @cursor-agent {"passedAllTests": true}
26 |
--------------------------------------------------------------------------------
/publish.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Bumps the version of all three published packages (priompt, priompt-preview,
# tiktoken-node), commits and pushes, then force-recreates the `publish`
# branch from the current branch.

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
cd "$SCRIPT_DIR"

# Color codes for error output. These were previously referenced but never
# defined, so the ${RED}/${NC} expansions printed as empty strings.
RED='\033[0;31m'
NC='\033[0m'

# Refuse to run with uncommitted changes: the script commits and pushes.
if [[ -n $(git status --porcelain) ]]; then
  echo -e "${RED}Your git state is not empty. Aborting the script...${NC}"
  exit 1
fi

# Check if a version bumping flag is provided
if [ $# -ne 1 ]; then
  echo "Error: Version bumping flag (patch, minor, or major) is required."
  exit 1
fi

# Validate the version bumping flag
case $1 in
  patch|minor|major)
    ;;
  *)
    echo "Error: Invalid version bumping flag. Use patch, minor, or major."
    exit 1
    ;;
esac

# Bump the version in each package (paths quoted in case of spaces).
cd "$SCRIPT_DIR/priompt"
npm version "$1"
cd "$SCRIPT_DIR/priompt-preview"
npm version "$1"
cd "$SCRIPT_DIR/tiktoken-node"
npm version "$1"

git commit -am "update version"

git push

# Recreate the publish branch from the current branch and force-push it,
# then return to the branch we started on.
# NOTE(review): presumably publishing is triggered from this branch — confirm.
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
DEPLOY_BRANCH=publish
git branch -D $DEPLOY_BRANCH || true
git checkout -b $DEPLOY_BRANCH
git push origin $DEPLOY_BRANCH -f
git checkout $CURRENT_BRANCH
--------------------------------------------------------------------------------
/tiktoken-node/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@anysphere/tiktoken-node",
3 | "version": "0.1.27",
4 | "main": "index.js",
5 | "types": "index.d.ts",
6 | "napi": {
7 | "name": "tiktoken-node",
8 | "triples": {
9 | "additional": [
10 | "aarch64-apple-darwin",
11 | "aarch64-pc-windows-msvc",
12 | "aarch64-unknown-linux-gnu"
13 | ]
14 | }
15 | },
16 | "devDependencies": {
17 | "@napi-rs/cli": "^2.16.2",
18 | "ava": "^5.1.1",
19 | "wireit": "^0.14.0"
20 | },
21 | "engines": {
22 | "npm": "please-use-pnpm",
23 | "yarn": "please-use-pnpm",
24 | "pnpm": "^8.6.0",
25 | "node": ">= 18.15.0"
26 | },
27 | "scripts": {
28 | "artifacts": "napi artifacts",
29 | "build": "wireit",
30 | "build:debug": "napi build --platform",
31 | "prepublishOnly": "napi prepublish -t npm --skip-gh-release",
32 | "universal": "napi universal",
33 | "version": "napi version"
34 | },
35 | "wireit": {
36 | "build": {
37 | "command": "napi build --platform --release",
38 | "files": [
39 | "src/**",
40 | "Cargo.toml",
41 | "Cargo.lock",
42 | "build.rs"
43 | ],
44 | "output": [
45 | "tiktoken-node.*.node",
46 | "index.js",
47 | "index.d.ts"
48 | ]
49 | }
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/examples/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "priompt-examples",
3 | "license": "MIT",
4 | "engines": {
5 | "npm": "please-use-pnpm",
6 | "yarn": "please-use-pnpm",
7 | "pnpm": ">= 8.3.0 < 9",
8 | "node": ">= 20.6.0"
9 | },
10 | "scripts": {
11 | "watch": "npm-run-all -p watch-src watch-priompt watch-priompt-preview",
12 | "watch-src": "sleep 3 && dotenv -e .env tsx watch --clear-screen=false src",
13 | "watch-priompt": "cd ../priompt && pnpm build-watch",
14 | "watch-priompt-preview": "cd ../priompt-preview && pnpm build-watch",
15 | "lint": "tsc --noEmit && eslint .",
16 | "test": "vitest",
17 | "coverage": "vitest run --coverage",
18 | "priompt": "dotenv -e .env pnpm npx @anysphere/priompt-preview serve"
19 | },
20 | "devDependencies": {
21 | "@anysphere/priompt-preview": "workspace:*",
22 | "@types/node": "^20.6.0",
23 | "@typescript-eslint/eslint-plugin": "^5.59.0",
24 | "@typescript-eslint/parser": "^5.59.0",
25 | "@vitest/coverage-v8": "^1.2.2",
26 | "eslint": "^8.38.0",
27 | "npm-run-all": "^4.1.5",
28 | "tsx": "^3.12.6",
29 | "typescript": "^5.2.0",
30 | "vitest": "^1.2.2"
31 | },
32 | "dependencies": {
33 | "@anysphere/priompt": "workspace:*",
34 | "@fastify/cors": "^8.3.0",
35 | "dotenv": "^16.1.4",
36 | "dotenv-cli": "^7.2.1",
37 | "fastify": "^4.17.0",
38 | "openai-v4": "npm:openai@4.0.0-beta.6",
39 | "openai": "^3.3.0",
40 | "zod": "^3.21.4"
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/tiktoken-node/index.d.ts:
--------------------------------------------------------------------------------
1 | /* tslint:disable */
2 | /* eslint-disable */
3 |
4 | /* auto-generated by NAPI-RS */
5 |
6 | export const enum SupportedEncoding {
7 | Cl100k = 0
8 | }
9 | export const enum SpecialTokenAction {
10 | /** The special token is forbidden. If it is included in the string, an error will be returned. */
11 | Forbidden = 0,
12 | /** The special token is tokenized as normal text. */
13 | NormalText = 1,
14 | /** The special token is treated as the special token it is. If this is applied to a specific text and the text is NOT a special token then an error will be returned. If it is the default action no error will be returned, don't worry. */
15 | Special = 2
16 | }
17 | export function getTokenizer(): Tokenizer
18 | export class Tokenizer {
19 | exactNumTokensCl100KNoSpecialTokens(text: string): Promise
20 | exactNumTokens(text: string, encoding: SupportedEncoding, specialTokenDefaultAction: SpecialTokenAction, specialTokenOverrides: Record): Promise
21 | encodeCl100KNoSpecialTokens(text: string): Promise>
22 | approxNumTokens(text: string, encoding: SupportedEncoding): Promise
23 | encode(text: string, encoding: SupportedEncoding, specialTokenDefaultAction: SpecialTokenAction, specialTokenOverrides: Record): Promise>
24 | encodeSingleToken(bytes: Uint8Array, encoding: SupportedEncoding): Promise
25 | decodeCl100KByte(token: number): Promise
26 | decodeCl100K(encodedTokens: Array): Promise
27 | }
28 | export class SyncTokenizer {
29 | constructor()
30 | approxNumTokens(text: string, encoding: SupportedEncoding): number
31 | }
32 |
--------------------------------------------------------------------------------
/examples/src/priompt-preview-handlers.ts:
--------------------------------------------------------------------------------
1 | import { FastifyInstance } from 'fastify';
2 | import { PreviewManager, PreviewManagerGetPromptQuery, PreviewManagerLiveModeQuery, PreviewManagerLiveModeResultQuery } from '@anysphere/priompt';
3 | import { PreviewManagerGetPromptOutputQuery } from '@anysphere/priompt/dist/preview';
4 |
5 |
6 | export async function handlePriomptPreview(S: FastifyInstance) {
7 | S.get("/priompt/getPreviews", async (_, reply) => {
8 | return reply.type("text/json").send(JSON.stringify(PreviewManager.getPreviews()));
9 | });
10 |
11 | S.get('/priompt/getPrompt', async (request, reply) => {
12 | const query = request.query as PreviewManagerGetPromptQuery;
13 | return reply.type("text/json").send(JSON.stringify(await PreviewManager.getPrompt(query)));
14 | });
15 |
16 | S.get('/priompt/getPromptOutput', async (request, reply) => {
17 | // eslint-disable-next-line @typescript-eslint/no-explicit-any
18 | const newQ = request.query as any;
19 | const query = JSON.parse(newQ.v) as PreviewManagerGetPromptOutputQuery;
20 | const stringified = JSON.stringify(await PreviewManager.getPromptOutput(query));
21 | return reply.type("text/json").send(stringified);
22 | });
23 |
24 | S.get('/priompt/liveMode', async (request, reply) => {
25 | const query = request.query as PreviewManagerLiveModeQuery;
26 |
27 | try {
28 | const output = await PreviewManager.liveMode(query)
29 | await reply.type("text/json").send(JSON.stringify(output));
30 | } catch (error) {
31 | if (error.name === 'AbortError') {
32 | return reply.status(500).send({ error: 'Request aborted' });
33 | } else {
34 | throw error;
35 | }
36 | }
37 | });
38 | S.get("/priompt/liveModeResult", (request, reply) => {
39 | const query = request.query as PreviewManagerLiveModeResultQuery;
40 | PreviewManager.liveModeResult(query);
41 | return reply.type("text/json").send(JSON.stringify({}));
42 | });
43 |
44 | }
--------------------------------------------------------------------------------
/.eslintrc.base.json:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "es2021": true,
4 | "node": true
5 | },
6 | "extends": [
7 | "eslint:recommended",
8 | "plugin:@typescript-eslint/recommended"
9 | ],
10 | "rules": {
11 | "@typescript-eslint/require-array-sort-compare": "error",
12 | "@typescript-eslint/strict-boolean-expressions": ["error"],
13 | "@typescript-eslint/no-floating-promises": ["error", {"ignoreVoid": false}],
14 | "@typescript-eslint/await-thenable": "error",
15 | "@typescript-eslint/no-misused-promises": "error",
16 | "constructor-super": "error",
17 | "eqeqeq": "error",
18 | "@typescript-eslint/switch-exhaustiveness-check": "error",
19 | "@typescript-eslint/no-inferrable-types": "off",
20 | "no-buffer-constructor": "error",
21 | "no-caller": "error",
22 | "no-case-declarations": "error",
23 | "no-debugger": "error",
24 | "no-duplicate-case": "error",
25 | "no-eval": "error",
26 | "no-async-promise-executor": "error",
27 | "no-extra-semi": "error",
28 | "sonarjs/no-ignored-return": "error",
29 | "no-new-wrappers": "error",
30 | "no-redeclare": "off",
31 | "no-sparse-arrays": "error",
32 | "@typescript-eslint/no-unused-vars": "off",
33 | "@typescript-eslint/no-unused-expressions": "error",
34 | "@typescript-eslint/no-empty-function": "off",
35 | "no-throw-literal": "error",
36 | "no-constant-condition": "off",
37 | "no-unsafe-finally": "error",
38 | "no-unused-labels": "error",
39 | "no-restricted-globals": [
40 | "warn",
41 | "name",
42 | "length",
43 | "event",
44 | "closed",
45 | "external",
46 | "status",
47 | "origin",
48 | "orientation",
49 | "context"
50 | ],
51 | "no-var": "error",
52 | "semi": "off",
53 | "@typescript-eslint/naming-convention": [
54 | "error",
55 | {
56 | "selector": "class",
57 | "format": ["PascalCase"]
58 | }
59 | ]
60 | },
61 | "overrides": [],
62 | "parser": "@typescript-eslint/parser",
63 | "parserOptions": {
64 | "ecmaVersion": "latest",
65 | "sourceType": "module",
66 | "project": "./tsconfig.json",
67 | "extraFileExtensions": ["ipynb"]
68 | },
69 | "ignorePatterns": ["node_modules/"],
70 | "plugins": ["@typescript-eslint", "sonarjs"]
71 | }
72 |
--------------------------------------------------------------------------------
/priompt/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@anysphere/priompt",
3 | "license": "MIT",
4 | "version": "0.1.27",
5 | "description": "A JSX-based prompt design library.",
6 | "keywords": [
7 | "prompting",
8 | "prompt design",
9 | "prompt engineering"
10 | ],
11 | "repository": {
12 | "type": "git",
13 | "url": "https://github.com/anysphere/priompt"
14 | },
15 | "homepage": "https://github.com/anysphere/priompt",
16 | "author": "Arvid Lunnemark",
17 | "engines": {
18 | "npm": "please-use-pnpm",
19 | "yarn": "please-use-pnpm",
20 | "pnpm": ">= 8.3.0 < 9",
21 | "node": ">= 18.15.0"
22 | },
23 | "main": "./dist/index.js",
24 | "types": "./dist/index.d.ts",
25 | "scripts": {
26 | "watch": "tsx watch --clear-screen=false src",
27 | "build": "wireit",
28 | "build-watch": "nodemon --watch 'src/**/*' --ext '*' --exec 'pnpm build'",
29 | "lint": "tsc --noEmit && eslint .",
30 | "test": "wireit",
31 | "test:nowatch": "vitest run",
32 | "coverage": "vitest run --coverage"
33 | },
34 | "wireit": {
35 | "build": {
36 | "command": "tsc --build --pretty && cp src/types.d.ts dist/types.d.ts",
37 | "files": [
38 | "src/**/*",
39 | "tsconfig.json"
40 | ],
41 | "output": [
42 | "dist/**/*"
43 | ],
44 | "dependencies": [
45 | "../tiktoken-node:build"
46 | ]
47 | },
48 | "test": {
49 | "command": "vitest",
50 | "dependencies": [
51 | "build"
52 | ]
53 | }
54 | },
55 | "devDependencies": {
56 | "@types/js-yaml": "^4.0.5",
57 | "@types/json-schema": "^7.0.12",
58 | "@types/node": "^20.6.0",
59 | "@typescript-eslint/eslint-plugin": "^5.59.0",
60 | "@typescript-eslint/parser": "^5.59.0",
61 | "@vitest/coverage-v8": "^1.2.2",
62 | "esbuild": "^0.18.20",
63 | "eslint": "^8.38.0",
64 | "nodemon": "^2.0.22",
65 | "npm-run-all": "^4.1.5",
66 | "rimraf": "^5.0.0",
67 | "tiny-glob": "^0.2.9",
68 | "tsx": "^3.12.6",
69 | "typescript": "^5.2.0",
70 | "vitest": "^1.2.2",
71 | "vite": "^5.0.12",
72 | "wireit": "^0.14.0"
73 | },
74 | "dependencies": {
75 | "js-yaml": "https://github.com/anysphere/js-yaml.git#4761daebc257cf86e64bb775ba00696f30d7ff22",
76 | "openai": "^3.3.0",
77 | "@anysphere/tiktoken-node": "workspace:*",
78 | "zod": "^3.21.4",
79 | "zod-to-json-schema": "^3.21.3"
80 | }
81 | }
82 |
--------------------------------------------------------------------------------
/priompt-preview/src/components/ui/button.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react";
2 | import { Slot } from "@radix-ui/react-slot";
3 | import { cva, type VariantProps } from "class-variance-authority";
4 |
5 | import { cn } from "@/lib/utils";
6 |
// Style variants for the Button component (shadcn/ui + class-variance-authority
// pattern). The base string and per-variant strings are unchanged.
const buttonVariants = cva(
  "inline-flex items-center justify-center rounded-md text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-stone-400 disabled:pointer-events-none disabled:opacity-50 dark:focus-visible:ring-stone-800",
  {
    variants: {
      variant: {
        default:
          "bg-stone-900 text-stone-50 shadow hover:bg-stone-900/90 dark:bg-stone-50 dark:text-stone-900 dark:hover:bg-stone-50/90",
        destructive:
          "bg-red-500 text-stone-50 shadow-sm hover:bg-red-500/90 dark:bg-red-900 dark:text-red-50 dark:hover:bg-red-900/90",
        outline:
          "border border-stone-200 bg-white shadow-sm hover:bg-stone-100 hover:text-stone-900 dark:border-stone-800 dark:bg-stone-950 dark:hover:bg-stone-800 dark:hover:text-stone-50",
        secondary:
          "bg-stone-100 text-stone-900 shadow-sm hover:bg-stone-100/80 dark:bg-stone-800 dark:text-stone-50 dark:hover:bg-stone-800/80",
        ghost:
          "hover:bg-stone-100 hover:text-stone-900 dark:hover:bg-stone-800 dark:hover:text-stone-50",
        link: "text-stone-900 underline-offset-4 hover:underline dark:text-stone-50",
      },
      size: {
        default: "h-9 px-1 py-1",
        sm: "h-8 rounded-md px-1 text-xs",
        lg: "h-10 rounded-md px-1",
        icon: "h-9 w-9",
      },
    },
    defaultVariants: {
      variant: "default",
      size: "default",
    },
  }
);

// NOTE(review): the generic arguments and the JSX element below were
// reconstructed — the source had all `<...>` spans stripped (lines 49-52 of
// the original return expression were missing). This follows the standard
// shadcn/ui Button; confirm against upstream.
export interface ButtonProps
  extends React.ButtonHTMLAttributes<HTMLButtonElement>,
    VariantProps<typeof buttonVariants> {
  // When true, renders the child element directly (via Radix Slot) instead
  // of a <button>, while still applying the button classes.
  asChild?: boolean;
}

const Button = React.forwardRef<HTMLButtonElement, ButtonProps>(
  ({ className, variant, size, asChild = false, ...props }, ref) => {
    const Comp = asChild ? Slot : "button";
    return (
      <Comp
        className={cn(buttonVariants({ variant, size, className }))}
        ref={ref}
        {...props}
      />
    );
  }
);
Button.displayName = "Button";

// eslint-disable-next-line react-refresh/only-export-components
export { Button, buttonVariants };
--------------------------------------------------------------------------------
/pull-from-open-source.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Replays commits from the public priompt repository onto this checkout, one
# commit at a time, preserving messages and crediting the original authors
# via Co-authored-by trailers. Sync state is tracked in two marker files.

set -e

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"

# NOTE(review): RED and NC are never defined in this script, so the escape
# expansions below are empty and the message prints uncolored.
if [[ -n $(git status --porcelain) ]]; then
echo -e "${RED}Your git state is not empty. Aborting the script...${NC}"
exit 1
fi

# make sure we are on main, otherwise print warning
if [[ $(git branch --show-current) != "main" ]]; then
echo "WARNING: You are not on main branch, please switch to main branch before running this script."
exit 1
fi

# Clone the open-source repo on first run.
if [[ ! -d "$SCRIPT_DIR/priompt-opensource" ]]; then
git clone git@github.com:anysphere/priompt "$SCRIPT_DIR/priompt-opensource"
fi

# Reset the open-source checkout to a clean, up-to-date main before syncing.
cd "$SCRIPT_DIR/priompt-opensource"
git checkout main
git checkout -- . || true
git restore --staged . || true
git checkout -- . || true
git clean -fd . || true
git pull
if [[ -n $(git status --porcelain) ]]; then
echo -e "${RED}Your git state inside priompt-opensource is not empty. Aborting the script...${NC}"
exit 1
fi

# Commits made in the open-source repo since the last recorded sync point.
LAST_SYNCED_COMMIT=$(cat "$SCRIPT_DIR/../priompt-last-open-source-synced-commit.txt")
echo "LAST_SYNCED_COMMIT: $LAST_SYNCED_COMMIT"
COMMIT_IDS=$(git rev-list --reverse HEAD...$LAST_SYNCED_COMMIT)

echo "Commit IDs:"
echo $COMMIT_IDS

for COMMIT_ID in $COMMIT_IDS
do
cd "$SCRIPT_DIR/priompt-opensource"
# Export the commit as a patch file.
git show $COMMIT_ID > "$SCRIPT_DIR/commit.patch"
# NOTE(review): these two substitutions replace a pattern with itself and are
# no-ops as written — the path prefixes being rewritten appear to have been
# lost from these lines; confirm against the canonical version of this script.
sd 'a/' 'a/' "$SCRIPT_DIR/commit.patch"
sd 'b/' 'b/' "$SCRIPT_DIR/commit.patch"
cd "$SCRIPT_DIR/../../.."
git apply "$SCRIPT_DIR/commit.patch"
git add .
# Reuse the original commit message and credit the original author.
COMMIT_MSG=$(cd $SCRIPT_DIR/priompt-opensource && git log -1 --pretty=%B $COMMIT_ID | tr -d '\r')
echo "$COMMIT_MSG" > "$SCRIPT_DIR/commit.template"
echo -e "\n\n" >> "$SCRIPT_DIR/commit.template"
COMMIT_AUTHOR=$(cd $SCRIPT_DIR/priompt-opensource && git log -1 --pretty=%an $COMMIT_ID)
COMMIT_EMAIL=$(cd $SCRIPT_DIR/priompt-opensource && git log -1 --pretty=%ae $COMMIT_ID)
echo "Co-authored-by: $COMMIT_AUTHOR <$COMMIT_EMAIL>" >> "$SCRIPT_DIR/commit.template"
echo -e "\n\n" >> "$SCRIPT_DIR/commit.template"
# Append the patch itself as commented lines so it is visible while editing
# the commit message in the template.
FULL_COMMIT=$(cd $SCRIPT_DIR && cat "$SCRIPT_DIR/commit.patch")
echo "$FULL_COMMIT" | while IFS= read -r line
do
echo -e "# $line" >> "$SCRIPT_DIR/commit.template"
done
git commit --template="$SCRIPT_DIR/commit.template"
# Record the new sync points on both sides.
COMMIT_ID_MAIN=$(git rev-parse HEAD)
echo "$COMMIT_ID_MAIN" > "$SCRIPT_DIR/../priompt-last-internal-synced-commit.txt"
echo "$COMMIT_ID" > "$SCRIPT_DIR/../priompt-last-open-source-synced-commit.txt"
done

echo "DONE! Now please push inside the main repo."
--------------------------------------------------------------------------------
/push-to-open-source.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Replays internal commits that touched backend/packages/priompt onto the
# public priompt repository, preserving messages and crediting the original
# authors via Co-authored-by trailers. Sync state is tracked in marker files.

set -e

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"

# NOTE(review): RED and NC are never defined in this script, so the escape
# expansions below are empty and the message prints uncolored.
if [[ -n $(git status --porcelain) ]]; then
echo -e "${RED}Your git state is not empty. Aborting the script...${NC}"
exit 1
fi

# make sure we are on main, otherwise print warning
if [[ $(git branch --show-current) != "main" ]]; then
echo "WARNING: You are not on main branch, please switch to main branch before running this script."
exit 1
fi

# copy over the eslintrc.base.json
cp -f "$SCRIPT_DIR"/../../.eslintrc.base.json "$SCRIPT_DIR"/.eslintrc.base.json
if [[ -n $(git status --porcelain) ]]; then
git add .
git commit -m "update the eslintrc.base.json"
fi

# Clone the open-source repo on first run.
if [[ ! -d "$SCRIPT_DIR/priompt-opensource" ]]; then
git clone git@github.com:anysphere/priompt "$SCRIPT_DIR/priompt-opensource"
fi

# Reset the open-source checkout to a clean main.
cd "$SCRIPT_DIR/priompt-opensource"
git checkout main
git checkout -- . || true
git restore --staged . || true
git checkout -- . || true
git clean -fd . || true
# git pull
# NOTE(review): the pull above is commented out, so the open-source checkout
# is NOT updated before patches are applied — confirm this is intentional.
if [[ -n $(git status --porcelain) ]]; then
echo -e "${RED}Your git state inside priompt-opensource is not empty. Aborting the script...${NC}"
exit 1
fi
cd "$SCRIPT_DIR"

cd "$SCRIPT_DIR/../../.."

# Internal commits since the last recorded sync point that touched priompt.
LAST_SYNCED_COMMIT=$(cat "$SCRIPT_DIR/../priompt-last-internal-synced-commit.txt")
echo "LAST_SYNCED_COMMIT: $LAST_SYNCED_COMMIT"
COMMIT_IDS=$(git rev-list --reverse HEAD...$LAST_SYNCED_COMMIT -- "backend/packages/priompt")


echo "Commit IDs:"
echo $COMMIT_IDS

for COMMIT_ID in $COMMIT_IDS
do
# Export only the priompt-related part of the commit as a patch.
git show $COMMIT_ID -- "backend/packages/priompt" > "$SCRIPT_DIR/commit.patch"
# NOTE(review): an empty-pattern substitution is a no-op — the path prefixes
# being rewritten appear to have been lost from this line; confirm against
# the canonical version of this script.
sd '' '' "$SCRIPT_DIR/commit.patch"
cd "$SCRIPT_DIR/priompt-opensource"
git apply "$SCRIPT_DIR/commit.patch"
git add .
# Reuse the original commit message and credit the original author.
COMMIT_MSG=$(cd $SCRIPT_DIR && git log -1 --pretty=%B $COMMIT_ID | tr -d '\r')
echo "$COMMIT_MSG" > "$SCRIPT_DIR/commit.template"
echo -e "\n\n" >> "$SCRIPT_DIR/commit.template"
COMMIT_AUTHOR=$(cd $SCRIPT_DIR && git log -1 --pretty=%an $COMMIT_ID)
COMMIT_EMAIL=$(cd $SCRIPT_DIR && git log -1 --pretty=%ae $COMMIT_ID)
echo "Co-authored-by: $COMMIT_AUTHOR <$COMMIT_EMAIL>" >> "$SCRIPT_DIR/commit.template"
echo -e "\n\n" >> "$SCRIPT_DIR/commit.template"
# Append the patch itself as commented lines so it is visible while editing
# the commit message in the template.
FULL_COMMIT=$(cd $SCRIPT_DIR && cat "$SCRIPT_DIR/commit.patch")
echo "$FULL_COMMIT" | while IFS= read -r line
do
echo -e "# $line" >> "$SCRIPT_DIR/commit.template"
done
git commit --template="$SCRIPT_DIR/commit.template"
# Record the new sync points on both sides.
COMMIT_ID_OPENSOURCE=$(git rev-parse HEAD)
cd -
echo "$COMMIT_ID_OPENSOURCE" > "$SCRIPT_DIR/../priompt-last-open-source-synced-commit.txt"
echo "$COMMIT_ID" > "$SCRIPT_DIR/../priompt-last-internal-synced-commit.txt"
done

echo "DONE! Now please push inside the open source folder."
--------------------------------------------------------------------------------
/priompt-preview/scripts/serve.cjs:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 | /* eslint-disable no-undef */
3 | /* eslint-disable @typescript-eslint/no-var-requires */
4 |
const http = require('http');
const fs = require('fs');
const path = require('path');
const url = require('url'); // Added this line

// Port this preview UI is served on, and the port of the running priompt
// server the UI should talk to (substituted into the built assets below).
const port = process.env.PRIOMPT_PREVIEW_PORT || 6283;
const server_port = process.env.PRIOMPT_PREVIEW_SERVER_PORT;
if (!server_port) {
  console.error('PRIOMPT_PREVIEW_SERVER_PORT is not set. It needs to be set to the port where the priompt server is running.');
  process.exit(1);
}
const distPath = path.join(path.dirname(__dirname), 'dist');

const requestListener = (req, res) => {
  const parsedUrl = url.parse(req.url); // Parse the URL
  // Resolve the requested file inside dist, defaulting to index.html.
  const filePath = path.normalize(
    path.join(distPath, parsedUrl.pathname === '/' ? 'index.html' : parsedUrl.pathname)
  );
  // Security: reject paths that escape dist (e.g. "GET /../../etc/passwd").
  // The previous code joined the raw pathname with no containment check.
  if (filePath !== distPath && !filePath.startsWith(distPath + path.sep)) {
    res.writeHead(403);
    res.end('Forbidden');
    return;
  }
  const extname = String(path.extname(filePath)).toLowerCase();
  const mimeTypes = {
    '.html': 'text/html',
    '.js': 'application/javascript',
    '.css': 'text/css',
    '.jpg': 'image/jpeg',
    '.jpeg': 'image/jpeg',
    '.png': 'image/png',
    '.gif': 'image/gif',
    '.svg': 'image/svg+xml',
    // Add more MIME types if needed
  };

  const contentType = mimeTypes[extname] || 'application/octet-stream';

  fs.readFile(filePath, (err, data) => {
    if (err) {
      if (err.code === 'ENOENT') {
        res.writeHead(404);
        res.end('Not found');
      } else {
        res.writeHead(500);
        res.end('Error loading file');
      }
    } else {
      res.writeHead(200, { 'Content-Type': contentType });
      // Rewrite the dev-server host baked into the built assets so the UI
      // talks to the configured priompt server port.
      if (data.toString().includes('localhost:3000')) {
        data = data.toString().replace(/localhost:3000/g, `localhost:${server_port}`);
      }
      // Optionally override the model lists baked into the assets. Note the
      // split(',') arrays are coerced back to comma-joined strings by replace.
      if (process.env.PRIOMPT_PREVIEW_MODELS) {
        const models = process.env.PRIOMPT_PREVIEW_MODELS.split(',');
        data = data.toString().replace(/gpt-3.5-turbo,gpt-4,gpt-4-32k/, models);
      }
      if (process.env.PRIOMPT_PREVIEW_COMPLETION_MODELS) {
        const completionModels = process.env.PRIOMPT_PREVIEW_COMPLETION_MODELS.split(',');
        data = data.toString().replace(/text-davinci-003,code-davinci-002/, completionModels);
      }

      // Substitute secrets/config placeholders in html/js assets only.
      if ((extname === '.html' || extname === '.js') && data.toString().includes('PRIOMPT_PREVIEW_OPENAI_KEY')) {
        data = data.toString().replace(/PRIOMPT_PREVIEW_OPENAI_KEY/g, `${process.env.PRIOMPT_PREVIEW_OPENAI_KEY}`);
      }
      if ((extname === '.html' || extname === '.js') && data.toString().includes('PRIOMPT_PREVIEW_OSS_ENDPOINTS_JSON_STRING')) {
        data = data.toString().replace(/PRIOMPT_PREVIEW_OSS_ENDPOINTS_JSON_STRING/g, `${process.env.PRIOMPT_PREVIEW_OSS_ENDPOINTS_JSON_STRING ?? "PRIOMPT_PREVIEW_OSS_ENDPOINTS_JSON_STRING"}`);
      }
      res.end(data);
    }
  });
};

const server = http.createServer(requestListener);
server.listen(port, () => {
  console.log(`Server is running on http://localhost:${port}`);
});
75 |
--------------------------------------------------------------------------------
/priompt-preview/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@anysphere/priompt-preview",
3 | "license": "MIT",
4 | "version": "0.1.27",
5 | "description": "An interactive preview of priompt prompts.",
6 | "repository": {
7 | "type": "git",
8 | "url": "https://github.com/anysphere/priompt"
9 | },
10 | "homepage": "https://github.com/anysphere/priompt",
11 | "author": "Arvid Lunnemark",
12 | "type": "module",
13 | "scripts": {
14 | "dev": "vite",
15 | "build": "wireit",
16 | "build-watch": "nodemon --watch 'src/**/*' --ext '*' --exec 'pnpm build'",
17 | "lint": "wireit",
18 | "tsc-build": "wireit",
19 | "priompt:build": "wireit",
20 | "preview": "vite preview"
21 | },
22 | "wireit": {
23 | "build": {
24 | "command": "vite build",
25 | "files": [
26 | "src/**/*",
27 | "tsconfig.json"
28 | ],
29 | "output": [
30 | "dist/**/*"
31 | ],
32 | "clean": "if-file-deleted",
33 | "dependencies": [
34 | "tsc-build"
35 | ]
36 | },
37 | "tsc-build": {
38 | "command": "tsc --build --pretty",
39 | "files": [
40 | "src/**/*",
41 | "tsconfig.json"
42 | ],
43 | "output": [
44 | "dist/**/*"
45 | ],
46 | "dependencies": [
47 | "priompt:build"
48 | ],
49 | "clean": "if-file-deleted"
50 | },
51 | "priompt:build": {
52 | "command": "pnpm i",
53 | "dependencies": [
54 | "../priompt:build"
55 | ],
56 | "files": [],
57 | "output": []
58 | },
59 | "lint": {
60 | "command": "eslint src --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
61 | "files": [
62 | "src/**/*",
63 | "tsconfig.json"
64 | ],
65 | "clean": "if-file-deleted",
66 | "dependencies": [
67 | "priompt:build"
68 | ]
69 | }
70 | },
71 | "dependencies": {
72 | "@radix-ui/react-dialog": "^1.0.5",
73 | "@radix-ui/react-icons": "^1.3.0",
74 | "@radix-ui/react-slot": "^1.0.2",
75 | "class-variance-authority": "^0.7.0",
76 | "clsx": "^2.0.0",
77 | "cmdk": "^0.2.0",
78 | "js-tiktoken": "^1.0.7",
79 | "lucide-react": "^0.263.1",
80 | "tailwind-merge": "^1.14.0",
81 | "tailwindcss-animate": "^1.0.6",
82 | "uuid": "^9.0.0",
83 | "@anysphere/priompt": "workspace:*"
84 | },
85 | "devDependencies": {
86 | "@types/react": "^18.0.28",
87 | "@types/react-dom": "^18.0.11",
88 | "@types/uuid": "^9.0.1",
89 | "@typescript-eslint/eslint-plugin": "^5.57.1",
90 | "@typescript-eslint/parser": "^5.57.1",
91 | "@vitejs/plugin-react": "^4.0.0",
92 | "autoprefixer": "^10.4.14",
93 | "axios": "^0.26.1",
94 | "eslint": "^8.38.0",
95 | "eslint-plugin-react-hooks": "^4.6.0",
96 | "eslint-plugin-react-refresh": "^0.3.4",
97 | "nodemon": "^2.0.22",
98 | "postcss": "^8.4.27",
99 | "react": "^18.2.0",
100 | "react-dom": "^18.2.0",
101 | "tailwindcss": "^3.3.3",
102 | "typescript": "^5.2.0",
103 | "use-debounce": "^9.0.4",
104 | "vite": "^4.3.2",
105 | "wireit": "^0.14.0"
106 | },
107 | "bin": {
108 | "serve": "./scripts/serve.cjs"
109 | }
110 | }
111 |
--------------------------------------------------------------------------------
/examples/src/prompt.tsx:
--------------------------------------------------------------------------------
1 | import * as Priompt from "@anysphere/priompt";
2 | import {
3 | PreviewConfig,
4 | PreviewManager,
5 | PromptElement,
6 | PromptProps,
7 | SystemMessage,
8 | UserMessage,
9 | } from "@anysphere/priompt";
10 |
// Preview configuration so this prompt can be inspected in priompt-preview.
// NOTE(review): the generic argument and the JSX tags in this block were
// reconstructed — the source had all `<...>` spans stripped; confirm against
// the upstream examples.
const ExamplePromptConfig: PreviewConfig<ExamplePromptProps> = {
  id: "examplePrompt",
  prompt: ExamplePrompt,
};
PreviewManager.registerConfig(ExamplePromptConfig);

export type ExamplePromptProps = PromptProps<{
  name: string;
  message: string;
}>;

export function ExamplePrompt(
  props: ExamplePromptProps,
  args?: { dump?: boolean }
): PromptElement {
  // Optionally dump the props so this invocation shows up in the previewer.
  if (args?.dump === true) {
    PreviewManager.dump(ExamplePromptConfig, props);
  }
  return (
    <>
      <SystemMessage>
        The user's name is {props.name}. Please always greet them in an
        extremely formal, medieval style, with lots of fanfare. Then seamlessly
        proceed to reply to their message in the most casual, 2010s, cool dude
        texting style. Please be over-the-top in both respects, and make the
        transition seem like it never happened.
      </SystemMessage>
      <UserMessage>{props.message}</UserMessage>
      {/* NOTE(review): reconstructed; presumably reserves room for the reply — confirm. */}
      <empty tokens={1000} />
    </>
  );
}
43 |
PreviewManager.register(SimplePrompt);
// Asks the model whether `text` is in `language`, and resolves the prompt's
// return value to a boolean via the capture handler.
// NOTE(review): the JSX tags in this block were reconstructed — the source
// had all `<...>` spans stripped; confirm against the upstream examples.
export function SimplePrompt(
  props: PromptProps<
    {
      language: string;
      text: string;
    },
    boolean
  >
): PromptElement {
  return (
    <>
      <SystemMessage>
        Please determine if the following text is in {props.language}. If it is,
        please reply with "yes". If it is not, please reply with "no". Do not
        output anything else.
      </SystemMessage>
      <UserMessage>{props.text}</UserMessage>
      <empty tokens={1000} />
      <capture
        onOutput={async (output) => {
          if (output.content?.toLowerCase().includes("yes") === true) {
            return await props.onReturn(true);
          } else if (output.content?.toLowerCase().includes("no") === true) {
            return await props.onReturn(false);
          }
          // bad
          throw new Error(`Invalid output: ${output.content}`);
        }}
      />
    </>
  );
}
77 |
PreviewManager.register(ArvidStory);
// Streams a short story back to the caller, rewriting every lowercase "r" to
// "j" on the fly.
// NOTE(review): the generic arguments and JSX tags in this block were
// reconstructed — the source had all `<...>` spans stripped; confirm the
// PromptProps type parameters against the upstream examples.
export function ArvidStory(
  props: PromptProps<undefined, AsyncIterable<string>>
): PromptElement {
  return (
    <>
      <UserMessage>
        Please write a short story about a young boy named Arvid. Only a
        paragraph please.
      </UserMessage>
      <empty tokens={1000} />
      <capture
        onStream={async (stream) => {
          // we want to replace every R with a J
          await props.onReturn(
            (async function* () {
              for await (const chunk of stream) {
                if (chunk.content === undefined) {
                  continue;
                }
                yield chunk.content.replace(/r/g, "j");
              }
            })()
          );
        }}
      />
    </>
  );
}
107 |
--------------------------------------------------------------------------------
/priompt/src/tokenizer.ts:
--------------------------------------------------------------------------------
1 | // we use tiktoken-node instead of @dqbd/tiktoken because the latter one, while having more
2 | // github stars and being more supported, is extremely slow
3 | // the @dqbq/tiktoken runs tiktoken in wasm, and since we're in node there is no reason
4 | // for us not to use napi bindings to run the native tiktoken
5 | // it may be well worth forking tiktoken-node though, as it is not super well maintained
6 | // and we probably want to compile our own tiktoken because i'm slightly worried about
7 | // supply-chain attacks here
8 | import tiktoken, { getTokenizer, SyncTokenizer } from '@anysphere/tiktoken-node';
9 | import { UsableTokenizer } from './openai';
10 |
11 |
12 | export const tokenizerObject = tiktoken.getTokenizer();
13 | export const syncTokenizer = new SyncTokenizer();
14 |
15 | export async function numTokens(text: string, opts: {
16 | tokenizer: UsableTokenizer;
17 | }) {
18 | const tokenizerName = opts.tokenizer;
19 |
20 | switch (tokenizerName) {
21 | case 'cl100k_base':
22 | return await tokenizerObject.exactNumTokensCl100KNoSpecialTokens(text);
23 | case 'cl100k_base_special_tokens':
24 | return await tokenizerObject.exactNumTokens(text, tiktoken.SupportedEncoding.Cl100k, tiktoken.SpecialTokenAction.Special, {});
25 | default:
26 | throw new Error(`Unknown tokenizer ${tokenizerName}`);
27 | }
28 | }
29 |
30 | // if you tokenize a lot of tokens, this can block the event loop
31 | // only use this in a data job or with very few tokens
32 | export function estimateNumTokensFast_SYNCHRONOUS_BE_CAREFUL(text: string, opts: {
33 | tokenizer: UsableTokenizer;
34 | }) {
35 | const tokenizerName = opts.tokenizer;
36 |
37 | switch (tokenizerName) {
38 | case 'cl100k_base':
39 | case 'cl100k_base_special_tokens':
40 | return syncTokenizer.approxNumTokens(text, tiktoken.SupportedEncoding.Cl100k);
41 | default:
42 | throw new Error(`Unknown tokenizer ${tokenizerName}`);
43 | }
44 | }
45 |
46 |
47 | export async function encodeTokens(text: string, opts: {
48 | tokenizer: UsableTokenizer;
49 | }): Promise {
50 | const tokenizerName = opts.tokenizer;
51 |
52 | switch (tokenizerName) {
53 | case 'cl100k_base':
54 | return await tokenizerObject.encodeCl100KNoSpecialTokens(text);
55 | case 'cl100k_base_special_tokens':
56 | return await tokenizerObject.encode(text, tiktoken.SupportedEncoding.Cl100k, tiktoken.SpecialTokenAction.Special, {});
57 | default:
58 | throw new Error(`Unknown tokenizer ${tokenizerName}`);
59 | }
60 | }
61 |
62 | const encoder = new TextEncoder();
63 | // returns a very conservative [lower, upper] bound on the number of tokens
64 | export function estimateTokensUsingBytecount(text: string, tokenizer: UsableTokenizer): [number, number] {
65 | const byteLength = encoder.encode(text).length;
66 | switch (tokenizer) {
67 | case 'cl100k_base':
68 | return [byteLength / 10, byteLength / 2.5];
69 | case 'cl100k_base_special_tokens':
70 | return [byteLength / 10, byteLength / 2.5];
71 | default:
72 | // conservative!
73 | return [byteLength / 10, byteLength / 2];
74 | }
75 | }
76 | export function estimateTokensUsingCharcount(text: string, tokenizer: UsableTokenizer): [number, number] {
77 | const length = text.length;
78 | switch (tokenizer) {
79 | case 'cl100k_base':
80 | return [length / 10, length / 1.5];
81 | case 'cl100k_base_special_tokens':
82 | return [length / 10, length / 1.5];
83 | default:
84 | // conservative!
85 | return [length / 10, length];
86 | }
87 | }
--------------------------------------------------------------------------------
/tiktoken-node/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by https://www.toptal.com/developers/gitignore/api/node
2 | # Edit at https://www.toptal.com/developers/gitignore?templates=node
3 |
4 | ### Node ###
5 | # Logs
6 | logs
7 | *.log
8 | npm-debug.log*
9 | yarn-debug.log*
10 | yarn-error.log*
11 | lerna-debug.log*
12 |
13 | # Diagnostic reports (https://nodejs.org/api/report.html)
14 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
15 |
16 | # Runtime data
17 | pids
18 | *.pid
19 | *.seed
20 | *.pid.lock
21 |
22 | # Directory for instrumented libs generated by jscoverage/JSCover
23 | lib-cov
24 |
25 | # Coverage directory used by tools like istanbul
26 | coverage
27 | *.lcov
28 |
29 | # nyc test coverage
30 | .nyc_output
31 |
32 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
33 | .grunt
34 |
35 | # Bower dependency directory (https://bower.io/)
36 | bower_components
37 |
38 | # node-waf configuration
39 | .lock-wscript
40 |
41 | # Compiled binary addons (https://nodejs.org/api/addons.html)
42 | build/Release
43 |
44 | # Dependency directories
45 | node_modules/
46 | jspm_packages/
47 |
48 | # TypeScript v1 declaration files
49 | typings/
50 |
51 | # TypeScript cache
52 | *.tsbuildinfo
53 |
54 | # Optional npm cache directory
55 | .npm
56 |
57 | # Optional eslint cache
58 | .eslintcache
59 |
60 | # Microbundle cache
61 | .rpt2_cache/
62 | .rts2_cache_cjs/
63 | .rts2_cache_es/
64 | .rts2_cache_umd/
65 |
66 | # Optional REPL history
67 | .node_repl_history
68 |
69 | # Output of 'npm pack'
70 | *.tgz
71 |
72 | # Yarn Integrity file
73 | .yarn-integrity
74 |
75 | # dotenv environment variables file
76 | .env
77 | .env.test
78 |
79 | # parcel-bundler cache (https://parceljs.org/)
80 | .cache
81 |
82 | # Next.js build output
83 | .next
84 |
85 | # Nuxt.js build / generate output
86 | .nuxt
87 | dist
88 |
89 | # Gatsby files
90 | .cache/
91 | # Comment in the public line in if your project uses Gatsby and not Next.js
92 | # https://nextjs.org/blog/next-9-1#public-directory-support
93 | # public
94 |
95 | # vuepress build output
96 | .vuepress/dist
97 |
98 | # Serverless directories
99 | .serverless/
100 |
101 | # FuseBox cache
102 | .fusebox/
103 |
104 | # DynamoDB Local files
105 | .dynamodb/
106 |
107 | # TernJS port file
108 | .tern-port
109 |
110 | # Stores VSCode versions used for testing VSCode extensions
111 | .vscode-test
112 |
113 | # End of https://www.toptal.com/developers/gitignore/api/node
114 |
115 | # Created by https://www.toptal.com/developers/gitignore/api/macos
116 | # Edit at https://www.toptal.com/developers/gitignore?templates=macos
117 |
118 | ### macOS ###
119 | # General
120 | .DS_Store
121 | .AppleDouble
122 | .LSOverride
123 |
124 | # Icon must end with two
125 | Icon
126 |
127 |
128 | # Thumbnails
129 | ._*
130 |
131 | # Files that might appear in the root of a volume
132 | .DocumentRevisions-V100
133 | .fseventsd
134 | .Spotlight-V100
135 | .TemporaryItems
136 | .Trashes
137 | .VolumeIcon.icns
138 | .com.apple.timemachine.donotpresent
139 |
140 | # Directories potentially created on remote AFP share
141 | .AppleDB
142 | .AppleDesktop
143 | Network Trash Folder
144 | Temporary Items
145 | .apdisk
146 |
147 | ### macOS Patch ###
148 | # iCloud generated files
149 | *.icloud
150 |
151 | # End of https://www.toptal.com/developers/gitignore/api/macos
152 |
153 | # Created by https://www.toptal.com/developers/gitignore/api/windows
154 | # Edit at https://www.toptal.com/developers/gitignore?templates=windows
155 |
156 | ### Windows ###
157 | # Windows thumbnail cache files
158 | Thumbs.db
159 | Thumbs.db:encryptable
160 | ehthumbs.db
161 | ehthumbs_vista.db
162 |
163 | # Dump file
164 | *.stackdump
165 |
166 | # Folder config file
167 | [Dd]esktop.ini
168 |
169 | # Recycle Bin used on file shares
170 | $RECYCLE.BIN/
171 |
172 | # Windows Installer files
173 | *.cab
174 | *.msi
175 | *.msix
176 | *.msm
177 | *.msp
178 |
179 | # Windows shortcuts
180 | *.lnk
181 |
182 | # End of https://www.toptal.com/developers/gitignore/api/windows
183 |
184 | #Added by cargo
185 |
186 | /target
187 | Cargo.lock
188 |
189 | .pnp.*
190 | .yarn/*
191 | !.yarn/patches
192 | !.yarn/plugins
193 | !.yarn/releases
194 | !.yarn/sdks
195 | !.yarn/versions
196 |
197 | *.node
198 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish
2 |
3 | on:
4 | workflow_dispatch:
5 | push:
6 | branches: [main, publish]
7 | pull_request:
8 |
9 | env:
10 | DEBUG: "napi:*"
11 | MACOSX_DEPLOYMENT_TARGET: "10.13"
12 |
13 | jobs:
14 | build:
15 | runs-on: ubuntu-latest
16 | defaults:
17 | run:
18 | working-directory: tiktoken-node
19 | strategy:
20 | matrix:
21 | target:
22 | - x86_64-pc-windows-msvc
23 | - x86_64-unknown-linux-gnu
24 | - aarch64-unknown-linux-gnu
25 | - x86_64-apple-darwin
26 | - aarch64-apple-darwin
27 |
28 | steps:
29 | - uses: actions/checkout@v3
30 |
31 | - name: Setup Node
32 | uses: actions/setup-node@v3
33 | with:
34 | node-version: 20.6.1
35 |
36 | - uses: anysphere/action-setup@c3b53f6a16e57305370b4ae5a540c2077a1d50dd
37 | name: Install pnpm
38 | id: pnpm-install
39 | with:
40 | version: "=8.6.0"
41 |
42 | - name: Setup Rust
43 | uses: dtolnay/rust-toolchain@stable
44 | with:
45 | toolchain: stable
46 | targets: ${{ matrix.target }}
47 |
48 | - name: Run init.sh
49 | working-directory: .
50 | run: ./init.sh
51 |
52 | - uses: Swatinem/rust-cache@v2
53 |
54 | - name: Install ziglang
55 | uses: goto-bus-stop/setup-zig@v1
56 | with:
57 | version: 0.10.0
58 |
59 | - run: cargo install cargo-xwin
60 | if: matrix.target == 'x86_64-pc-windows-msvc'
61 |
62 | - name: Check formatting
63 | run: cargo fmt --all --check
64 |
65 | - name: Node install
66 | run: pnpm i
67 |
      - name: Build with zig cross-compile (macOS targets, aarch64 Linux)
69 | if: matrix.target != 'x86_64-pc-windows-msvc' && matrix.target != 'x86_64-unknown-linux-gnu'
70 | run: pnpm run build -- --zig --target ${{ matrix.target }}
71 |
      - name: Build natively (Windows, x86_64 Linux)
73 | if: matrix.target == 'x86_64-pc-windows-msvc' || matrix.target == 'x86_64-unknown-linux-gnu'
74 | run: pnpm run build -- --target ${{ matrix.target }}
75 |
76 | - name: Upload artifact
77 | uses: actions/upload-artifact@v3
78 | with:
79 | name: bindings-${{ matrix.target }}
80 | path: tiktoken-node/tiktoken-node.*.node
81 | if-no-files-found: error
82 |
83 | publish:
84 | if: ${{ github.repository == 'anysphere/priompt' && github.event_name == 'push' && github.ref == 'refs/heads/publish' }}
85 | runs-on: ubuntu-20.04
86 | needs: build
87 |
88 | steps:
89 | - uses: actions/checkout@v3
90 |
91 | - name: Setup Node
92 | uses: actions/setup-node@v3
93 | with:
94 | node-version: 20.6.1
95 |
96 | - uses: anysphere/action-setup@c3b53f6a16e57305370b4ae5a540c2077a1d50dd
97 | name: Install pnpm
98 | id: pnpm-install
99 | with:
100 | version: "=8.6.0"
101 |
102 | - name: Run init.sh
103 | working-directory: .
104 | run: ./init.sh
105 |
106 | - name: Download build
107 | uses: actions/download-artifact@v3
108 | with:
109 | path: tiktoken-node/artifacts
110 |
111 | - name: LS artifacts
112 | run: ls -R tiktoken-node/artifacts
113 | shell: bash
114 |
115 | - name: Move artifacts
116 | working-directory: tiktoken-node
117 | run: pnpm artifacts
118 |
119 | - name: LS post-move
120 | run: ls -R tiktoken-node/npm
121 | shell: bash
122 |
123 | - name: npm version
124 | run: npm --version
125 | shell: bash
126 |
127 | - name: Build priompt
128 | working-directory: priompt
129 | run: pnpm build
130 |
131 | - name: Build priompt-preview
132 | working-directory: priompt-preview
133 | run: pnpm build
134 |
135 | - name: globally install napi-rs
136 | run: npm install -g @napi-rs/cli
137 |
138 | - name: Set publishing config
139 | run: pnpm config set '//registry.npmjs.org/:_authToken' "${NODE_AUTH_TOKEN}"
140 | env:
141 | NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}}
142 |
143 | - name: Publish to npm
144 | run: pnpm publish --recursive --access=public --no-git-checks
145 |
--------------------------------------------------------------------------------
/priompt-preview/src/components/ui/dialog.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import * as DialogPrimitive from "@radix-ui/react-dialog"
3 | import { Cross2Icon } from "@radix-ui/react-icons"
4 |
5 | import { cn } from "@/lib/utils"
6 |
7 | const Dialog = DialogPrimitive.Root
8 |
9 | const DialogTrigger = DialogPrimitive.Trigger
10 |
11 | const DialogPortal = DialogPrimitive.Portal
12 |
13 | const DialogClose = DialogPrimitive.Close
14 |
15 | const DialogOverlay = React.forwardRef<
16 | React.ElementRef,
17 | React.ComponentPropsWithoutRef
18 | >(({ className, ...props }, ref) => (
19 |
27 | ))
28 | DialogOverlay.displayName = DialogPrimitive.Overlay.displayName
29 |
30 | const DialogContent = React.forwardRef<
31 | React.ElementRef,
32 | React.ComponentPropsWithoutRef
33 | >(({ className, children, ...props }, ref) => (
34 |
35 |
36 |
44 | {children}
45 |
46 |
47 | Close
48 |
49 |
50 |
51 | ))
52 | DialogContent.displayName = DialogPrimitive.Content.displayName
53 |
54 | const DialogHeader = ({
55 | className,
56 | ...props
57 | }: React.HTMLAttributes) => (
58 |
65 | )
66 | DialogHeader.displayName = "DialogHeader"
67 |
68 | const DialogFooter = ({
69 | className,
70 | ...props
71 | }: React.HTMLAttributes) => (
72 |
79 | )
80 | DialogFooter.displayName = "DialogFooter"
81 |
82 | const DialogTitle = React.forwardRef<
83 | React.ElementRef,
84 | React.ComponentPropsWithoutRef
85 | >(({ className, ...props }, ref) => (
86 |
94 | ))
95 | DialogTitle.displayName = DialogPrimitive.Title.displayName
96 |
97 | const DialogDescription = React.forwardRef<
98 | React.ElementRef,
99 | React.ComponentPropsWithoutRef
100 | >(({ className, ...props }, ref) => (
101 |
106 | ))
107 | DialogDescription.displayName = DialogPrimitive.Description.displayName
108 |
109 | export {
110 | Dialog,
111 | DialogPortal,
112 | DialogOverlay,
113 | DialogTrigger,
114 | DialogClose,
115 | DialogContent,
116 | DialogHeader,
117 | DialogFooter,
118 | DialogTitle,
119 | DialogDescription,
120 | }
121 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Priompt
2 |
3 | Priompt (_priority + prompt_) is a JSX-based prompting library. It uses priorities to decide what to include in the context window.
4 |
5 | Priompt is an attempt at a _prompt design_ library, inspired by web design libraries like React. Read more about the motivation [here](https://arvid.xyz/prompt-design).
6 |
7 | ## Installation
8 |
9 | Install from npm:
10 |
11 | ```bash
12 | npm install @anysphere/priompt && npm install -D @anysphere/priompt-preview
13 | ```
14 |
15 | or
16 |
17 | ```bash
18 | yarn add @anysphere/priompt && yarn add @anysphere/priompt-preview --dev
19 | ```
20 |
21 | or
22 |
23 | ```bash
24 | pnpm add @anysphere/priompt && pnpm add -D @anysphere/priompt-preview
25 | ```
26 |
27 | ## Examples
28 |
29 | Read [examples/README.md](examples/README.md) to run the examples.
30 |
31 | ## Principles
32 |
33 | Prompts are rendered from a JSX component, which can look something like this:
34 |
35 | ```jsx
36 | function ExamplePrompt(
37 | props: PromptProps<{
38 | name: string,
39 | message: string,
    history: { case: "user" | "assistant", message: string }[],
41 | }>
42 | ): PromptElement {
43 | const capitalizedName = props.name[0].toUpperCase() + props.name.slice(1);
44 | return (
45 | <>
46 |
47 | The user's name is {capitalizedName}. Please respond to them kindly.
48 |
49 | {props.history.map((m, i) => (
50 |
51 | {m.case === "user" ? (
52 | {m.message}
53 | ) : (
54 | {m.message}
55 | )}
56 |
57 | ))}
58 | {props.message}
59 |
60 | >
61 | );
62 | }
63 | ```
64 |
65 | A component is rendered only once. Each child has a priority, where a higher priority means that the child is more important to include in the prompt. If no priority is specified, the child is included if and only if its parent is included. Absolute priorities are specified with `p` and relative ones are specified with `prel`.
66 |
67 | In the example above, we always include the system message and the latest user message, and are including as many messages from the history as possible, where later messages are prioritized over earlier messages.
68 |
69 | The key promise of the priompt renderer is:
70 |
71 | > Let $T$ be the token limit and $\text{Prompt}(p_\text{cutoff})$ be the function that creates a prompt by including all scopes with priority $p_\text{scope} \geq p_\text{cutoff}$, and no other. Then, the rendered prompt is $\text{\textbf{P}} = \text{Prompt}(p_\text{opt-cutoff})$ where $p_\text{opt-cutoff}$ is the minimum value such that $|\text{Prompt}(p_\text{opt-cutoff})| \leq T$.
72 |
73 | The building blocks of a priompt prompt are:
74 |
1. `<scope>`: this allows you to set priorities `p` for absolute or `prel` for relative.
2. `<first>`: the first child with a sufficiently high priority will be included, and all children below it will not. This is useful for fallbacks for implementing something like "when the result is too long we want to say `(result omitted)`".
3. `<empty>`: for specifying empty space, useful for reserving tokens for generation.
4. `<capture>`: capture the output and parse it right within the prompt.
5. `<isolate>`: isolate a section of the prompt with its own token limit. This is useful for guaranteeing that the start of the prompt will be the same for caching purposes. It would be nice to extend this to allow token limits like `100% - 100`.
80 |
81 | You can create components all you want, just like in React.
82 |
83 | ## Future
84 |
85 | Some building blocks we're thinking of adding:
86 |
1. `<limit>`: specify a `limit` on the number of tokens within a scope
88 | 2. `onExcluded={() => {...}}`: a callback for when a particular scope is excluded, which allows you to do things like "summarize this result when it doesn't fit in the prompt anymore".
89 |
90 | We're also thinking about making a framework around Priompt for agents. It would look something like interactive web design but for agents, where `onClicks` are simulated by having the agent call a function. We would love ideas here!
91 |
92 | ## Caveats
93 |
94 | 1. We've discovered that adding priorities to everything is sort of an anti-pattern. It is possible that priorities are the wrong abstraction. We have found them useful though for including long files in the prompt in a line-by-line way.
95 | 2. The Priompt renderer has no builtin support for creating cacheable prompts. If you overuse priorities, it is easy to make hard-to-cache prompts, which may increase your cost or latency for LLM inference. We are interested in good solutions here, but for now it is up to the prompt designer to think about caching.
96 | 3. The current version of priompt only supports around 10K scopes reasonably fast (this is enough for most use cases). If you want to include a file in the prompt that is really long (>10K lines), and you split it line-by-line, you probably want to implement something like "for lines farther than 1000 lines away from the cursor position we have coarser scopes of 10 lines at a time".
97 | 4. For latency-critical prompts you want to monitor the time usage in the priompt preview dashboard. If there are too many scopes you may want to optimize for performance.
98 | 5. The Priompt renderer is not always guaranteed to produce the perfect $p_\text{opt-cutoff}$. For example, if a higher-priority child of a `` has more tokens than a lower-priority child, the currently implemented binary search renderer may return a (very slightly) incorrect result.
99 |
100 | ## Contributions
101 |
102 | Contributions are very welcome! This entire repo is MIT-licensed.
103 |
--------------------------------------------------------------------------------
/priompt-preview/src/components/ui/command.tsx:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import { DialogProps } from "@radix-ui/react-dialog"
3 | import { MagnifyingGlassIcon } from "@radix-ui/react-icons"
4 | import { Command as CommandPrimitive } from "cmdk"
5 |
6 | import { cn } from "@/lib/utils"
7 | import { Dialog, DialogContent } from "@/components/ui/dialog"
8 |
9 | const Command = React.forwardRef<
10 | React.ElementRef,
11 | React.ComponentPropsWithoutRef
12 | >(({ className, ...props }, ref) => (
13 |
21 | ))
22 | Command.displayName = CommandPrimitive.displayName
23 |
24 | interface CommandDialogProps extends DialogProps {}
25 |
26 | const CommandDialog = ({ children, ...props }: CommandDialogProps) => {
27 | return (
28 |
35 | )
36 | }
37 |
38 | const CommandInput = React.forwardRef<
39 | React.ElementRef,
40 | React.ComponentPropsWithoutRef
41 | >(({ className, ...props }, ref) => (
42 |
43 |
44 |
52 |
53 | ))
54 |
55 | CommandInput.displayName = CommandPrimitive.Input.displayName
56 |
57 | const CommandList = React.forwardRef<
58 | React.ElementRef,
59 | React.ComponentPropsWithoutRef
60 | >(({ className, ...props }, ref) => (
61 |
66 | ))
67 |
68 | CommandList.displayName = CommandPrimitive.List.displayName
69 |
70 | const CommandEmpty = React.forwardRef<
71 | React.ElementRef,
72 | React.ComponentPropsWithoutRef
73 | >((props, ref) => (
74 |
79 | ))
80 |
81 | CommandEmpty.displayName = CommandPrimitive.Empty.displayName
82 |
83 | const CommandGroup = React.forwardRef<
84 | React.ElementRef,
85 | React.ComponentPropsWithoutRef
86 | >(({ className, ...props }, ref) => (
87 |
95 | ))
96 |
97 | CommandGroup.displayName = CommandPrimitive.Group.displayName
98 |
99 | const CommandSeparator = React.forwardRef<
100 | React.ElementRef,
101 | React.ComponentPropsWithoutRef
102 | >(({ className, ...props }, ref) => (
103 |
108 | ))
109 | CommandSeparator.displayName = CommandPrimitive.Separator.displayName
110 |
111 | const CommandItem = React.forwardRef<
112 | React.ElementRef,
113 | React.ComponentPropsWithoutRef
114 | >(({ className, ...props }, ref) => (
115 |
123 | ))
124 |
125 | CommandItem.displayName = CommandPrimitive.Item.displayName
126 |
127 | const CommandShortcut = ({
128 | className,
129 | ...props
130 | }: React.HTMLAttributes) => {
131 | return (
132 |
139 | )
140 | }
141 | CommandShortcut.displayName = "CommandShortcut"
142 |
143 | export {
144 | Command,
145 | CommandDialog,
146 | CommandInput,
147 | CommandList,
148 | CommandEmpty,
149 | CommandGroup,
150 | CommandItem,
151 | CommandShortcut,
152 | CommandSeparator,
153 | }
154 |
--------------------------------------------------------------------------------
/examples/src/function-calling-prompt.tsx:
--------------------------------------------------------------------------------
1 | import * as Priompt from "@anysphere/priompt";
2 | import {
3 | PreviewConfig,
4 | PreviewManager,
5 | PromptElement,
6 | PromptProps,
7 | SystemMessage,
8 | UserMessage,
9 | Function,
10 | FunctionMessage,
11 | AssistantMessage,
12 | ZFunction,
13 | } from "@anysphere/priompt";
14 | import { z } from "zod";
15 |
16 | const FunctionCallingPromptConfig: PreviewConfig = {
17 | id: "functionCallingPrompt",
18 | prompt: FunctionCallingPrompt,
19 | };
20 |
21 | export type FunctionCallingPromptProps = PromptProps<{
22 | message: string;
23 | includeFunctions: string[];
24 | causeConfusion: boolean;
25 | }>;
26 |
27 | PreviewManager.registerConfig(FunctionCallingPromptConfig);
28 |
29 | // array of 10000 integers
30 | const arr = Array.from(Array(800).keys());
31 |
32 | export function FunctionCallingPrompt(
33 | props: FunctionCallingPromptProps,
34 | args?: { dump?: boolean }
35 | ): PromptElement {
36 | if (args?.dump === true) {
37 | PreviewManager.dump(FunctionCallingPromptConfig, props);
38 | }
39 | return (
40 | <>
41 | {props.includeFunctions.includes("insert_sql_row") && (
42 |
61 | )}
62 | {props.includeFunctions.includes("update_sql_row") && (
63 |
86 | )}
87 |
88 | You are a database manager, responsible for taking the user's message
89 | and inserting it into our database.
90 |
91 | {props.causeConfusion && (
92 | <>
93 | i love the color theme
94 |
103 |
104 | Inserted 1 row.
105 |
106 | >
107 | )}
108 |
109 | {props.message}
110 | {/* {arr.map((i) => (
111 | {props.message}
112 | ))} */}
113 |
114 |
115 | >
116 | );
117 | }
118 |
119 | // returns the new code
120 | PreviewManager.register(SimpleFunction);
121 | export function SimpleFunction(
122 | props: PromptProps<
123 | {
124 | code: string;
125 | error: string;
126 | },
127 | | {
128 | type: "newImport";
129 | newImport: string;
130 | }
131 | | {
132 | type: "newCode";
133 | newCode: string;
134 | }
135 | >
136 | ) {
137 | return (
138 | <>
139 | {
148 | return await props.onReturn({
149 | type: "newImport",
150 | newImport: args.import,
151 | });
152 | }}
153 | />
154 |
155 | You are a coding assistant. The user will give you a function that has
156 | linter errors. Your job is to fix the errors. You have two options:
157 | either, you can call the `add_import` function, which adds an import
158 | statement at the top of the file, or you can rewrite the entire
159 | function. If you rewrite the function, start your message with ```.
160 |
161 |
162 | Function:
163 |
164 | ```
165 |
166 | {props.code}
167 |
168 | ```
169 |
170 |
171 | Errors:
172 |
173 | ```
174 |
175 | {props.error}
176 |
177 | ```
178 |
179 |
180 | {
182 | if (msg.content !== undefined) {
183 | return await props.onReturn({
184 | type: "newCode",
185 | newCode: msg.content,
186 | });
187 | }
188 | }}
189 | />
190 | >
191 | );
192 | }
193 |
--------------------------------------------------------------------------------
/priompt/src/components.tsx:
--------------------------------------------------------------------------------
1 | import { ChatCompletionResponseMessage } from "openai";
2 | import * as Priompt from "./lib";
3 | import {
4 | BasePromptProps,
5 | ImageProps,
6 | PromptElement,
7 | PromptProps,
8 | } from "./types";
9 | import { JSONSchema7 } from "json-schema";
10 | import { z } from "zod";
11 | import zodToJsonSchemaImpl from "zod-to-json-schema";
12 |
13 | export function SystemMessage(
14 | props: PromptProps<{
15 | name?: string;
16 | to?: string;
17 | }>
18 | ): PromptElement {
19 | return {
20 | type: "chat",
21 | role: "system",
22 | name: props.name,
23 | to: props.to,
24 | children:
25 | props.children !== undefined
26 | ? Array.isArray(props.children)
27 | ? props.children.flat()
28 | : [props.children]
29 | : [],
30 | };
31 | }
32 |
33 | export function UserMessage(
34 | props: PromptProps<{
35 | name?: string;
36 | to?: string;
37 | }>
38 | ): PromptElement {
39 | return {
40 | type: "chat",
41 | role: "user",
42 | name: props.name,
43 | to: props.to,
44 | children:
45 | props.children !== undefined
46 | ? Array.isArray(props.children)
47 | ? props.children.flat()
48 | : [props.children]
49 | : [],
50 | };
51 | }
52 |
53 | export function AssistantMessage(
54 | props: PromptProps<{
55 | functionCall?: {
56 | name: string;
57 | arguments: string; // json string
58 | };
59 | to?: string;
60 | }>
61 | ): PromptElement {
62 | return {
63 | type: "chat",
64 | role: "assistant",
65 | functionCall: props.functionCall,
66 | to: props.to,
67 | children:
68 | props.children !== undefined
69 | ? Array.isArray(props.children)
70 | ? props.children.flat()
71 | : [props.children]
72 | : [],
73 | };
74 | }
75 |
// Wraps raw image data as an "image" prompt element, forwarding the bytes,
// pixel dimensions, and detail setting unchanged.
// NOTE(review): the type argument of PromptProps appears to have been lost in
// extraction — judging by the fields read here, it was likely
// `PromptProps<ImageProps>` (see the ImageProps import above); confirm.
export function ImageComponent(props: PromptProps): PromptElement {
	return {
		type: "image",
		bytes: props.bytes,
		dimensions: props.dimensions,
		detail: props.detail,
	};
}
84 |
85 | export function FunctionMessage(
86 | props: PromptProps<{
87 | name: string;
88 | to?: string;
89 | }>
90 | ): PromptElement {
91 | return {
92 | type: "chat",
93 | role: "function",
94 | name: props.name,
95 | to: props.to,
96 | children:
97 | props.children !== undefined
98 | ? Array.isArray(props.children)
99 | ? props.children.flat()
100 | : [props.children]
101 | : [],
102 | };
103 | }
104 |
105 | export function ToolResultMessage(
106 | props: PromptProps<{
107 | name: string;
108 | to?: string;
109 | }>
110 | ): PromptElement {
111 | return {
112 | type: "chat",
113 | role: "tool",
114 | name: props.name,
115 | to: props.to,
116 | children:
117 | props.children !== undefined
118 | ? Array.isArray(props.children)
119 | ? props.children.flat()
120 | : [props.children]
121 | : [],
122 | };
123 | }
124 |
/**
 * Declares an OpenAI-style function (name + description + JSON-schema
 * parameters) in the prompt, and captures model output that calls it: when the
 * output's `function_call` names this function and carries arguments,
 * `props.onCall` (if provided) is invoked with the raw JSON argument string.
 *
 * Note: two extraction-stripped tokens are restored here — the closing JSX
 * fragment `</>` (the opener `<>` is present) and the `<void>` argument of the
 * `onCall` Promise return type — without which the block does not parse.
 *
 * @throws Error if `props.name` is not 1-64 chars of a-z, A-Z, 0-9, underscore.
 */
export function Function(
	props: PromptProps<{
		name: string;
		description: string;
		parameters: JSONSchema7;
		onCall?: (args: string) => Promise<void>;
	}>
): PromptElement {
	if (!validFunctionName(props.name)) {
		throw new Error(
			`Invalid function name: ${props.name}. Function names must be between 1 and 64 characters long and may only contain a-z, A-Z, 0-9, and underscores.`
		);
	}

	// eslint-disable-next-line @typescript-eslint/ban-ts-comment
	// @ts-ignore
	return (
		<>
			{{
				type: "functionDefinition",
				name: props.name,
				description: props.description,
				parameters: props.parameters,
			}}
			{{
				type: "capture",
				onOutput: async (output: ChatCompletionResponseMessage) => {
					// Only react to a function_call addressed to this function.
					if (
						props.onCall !== undefined &&
						output.function_call !== undefined &&
						output.function_call.name === props.name &&
						output.function_call.arguments !== undefined
					) {
						await props.onCall(output.function_call.arguments);
					}
				},
			}}
		</>
	);
}
165 |
166 | // May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
167 | function validFunctionName(name: string): boolean {
168 | return /^[a-zA-Z0-9_]{1,64}$/.test(name);
169 | }
170 |
171 | export function ZFunction(
172 | props: PromptProps<{
173 | name: string;
174 | description: string;
175 | parameters: z.ZodType;
176 | // if the args fail to parse, we throw here
177 | onCall?: (args: ParamT) => Promise;
178 | // if onParseError is provided, then we don't throw
179 | // this can be useful in case a failed parse can still be useful for us
180 | // in cases when we really want the output, we can also call a model here to parse the output
181 | onParseError?: (error: z.ZodError, rawArgs: string) => Promise;
182 | // TODO: add an autoheal here
183 | }>
184 | ) {
185 | return (
186 | // eslint-disable-next-line @typescript-eslint/ban-ts-comment
187 | // @ts-ignore
188 | {
193 | if (props.onCall === undefined) {
194 | // do nothing
195 | return;
196 | }
197 | try {
198 | const args = props.parameters.parse(JSON.parse(rawArgs));
199 | await props.onCall(args);
200 | } catch (error) {
201 | if (props.onParseError !== undefined) {
202 | await props.onParseError(error, rawArgs);
203 | } else {
204 | throw error;
205 | }
206 | }
207 | }}
208 | />
209 | );
210 | }
211 |
212 | function zodToJsonSchema(schema: z.ZodType): JSONSchema7 {
213 | const fullSchema = zodToJsonSchemaImpl(schema, { $refStrategy: "none" });
214 | const {
215 | $schema,
216 | default: defaultVal,
217 | definitions,
218 | description,
219 | markdownDescription,
220 | ...rest
221 | } = fullSchema;
222 | // delete additionalProperties
223 | if ("additionalProperties" in rest) {
224 | // eslint-disable-next-line @typescript-eslint/ban-ts-comment
225 | // @ts-ignore
226 | delete rest.additionalProperties;
227 | }
228 | return rest as JSONSchema7;
229 | }
230 |
--------------------------------------------------------------------------------
/priompt-preview/src/openai.ts:
--------------------------------------------------------------------------------
1 | import { ChatCompletionRequestMessage, CreateChatCompletionRequest, CreateCompletionRequest, CreateCompletionResponse, StreamChatCompletionResponse } from '@anysphere/priompt/dist/openai';
2 | import { encodingForModel } from "js-tiktoken";
3 |
4 |
// Sentinel substituted with a real OpenAI API key when the preview is served.
// NOTE(review): looks like a build/serve-time string replacement — confirm
// against priompt-preview/scripts/serve.cjs.
const API_KEY = 'PRIOMPT_PREVIEW_OPENAI_KEY';

const s = "";
// NOTE(review): the `${s}` concatenation presumably keeps this from being an
// exact string literal so the serve-time find-and-replace does not also
// rewrite the sentinel check below — confirm.
const ossEndpointsJson = `PRIOMPT_PREVIEW_OSS_ENDPOINTS_JSON_STRING${s}`;

// If the sentinel was never substituted, fall back to an empty endpoint map.
const ossEndpoints = JSON.parse(ossEndpointsJson.includes("PREVIEW_OSS_ENDPOINTS_JSON_STRING") ? '{}' : ossEndpointsJson);

// OSS models offered in the preview UI: display label -> model key sent to the API.
export const OSS_MODELS = [
	{ displayName: "deepseek-7b", modelKey: "ft:deepseek-7b-cpp" },
	{ displayName: "deepseek-33b", modelKey: "ft:deepseek-33b-cpp" },
]
16 |
17 | export function isOSS(model: string): boolean {
18 | return model.includes('deepseek') || model.includes('mistral');
19 | }
20 |
21 | function getBaseUrl(model: string) {
22 | let url = 'https://api.openai.com/v1/';
23 | if (Object.keys(ossEndpoints).includes(model)) {
24 | url = ossEndpoints[model as keyof typeof ossEndpoints];
25 | }
26 | return url;
27 | }
28 |
// Streams a chat completion from OpenAI (or a configured OSS endpoint),
// yielding whatever `streamSource` produces from the response body. The
// request is aborted when the caller's `abortSignal` fires, when the first
// chunk takes more than 40s, or when any later chunk takes more than 10s.
export async function* streamChat(createChatCompletionRequest: CreateChatCompletionRequest, options?: RequestInit, abortSignal?: AbortSignal): AsyncGenerator {
	let streamer: AsyncGenerator | undefined = undefined;

	// Bridge the caller's signal into our own controller so we can also abort
	// on timeouts/cleanup without touching the caller's signal.
	const newAbortSignal = new AbortController();
	abortSignal?.addEventListener('abort', () => {
		newAbortSignal.abort();
	});

	// Initial watchdog: the request gets 40s to produce its first chunk.
	let timeout = setTimeout(() => {
		console.error("OpenAI request timed out after 40 seconds..... Not good.")
		// Next, we abort the signal
		newAbortSignal.abort();
	}, 40_000);

	try {
		const requestOptions: RequestInit = {
			...options,
			method: 'POST',
			headers: {
				...options?.headers,
				'Authorization': `Bearer ${API_KEY}`,
				'Content-Type': 'application/json',
			},
			signal: newAbortSignal.signal,
			body: JSON.stringify({
				...createChatCompletionRequest,
				stream: true
			}),
		};

		// NOTE(review): the default base URL ends in '/', so this concatenation
		// yields '...v1//chat/completions' — confirm all endpoints tolerate the
		// double slash.
		const url = getBaseUrl(createChatCompletionRequest.model) + '/chat/completions';
		const response = await fetch(url, requestOptions);
		if (!response.ok) {
			throw new Error(`HTTP error! status: ${response.status}. message: ${await response.text()}`);
		}
		// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
		streamer = streamSource(response.body!);
		for await (const data of streamer) {
			clearTimeout(timeout);

			// Inter-chunk watchdog: each subsequent chunk gets 10s. Re-armed
			// before the yield so a slow consumer does not trip it mid-yield.
			timeout = setTimeout(() => {
				console.error("OpenAI request timed out after 10 seconds..... Not good.")
				newAbortSignal.abort();
			}, 10_000);

			yield data;

			clearTimeout(timeout);
		}
	} finally {
		// Always stop the watchdog, close the underlying stream, and abort the
		// fetch — even if the consumer exits the generator early.
		clearTimeout(timeout);
		if (streamer !== undefined) {
			await streamer.return(undefined);
		}
		newAbortSignal.abort();
	}
}
86 |
// Context-window sizes used to budget max_tokens in streamChatCompletion.
// NOTE(review): the generic parameters appear stripped by extraction —
// presumably `Record<string, number>`; restore before compiling.
const TOKEN_LIMIT: Record = {
  "gpt-3.5-turbo": 4096,
  "azure-3.5-turbo": 4096,
  "gpt-4": 8192,
  "gpt-4-cursor-completions": 128_000,
  "gpt-4-cursor-vinod": 128_000,
  "gpt-4-0314": 8192,
  "gpt-4-32k": 32000,
  "gpt-4-1106-preview": 128000,
  "gpt-4-0125-preview": 128000,
  "gpt-3.5-turbo-1106": 16000,
  "text-davinci-003": 4096,
  "code-davinci-002": 4096,
};
// Tokenizers for prompt-length counting: current (cl100k via gpt-4) and the
// older encoding used by davinci-era models.
const enc = encodingForModel("gpt-4");
const enc_old = encodingForModel("text-davinci-003");
103 |
104 |
/**
 * Emulates a streamed chat completion over the plain /completions endpoint:
 * messages are flattened into ChatML text (see joinMessages) and the streamed
 * text deltas are re-wrapped to look like chat-style `delta` responses.
 * Watchdog/abort behavior mirrors streamChat.
 */
export async function* streamChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: RequestInit, abortSignal?: AbortSignal): AsyncGenerator {
  // Last message is left unterminated (no <|im_end|>) so the model continues it.
  const prompt = joinMessages(createChatCompletionRequest.messages, true);
  let tokens = enc.encode(prompt).length;
  // Models with '00' in the name (davinci-003/002 era) use the older tokenizer.
  if (createChatCompletionRequest.model.includes('00')) {
    tokens = enc_old.encode(prompt).length;
  }
  const createCompletionRequest = {
    // Default max_tokens to the model's remaining context; the spread below
    // lets an explicit caller-provided max_tokens override this.
    max_tokens: (TOKEN_LIMIT[createChatCompletionRequest.model] ?? 4096) - tokens,
    ...createChatCompletionRequest,
    messages: undefined,
    prompt,
    stop: ['<|im_end|>', '<|diff_marker|>']
  } as CreateCompletionRequest;

  let streamer: AsyncGenerator | undefined = undefined;

  // Internal controller: aborted by caller signal, watchdogs, or cleanup.
  const newAbortSignal = new AbortController();
  abortSignal?.addEventListener('abort', () => {
    newAbortSignal.abort();
  });

  // Watchdog for the initial response: 40s until the first streamed chunk.
  let timeout = setTimeout(() => {
    console.error("OpenAI request timed out after 40 seconds..... Not good.")
    newAbortSignal.abort();
  }, 40_000);

  try {
    const requestOptions: RequestInit = {
      ...options,
      method: 'POST',
      headers: {
        ...options?.headers,
        'Authorization': `Bearer ${API_KEY}`,
        'Content-Type': 'application/json',
      },
      signal: newAbortSignal.signal,
      body: JSON.stringify({
        ...createCompletionRequest,
        stream: true
      }),
    };

    const url = getBaseUrl(createChatCompletionRequest.model) + '/completions';
    const response = await fetch(url, requestOptions);
    if (!response.ok) {
      throw new Error(`HTTP error! status: ${response.status}. message: ${await response.text()}`);
    }
    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
    streamer = streamSource(response.body!);
    for await (const data of streamer) {
      clearTimeout(timeout);

      // NOTE(review): as in streamChat, this timer guards consumer processing,
      // not the gap between network chunks — confirm intent.
      timeout = setTimeout(() => {
        console.error("OpenAI request timed out after 10 seconds..... Not good.")
        newAbortSignal.abort();
      }, 10_000);

      // Re-shape completion chunks into chat-style deltas. The replace()
      // presumably strips an echoed prompt prefix from endpoints that echo
      // input (replaces first occurrence only) — verify against the endpoints used.
      yield {
        ...data,
        choices: data.choices.map((choice) => {
          return {
            delta: {
              role: 'assistant',
              content: choice.text?.replace(prompt, ''),
            }
          }
        })
      }

      clearTimeout(timeout);
    }
  } finally {
    // Stop the watchdog, close the SSE reader, and abort the fetch on any exit.
    clearTimeout(timeout);
    if (streamer !== undefined) {
      await streamer.return(undefined);
    }
    newAbortSignal.abort();
  }
}
184 |
185 |
186 | async function* streamSource(stream: ReadableStream): AsyncGenerator {
187 | // Buffer exists for overflow when event stream doesn't end on a newline
188 | let buffer = '';
189 |
190 | // Create a reader to read the response body as a stream
191 | const reader = stream.getReader();
192 |
193 | // Loop until the stream is done
194 | while (true) {
195 | const { done, value } = await reader.read();
196 | if (done) {
197 | break;
198 | }
199 |
200 | buffer += new TextDecoder().decode(value);
201 | const lines = buffer.split('\n');
202 | for (const line of lines.slice(0, -1)) {
203 | if (line.startsWith('data: ')) {
204 | const jsonString = line.slice(6);
205 | if (jsonString === '[DONE]') {
206 | return;
207 | }
208 | try {
209 | const ans = JSON.parse(jsonString) as T;
210 | yield ans;
211 | } catch (e) {
212 | console.log(jsonString);
213 | throw e;
214 | }
215 | }
216 | }
217 | buffer = lines[lines.length - 1];
218 | }
219 |
220 | if (buffer.startsWith('data: ')) {
221 | const jsonString = buffer.slice(6);
222 | if (jsonString === '[DONE]') {
223 | return;
224 | }
225 | try {
226 | const ans = JSON.parse(jsonString) as T;
227 | yield ans;
228 | } catch (e) {
229 | console.log(jsonString);
230 | throw e;
231 | }
232 | }
233 | }
234 |
235 | export function joinMessages(messages: ChatCompletionRequestMessage[], lastIsIncomplete: boolean = false) {
236 | return messages.map((message, index) => {
237 | let ret = `<|im_start|>${message.role}<|im_sep|>${message.content}`;
238 | if (!lastIsIncomplete || index !== messages.length - 1) {
239 | ret += `<|im_end|>`;
240 | }
241 | return ret;
242 | }).join('');
243 | }
--------------------------------------------------------------------------------
/priompt/src/base.test.tsx:
--------------------------------------------------------------------------------
1 | import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
2 | import * as Priompt from "./index";
3 | import {
4 | isChatPrompt,
5 | isPlainPrompt,
6 | promptHasFunctions,
7 | promptToTokens,
8 | render,
9 | } from "./lib";
10 | import { PromptElement, PromptProps } from "./types";
11 | import { AssistantMessage, SystemMessage, UserMessage } from "./components";
12 |
// NOTE(review): JSX element tags appear to have been stripped from this file by
// an extraction step (e.g. the wrapper elements around {props.children} and the
// <breaktoken/>-style markers are gone, leaving stray `>` lines and empty
// lines). The code is preserved as-is below; restore from version control
// before attempting to compile or run these tests.
describe("isolate", () => {
  // Wraps children either in an isolated token scope or a plain scope,
  // depending on props.isolate (wrapper elements lost in extraction).
  function Isolate(
    props: PromptProps<{ isolate: boolean; tokenLimit: number }>
  ): PromptElement {
    if (props.isolate) {
      return (
        <>

          {props.children}

        >
      );
    } else {
      return (
        <>

          {props.children}

        >
      );
    }
  }

  // Builds a large prompt whose sections carry marker strings so the test can
  // detect which sections survived token-limited rendering.
  function Test(props: PromptProps<{ isolate: boolean }>): PromptElement {
    return (
      <>
        This is the start of the prompt.

        {Array.from({ length: 1000 }, (_, i) => (
          <>

            This is an SHOULDBEINCLUDEDONLYIFISOLATED user message number{" "}
            {i}

          >
        ))}

        {Array.from({ length: 1000 }, (_, i) => (
          <>
            This is user message number {i}
          >
        ))}

        {Array.from({ length: 1000 }, (_, i) => (
          <>

            {i},xl,x,,
            {i > 100 ? "SHOULDBEINCLUDEDONLYIFNOTISOLATED" : ""}

          >
        ))}

      >
    );
  }

  // Isolation should change WHICH sections fit: the isolated marker appears
  // only when isolate=true, and vice versa; both renders stay under the limit.
  it("should have isolate work", async () => {
    const renderedIsolated = await render(Test({ isolate: true }), {
      tokenLimit: 1000,
      tokenizer: "cl100k_base",
    });
    expect(renderedIsolated.tokenCount).toBeLessThanOrEqual(1000);
    expect(isPlainPrompt(renderedIsolated.prompt)).toBe(true);
    if (!isPlainPrompt(renderedIsolated.prompt)) return;
    expect(
      renderedIsolated.prompt.includes("SHOULDBEINCLUDEDONLYIFISOLATED")
    ).toBe(true);
    expect(
      renderedIsolated.prompt.includes("SHOULDBEINCLUDEDONLYIFNOTISOLATED")
    ).toBe(false);

    const renderedUnIsolated = await render(Test({ isolate: false }), {
      tokenLimit: 1000,
      tokenizer: "cl100k_base",
    });
    expect(renderedUnIsolated.tokenCount).toBeLessThanOrEqual(1000);
    expect(isPlainPrompt(renderedUnIsolated.prompt)).toBe(true);
    if (!isPlainPrompt(renderedUnIsolated.prompt)) return;
    expect(
      renderedUnIsolated.prompt.includes("SHOULDBEINCLUDEDONLYIFISOLATED")
    ).toBe(false);
    expect(
      renderedUnIsolated.prompt.includes("SHOULDBEINCLUDEDONLYIFNOTISOLATED")
    ).toBe(true);
  });

  // Plain-text prompt with an optional token break in the middle of a word
  // (the break element itself was lost in extraction).
  function SimplePrompt(
    props: PromptProps<{ breaktoken: boolean }>
  ): PromptElement {
    return (
      <>
        This is the start of the p{props.breaktoken ? : <>>}
        rompt. This is the second part of the prompt.
      >
    );
  }

  // The break should add exactly one token by splitting "prompt" into "p"+"rompt".
  it("promptToTokens should work", async () => {
    const donotbreak = await render(SimplePrompt({ breaktoken: false }), {
      tokenLimit: 1000,
      tokenizer: "cl100k_base",
    });
    const toTokens = await promptToTokens(donotbreak.prompt, "cl100k_base");
    expect(donotbreak.tokenCount).toBe(toTokens.length);
    expect(toTokens).toStrictEqual([
      2028, 374, 279, 1212, 315, 279, 10137, 13, 1115, 374, 279, 2132, 961, 315,
      279, 10137, 13,
    ]);

    const dobreak = await render(SimplePrompt({ breaktoken: true }), {
      tokenLimit: 1000,
      tokenizer: "cl100k_base",
    });
    expect(dobreak.tokenCount).toBe(donotbreak.tokenCount + 1);
    const toTokens2 = await promptToTokens(dobreak.prompt, "cl100k_base");
    expect(dobreak.tokenCount).toBe(toTokens2.length);
    expect(toTokens2).toStrictEqual([
      2028, 374, 279, 1212, 315, 279, 281, 15091, 13, 1115, 374, 279, 2132, 961,
      315, 279, 10137, 13,
    ]);
  });

  // Chat-message variant of the break test (message wrapper elements lost in extraction).
  function SimpleMessagePrompt(
    props: PromptProps<{ breaktoken: boolean }>
  ): PromptElement {
    return (
      <>

        This is the start of the prompt.

        {props.breaktoken ? : <>>}

        This is the second part of the prompt.

        hi!
      >
    );
  }

  // NOTE(review): duplicate test name with the previous promptToTokens test —
  // consider renaming one of them.
  it("promptToTokens should work", async () => {
    const donotbreak = await render(
      SimpleMessagePrompt({ breaktoken: false }),
      {
        tokenLimit: 1000,
        tokenizer: "cl100k_base",
        lastMessageIsIncomplete: true,
      }
    );
    const toTokens = await promptToTokens(donotbreak.prompt, "cl100k_base");
    expect(donotbreak.tokenCount).toBe(toTokens.length);
    expect(toTokens).toStrictEqual([
      100264, 9125, 100266, 2028, 374, 279, 1212, 315, 279, 10137, 382, 2028,
      374, 279, 2132, 961, 315, 279, 10137, 13, 100265, 100264, 882, 100266,
      6151, 0,
    ]);

    const dobreak = await render(SimpleMessagePrompt({ breaktoken: true }), {
      tokenLimit: 1000,
      tokenizer: "cl100k_base",
      lastMessageIsIncomplete: true,
    });
    expect(dobreak.tokenCount).toBe(donotbreak.tokenCount + 1);
    const toTokens2 = await promptToTokens(dobreak.prompt, "cl100k_base");
    expect(dobreak.tokenCount).toBe(toTokens2.length);
    expect(toTokens2).toStrictEqual([
      100264, 9125, 100266, 2028, 374, 279, 1212, 315, 279, 10137, 627, 198,
      2028, 374, 279, 2132, 961, 315, 279, 10137, 13, 100265, 100264, 882,
      100266, 6151, 0,
    ]);
  });

  // Literal special-token strings must not be merged into actual special tokens.
  function SpecialTokensPrompt(): PromptElement {
    return (
      <>
        {"<|im_start|>"}
        {"<|diff_marker|>"}
        {"<|endoftext|>"}
      >
    );
  }

  it("handle special tokens", async () => {
    const specialTokens = await render(SpecialTokensPrompt(), {
      tokenLimit: 1000,
      tokenizer: "cl100k_base",
      lastMessageIsIncomplete: true,
    });
    // If the special-token text were tokenized as single special tokens the
    // count would be far lower than 24.
    expect(specialTokens.tokenCount).toBeGreaterThanOrEqual(24);
    const toTokens = await promptToTokens(specialTokens.prompt, "cl100k_base");
    expect(specialTokens.tokenCount).toBe(toTokens.length);
  });
});
205 |
// NOTE(review): as above, JSX elements (presumably the <config .../> elements
// implied by props.numConfigs) were stripped by extraction; code kept as-is.
describe("config", () => {
  function TestConfig(
    props: PromptProps<{ numConfigs: number }>
  ): PromptElement {
    return (
      <>
        This is the start of the prompt.


      >
    );
  }

  // Rendering should surface the config element's values on rendered.config.
  it("should have config work", async () => {
    const rendered = await render(TestConfig({ numConfigs: 1 }), {
      tokenLimit: 1000,
      tokenizer: "cl100k_base",
    });
    expect(rendered.tokenCount).toBeLessThanOrEqual(1000);
    expect(isPlainPrompt(rendered.prompt)).toBe(true);
    expect(rendered.config.stop).toBe("\n");
    expect(rendered.config.maxResponseTokens).toBe("tokensReserved");
  });
});
230 |
--------------------------------------------------------------------------------
/examples/src/index.ts:
--------------------------------------------------------------------------------
1 | import { promptToOpenAIChatMessages, promptToOpenAIChatRequest, render, renderun } from '@anysphere/priompt';
2 | import { handlePriomptPreview } from './priompt-preview-handlers';
3 | import { ArvidStory, ExamplePrompt, SimplePrompt } from './prompt';
4 | import fastifyCors from "@fastify/cors";
5 | import Fastify, { FastifyError, FastifyLoggerOptions, FastifyReply, FastifyRequest, RawServerDefault, RouteGenericInterface } from "fastify";
6 | import { OpenAI as OpenAIV4 } from 'openai-v4';
7 | import { FunctionCallingPrompt, SimpleFunction } from './function-calling-prompt';
8 | import { ChatCompletionResponseMessage, Configuration, CreateChatCompletionRequest, OpenAIApi } from 'openai';
9 |
// Fail fast if the server port is missing or not numeric.
const portString = process.env.SERVER_PORT;
if (portString === undefined || Number.isNaN(parseInt(portString))) {
  throw new Error("SERVER_PORT is undefined. Please run the ./init.sh script to create a .env file.");
}
const port = parseInt(portString);

const S = Fastify();

// Reject the placeholder key from .env.example as well as a missing key.
if (process.env.OPENAI_API_KEY === undefined || process.env.OPENAI_API_KEY === "" || process.env.OPENAI_API_KEY === "sk-your-openai-secret-key") {
  throw new Error("OPENAI_API_KEY is undefined. Please run the ./init.sh script to create a .env file, and then insert your API key in the .env file.");
}

// Two OpenAI clients: the v4 SDK (used for streaming) and the v3 SDK
// (used by most routes); see the adapters below for bridging their types.
const openaiV4 = new OpenAIV4({
  apiKey: process.env.OPENAI_API_KEY,
});
const configuration = new Configuration({
  apiKey: process.env.OPENAI_API_KEY,
});
const openai = new OpenAIApi(configuration);
29 |
30 | function messageAdapter(old: ChatCompletionResponseMessage[]): OpenAIV4.Chat.CompletionCreateParams.CreateChatCompletionRequestNonStreaming.Message[] {
31 | return old as OpenAIV4.Chat.CompletionCreateParams.CreateChatCompletionRequestNonStreaming.Message[];
32 | }
33 | function messageAdapterReverse(n: OpenAIV4.Chat.CompletionCreateParams.CreateChatCompletionRequestNonStreaming.Message): ChatCompletionResponseMessage {
34 | return n as ChatCompletionResponseMessage;
35 | }
36 | function requestAdapter(old: CreateChatCompletionRequest): OpenAIV4.Chat.CompletionCreateParams.CreateChatCompletionRequestNonStreaming {
37 | return old as OpenAIV4.Chat.CompletionCreateParams.CreateChatCompletionRequestNonStreaming;
38 | }
39 |
/**
 * Wires up and starts the Fastify example server: priompt preview handlers
 * (development only), CORS for the preview UI, and demo routes that render
 * priompt prompts and call OpenAI.
 */
async function main() {

  // In development, expose the priompt preview endpoints on this server.
  if (process.env.NODE_ENV === "development") {
    await handlePriomptPreview(S);
  }

  // Allow the preview UI (served on its own port) to call this server.
  await S.register(fastifyCors, {
    origin: [
      `http://localhost:${process.env.PRIOMPT_PREVIEW_PORT}`
    ],
  });

  // here we can add any other routes we want! this can be good for testing stuff
  S.get("/", (_, reply) => {
    return reply.type("text/plain").send(`Welcome to Priompt examples.`);
  });

  // Renders ExamplePrompt with ?message=&name= and returns the model's reply as text.
  S.get("/message", async (request, reply) => {
    const query = request.query as { message: string; name: string };
    if (query.message === undefined || query.name === undefined) {
      return reply.status(400).send("Bad request; message and name are required.");
    }
    const message = query.message as string;
    const name = query.name as string;
    // dump: in development, persist the rendered prompt for the preview UI.
    const prompt = ExamplePrompt({ message, name }, { dump: process.env.NODE_ENV === "development" });
    const output = await render(prompt, {
      model: "gpt-3.5-turbo"
    });

    const requestConfig: CreateChatCompletionRequest = {
      model: "gpt-3.5-turbo",
      messages: promptToOpenAIChatMessages(output.prompt),
    };

    try {
      const openaiResult = await openai.createChatCompletion(requestConfig);

      const openaiOutput = openaiResult.data.choices[0].message;

      return reply.type("text/plain").send(openaiOutput?.content);
    } catch (error) {
      console.error(error);
      return reply.status(500).send("Internal server error.");
    }
  });

  // Function-calling demo: ?message=&confuse=true renders FunctionCallingPrompt
  // with SQL-row functions and returns the raw choice (may carry a function_call).
  S.get("/database", async (request, reply) => {
    const query = request.query as { message: string; confuse: string | undefined; };
    if (query.message === undefined) {
      return reply.status(400).send("Bad request; message is required.");
    }
    const message = query.message as string;
    const prompt = FunctionCallingPrompt({ message, includeFunctions: ["insert_sql_row", "update_sql_row"], causeConfusion: query.confuse === "true" }, { dump: process.env.NODE_ENV === "development" });
    const output = await render(prompt, {
      model: "gpt-3.5-turbo"
    });

    console.log(JSON.stringify(output.prompt, null, 2));

    // Function-calling requires a 0613+ snapshot model.
    const requestConfig: CreateChatCompletionRequest = {
      ...promptToOpenAIChatRequest(output.prompt),
      model: "gpt-3.5-turbo-0613",
    };

    // make this print all nested values in node
    console.log(JSON.stringify(requestConfig, null, 2));

    try {
      const openaiResult = await openai.createChatCompletion(requestConfig);

      const openaiOutput = openaiResult.data.choices[0];

      return reply.type("text/json").send(JSON.stringify(openaiOutput));
    } catch (error) {
      console.error(error);
      return reply.status(500).send("Internal server error.");
    }
  });

  // renderun demo: translates a fixed Swedish sentence into ?language=.
  S.get("/simple", async (request, reply) => {
    const query = request.query as { language: string; };
    if (query.language === undefined) {
      return reply.status(400).send("Bad request; language is required.");
    }
    const language = query.language as string;
    const text = "Cursor är den bästa plattformen för att skriva kod.";
    try {
      const answer = await renderun({
        prompt: SimplePrompt,
        props: { text, language },
        renderOptions: {
          model: "gpt-3.5-turbo",
        },
        modelCall: async (x) => { return { type: "output", value: (await openai.createChatCompletion({ ...x, model: "gpt-3.5-turbo" })).data } }
      });
      return reply.type("text/plain").send(JSON.stringify({ answer }));
    } catch (error) {
      console.error(error);
      return reply.status(500).send("Internal server error.");
    }
  });

  // Streaming renderun demo: streams an ArvidStory completion via the v4 SDK,
  // adapting each v4 delta back into the v3 message shape.
  S.get("/arvidstory", async (request, reply) => {
    try {
      const answer = await renderun({
        prompt: ArvidStory,
        props: {},
        renderOptions: {
          model: "gpt-3.5-turbo",
        },
        modelCall: async (x) => {
          const y = await openaiV4.chat.completions.create({ ...requestAdapter({ ...x, model: "gpt-3.5-turbo" }), stream: true });
          return {
            type: "stream",
            value: (async function* () {
              for await (const message of y) {
                // eslint-disable-next-line @typescript-eslint/no-explicit-any
                yield messageAdapterReverse(message.choices[0].delta as any);
              }
            })()
          }
        }
      });
      // Accumulate the streamed parts into one string for the response.
      let s = "";
      for await (const part of answer) {
        s += part;
        console.log(part);
      }
      return reply.type("text/plain").send(JSON.stringify({ answer: s }));
    } catch (error) {
      console.error(error);
      return reply.status(500).send("Internal server error.");
    }
  });

  // Function-calling via renderun: asks SimpleFunction to propose a fix for a
  // hard-coded buggy snippet (?type=code selects the second variant).
  S.get("/fixcode", async (request, reply) => {
    const query = request.query as { type: string; };
    // NOTE(review): the `: string` annotation applies only to `error`;
    // `code` is implicitly any here.
    let code, error: string;
    if (query.type === undefined || query.type !== 'code') {
      code = "function x() {\n\treturn z.object({\n\t\ta: z.string(),\n\t\tb: z.number(),\n\t});\n}";
      error = "'z' is not defined";
    } else {
      code = "function x() {\n\treturn z.object({\n\t\ta: z.string(),\n\t\tb: z.umber(),\n\t});\n}";
      error = "'umber' is not defined";
    }
    try {
      const action = await renderun({
        prompt: SimpleFunction,
        props: { code, error },
        renderOptions: {
          model: "gpt-4",
        },
        modelCall: async (x) => { return { type: 'output', value: (await openai.createChatCompletion({ ...x, model: "gpt-4" })).data } }
      });
      return reply.type("text/plain").send(JSON.stringify(action));
    } catch (error) {
      console.error(error);
      return reply.status(500).send("Internal server error.");
    }
  });

  await S.listen({ host: "0.0.0.0", port });

  console.log(`Server listening on port ${port}.`);
}
203 |
204 | void main();
--------------------------------------------------------------------------------
/priompt/src/openai.ts:
--------------------------------------------------------------------------------
1 | import {
2 | ChatCompletionResponseMessage,
3 | CreateChatCompletionResponseChoicesInner,
4 | CreateChatCompletionResponse,
5 | ChatCompletionRequestMessageRoleEnum,
6 | ChatCompletionRequestMessageFunctionCall,
7 | ChatCompletionFunctions,
8 | CreateChatCompletionRequestFunctionCall,
9 | CreateChatCompletionRequestStop
10 | } from 'openai';
11 |
// Re-export the subset of the 'openai' (v3) SDK surface that priompt consumers
// depend on, so they import these names from one place.
export {
  CreateChatCompletionResponse,
  ChatCompletionResponseMessage,
  ChatCompletionFunctions,
  // Setup
  OpenAIApi,
  Configuration,
  // Embeddings
  CreateEmbeddingRequest,
  CreateEmbeddingResponse,
  CreateEmbeddingResponseDataInner,
  // Completions
  CreateCompletionRequest,
  CreateCompletionResponse,
  CreateCompletionRequestPrompt,
  ChatCompletionRequestMessageRoleEnum,
  // Function
  CreateChatCompletionRequestFunctionCall,
  ChatCompletionRequestMessageFunctionCall,
  // Misc
  CreateCompletionResponseChoicesInnerLogprobs
} from 'openai';
34 |
// Tokenizer identifiers; names match tiktoken encoding names.
export const CL100K_BASE = 'cl100k_base';
export const R50K_BASE = 'r50k_base';
export const P50K_BASE = 'p50k_base';
export const GPT2_TOKENIZER = 'gpt2';

// All tokenizers callers may request; `as const` keeps the entries as string
// literals so UsableTokenizer below is a literal union, not `string`.
export const usableTokenizers = [
  CL100K_BASE,
  'cl100k_base_special_tokens',
  R50K_BASE,
  P50K_BASE,
  GPT2_TOKENIZER
] as const;

export type UsableTokenizer = typeof usableTokenizers[number];
50 |
51 | const encoder = new TextEncoder();
52 | export function approximateTokensUsingBytecount(text: string, tokenizer: UsableTokenizer): number {
53 | const byteLength = encoder.encode(text).length;
54 | switch (tokenizer) {
55 | case 'cl100k_base':
56 | return byteLength / 4;
57 | default:
58 | return byteLength / 3;
59 | }
60 | }
61 |
// docs here: https://platform.openai.com/docs/guides/chat/introduction (out of date!)
// Per-message overhead: <|im_start|>{role}<|im_sep|> ... <|im_end|> adds ~4 tokens per message.
export const CHATML_PROMPT_EXTRA_TOKEN_COUNT_LINEAR_FACTOR = 4;
// Constant overhead priming the reply: <|im_start|>assistant<|im_sep|> is ~3 tokens.
export const CHATML_PROMPT_EXTRA_TOKEN_COUNT_CONSTANT = 3;
67 |
// One content part of a chat message: plain text or an image (vision-style
// requests), discriminated on `type`.
export type Content = {
  type: 'text';
  text: string;
} | {
  type: 'image';
  image_url: {
    url: string,
    detail?: 'low' | 'high' | 'auto'
    // Temporary addition by Aman needed for token calculation
    dimensions: {
      width: number;
      height: number;
    }
  },
}
83 |
84 |
// Local replacement for the SDK's message type: identical shape except that
// `content` may also be a Content[] (text/image parts) for vision requests.
export interface ChatCompletionRequestMessage {
  /**
   * The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
   * @type {string}
   * @memberof ChatCompletionRequestMessage
   */
  'role': ChatCompletionRequestMessageRoleEnum;
  /**
   * The contents of the message. `content` is required for all messages except assistant messages with function calls.
   * @type {string}
   * @memberof ChatCompletionRequestMessage
   */
  'content'?: string | Content[];
  /**
   * The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
   * @type {string}
   * @memberof ChatCompletionRequestMessage
   */
  'name'?: string;
  /**
   *
   * @type {ChatCompletionRequestMessageFunctionCall}
   * @memberof ChatCompletionRequestMessage
   */
  'function_call'?: ChatCompletionRequestMessageFunctionCall;
}
111 |
// Local replacement for the SDK's request type, using the widened message type
// above. NOTE(review): generic parameters appear stripped by extraction — the
// bare `Array` below was presumably `Array<ChatCompletionRequestMessage>` and
// `Array<ChatCompletionFunctions>`; restore before compiling.
export interface CreateChatCompletionRequest {
  /**
   * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
   * @type {string}
   * @memberof CreateChatCompletionRequest
   */
  'model': string;
  /**
   * A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).
   * @type {Array}
   * @memberof CreateChatCompletionRequest
   */
  'messages': Array;
  /**
   * A list of functions the model may generate JSON inputs for.
   * @type {Array}
   * @memberof CreateChatCompletionRequest
   */
  'functions'?: Array;
  /**
   *
   * @type {CreateChatCompletionRequestFunctionCall}
   * @memberof CreateChatCompletionRequest
   */
  'function_call'?: CreateChatCompletionRequestFunctionCall;
  /**
   * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
   * @type {number}
   * @memberof CreateChatCompletionRequest
   */
  'temperature'?: number | null;
  /**
   * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
   * @type {number}
   * @memberof CreateChatCompletionRequest
   */
  'top_p'?: number | null;
  /**
   * How many chat completion choices to generate for each input message.
   * @type {number}
   * @memberof CreateChatCompletionRequest
   */
  'n'?: number | null;
  /**
   * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
   * @type {boolean}
   * @memberof CreateChatCompletionRequest
   */
  'stream'?: boolean | null;
  /**
   *
   * @type {CreateChatCompletionRequestStop}
   * @memberof CreateChatCompletionRequest
   */
  'stop'?: CreateChatCompletionRequestStop;
  /**
   * The maximum number of [tokens](/tokenizer) to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model\'s context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
   * @type {number}
   * @memberof CreateChatCompletionRequest
   */
  'max_tokens'?: number;
  /**
   * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
   * @type {number}
   * @memberof CreateChatCompletionRequest
   */
  'presence_penalty'?: number | null;
  /**
   * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
   * @type {number}
   * @memberof CreateChatCompletionRequest
   */
  'frequency_penalty'?: number | null;
  /**
   * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
   * @type {object}
   * @memberof CreateChatCompletionRequest
   */
  'logit_bias'?: object | null;
  /**
   * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
   * @type {string}
   * @memberof CreateChatCompletionRequest
   */
  'user'?: string;
}
198 |
// Shape of one SSE chunk in a streamed chat completion: like the non-streaming
// response but each choice carries an incremental `delta` instead of a full message.
// NOTE(review): the bare `Array` below presumably lost its element type
// (`Array<StreamChatCompletionResponseChoicesInner>`) during extraction.
export interface StreamChatCompletionResponse extends CreateChatCompletionResponse {
  /**
   *
   * @type {Array}
   * @memberof StreamChatCompletionResponse
   */
  'choices': Array;
}

interface StreamChatCompletionResponseChoicesInner extends CreateChatCompletionResponseChoicesInner {
  // Incremental message fragment for this chunk (role and/or partial content).
  delta?: ChatCompletionResponseMessage;
}
211 |
--------------------------------------------------------------------------------
/tiktoken-node/index.js:
--------------------------------------------------------------------------------
1 | /* tslint:disable */
2 | /* eslint-disable */
3 | /* prettier-ignore */
4 |
5 | /* auto-generated by NAPI-RS */
6 |
7 | const { existsSync, readFileSync } = require('fs')
8 | const { join } = require('path')
9 |
// Host platform/architecture drive which prebuilt .node binary is loaded below.
const { platform, arch } = process

// Populated by the platform switch below: the loaded native module, whether a
// local build artifact was found, and the last load error (if any).
let nativeBinding = null
let localFileExisted = false
let loadError = null
15 |
// Detects a musl-based libc (e.g. Alpine) to pick the right Linux binary.
// On Node >= 12 this reads the process report; on older Node it inspects the
// system's `ldd` binary, treating any failure as musl (best-effort fallback).
function isMusl() {
  // For Node 10
  if (!process.report || typeof process.report.getReport !== 'function') {
    try {
      const lddPath = require('child_process').execSync('which ldd').toString().trim()
      return readFileSync(lddPath, 'utf8').includes('musl')
    } catch (e) {
      return true
    }
  } else {
    // glibcVersionRuntime is absent on musl systems.
    const { glibcVersionRuntime } = process.report.getReport().header
    return !glibcVersionRuntime
  }
}
30 |
// Select and load the native addon for the current platform/arch pair.
// For every target we first look for a locally built `*.node` file sitting
// next to this loader (dev builds), then fall back to the published
// `@anysphere/tiktoken-node-<target>` package. Any require() failure is
// captured in `loadError` and rethrown at the bottom only if nothing loaded.
// NOTE(review): this file is auto-generated by NAPI-RS (`prettier-ignore`
// above) — manual edits will be lost on regeneration.
switch (platform) {
  case 'android':
    switch (arch) {
      case 'arm64':
        localFileExisted = existsSync(join(__dirname, 'tiktoken-node.android-arm64.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./tiktoken-node.android-arm64.node')
          } else {
            nativeBinding = require('@anysphere/tiktoken-node-android-arm64')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'arm':
        localFileExisted = existsSync(join(__dirname, 'tiktoken-node.android-arm-eabi.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./tiktoken-node.android-arm-eabi.node')
          } else {
            nativeBinding = require('@anysphere/tiktoken-node-android-arm-eabi')
          }
        } catch (e) {
          loadError = e
        }
        break
      default:
        throw new Error(`Unsupported architecture on Android ${arch}`)
    }
    break
  case 'win32':
    switch (arch) {
      case 'x64':
        localFileExisted = existsSync(
          join(__dirname, 'tiktoken-node.win32-x64-msvc.node')
        )
        try {
          if (localFileExisted) {
            nativeBinding = require('./tiktoken-node.win32-x64-msvc.node')
          } else {
            nativeBinding = require('@anysphere/tiktoken-node-win32-x64-msvc')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'ia32':
        localFileExisted = existsSync(
          join(__dirname, 'tiktoken-node.win32-ia32-msvc.node')
        )
        try {
          if (localFileExisted) {
            nativeBinding = require('./tiktoken-node.win32-ia32-msvc.node')
          } else {
            nativeBinding = require('@anysphere/tiktoken-node-win32-ia32-msvc')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'arm64':
        localFileExisted = existsSync(
          join(__dirname, 'tiktoken-node.win32-arm64-msvc.node')
        )
        try {
          if (localFileExisted) {
            nativeBinding = require('./tiktoken-node.win32-arm64-msvc.node')
          } else {
            nativeBinding = require('@anysphere/tiktoken-node-win32-arm64-msvc')
          }
        } catch (e) {
          loadError = e
        }
        break
      default:
        throw new Error(`Unsupported architecture on Windows: ${arch}`)
    }
    break
  case 'darwin':
    // Try the universal (fat) binary first. On success the `break` inside the
    // try exits the outer switch; on any failure the empty catch swallows the
    // error and we fall through to the arch-specific binaries below.
    localFileExisted = existsSync(join(__dirname, 'tiktoken-node.darwin-universal.node'))
    try {
      if (localFileExisted) {
        nativeBinding = require('./tiktoken-node.darwin-universal.node')
      } else {
        nativeBinding = require('@anysphere/tiktoken-node-darwin-universal')
      }
      break
    } catch {}
    switch (arch) {
      case 'x64':
        localFileExisted = existsSync(join(__dirname, 'tiktoken-node.darwin-x64.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./tiktoken-node.darwin-x64.node')
          } else {
            nativeBinding = require('@anysphere/tiktoken-node-darwin-x64')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'arm64':
        localFileExisted = existsSync(
          join(__dirname, 'tiktoken-node.darwin-arm64.node')
        )
        try {
          if (localFileExisted) {
            nativeBinding = require('./tiktoken-node.darwin-arm64.node')
          } else {
            nativeBinding = require('@anysphere/tiktoken-node-darwin-arm64')
          }
        } catch (e) {
          loadError = e
        }
        break
      default:
        throw new Error(`Unsupported architecture on macOS: ${arch}`)
    }
    break
  case 'freebsd':
    if (arch !== 'x64') {
      throw new Error(`Unsupported architecture on FreeBSD: ${arch}`)
    }
    localFileExisted = existsSync(join(__dirname, 'tiktoken-node.freebsd-x64.node'))
    try {
      if (localFileExisted) {
        nativeBinding = require('./tiktoken-node.freebsd-x64.node')
      } else {
        nativeBinding = require('@anysphere/tiktoken-node-freebsd-x64')
      }
    } catch (e) {
      loadError = e
    }
    break
  case 'linux':
    switch (arch) {
      // On Linux each arch splits again on libc flavour (musl vs glibc).
      case 'x64':
        if (isMusl()) {
          localFileExisted = existsSync(
            join(__dirname, 'tiktoken-node.linux-x64-musl.node')
          )
          try {
            if (localFileExisted) {
              nativeBinding = require('./tiktoken-node.linux-x64-musl.node')
            } else {
              nativeBinding = require('@anysphere/tiktoken-node-linux-x64-musl')
            }
          } catch (e) {
            loadError = e
          }
        } else {
          localFileExisted = existsSync(
            join(__dirname, 'tiktoken-node.linux-x64-gnu.node')
          )
          try {
            if (localFileExisted) {
              nativeBinding = require('./tiktoken-node.linux-x64-gnu.node')
            } else {
              nativeBinding = require('@anysphere/tiktoken-node-linux-x64-gnu')
            }
          } catch (e) {
            loadError = e
          }
        }
        break
      case 'arm64':
        if (isMusl()) {
          localFileExisted = existsSync(
            join(__dirname, 'tiktoken-node.linux-arm64-musl.node')
          )
          try {
            if (localFileExisted) {
              nativeBinding = require('./tiktoken-node.linux-arm64-musl.node')
            } else {
              nativeBinding = require('@anysphere/tiktoken-node-linux-arm64-musl')
            }
          } catch (e) {
            loadError = e
          }
        } else {
          localFileExisted = existsSync(
            join(__dirname, 'tiktoken-node.linux-arm64-gnu.node')
          )
          try {
            if (localFileExisted) {
              nativeBinding = require('./tiktoken-node.linux-arm64-gnu.node')
            } else {
              nativeBinding = require('@anysphere/tiktoken-node-linux-arm64-gnu')
            }
          } catch (e) {
            loadError = e
          }
        }
        break
      case 'arm':
        localFileExisted = existsSync(
          join(__dirname, 'tiktoken-node.linux-arm-gnueabihf.node')
        )
        try {
          if (localFileExisted) {
            nativeBinding = require('./tiktoken-node.linux-arm-gnueabihf.node')
          } else {
            nativeBinding = require('@anysphere/tiktoken-node-linux-arm-gnueabihf')
          }
        } catch (e) {
          loadError = e
        }
        break
      default:
        throw new Error(`Unsupported architecture on Linux: ${arch}`)
    }
    break
  default:
    throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`)
}

if (!nativeBinding) {
  // Prefer rethrowing the captured require() failure — it says *why*.
  if (loadError) {
    throw loadError
  }
  throw new Error(`Failed to load native binding`)
}
254 |
// Re-export the native module's public surface as named CommonJS exports.
module.exports.SupportedEncoding = nativeBinding.SupportedEncoding
module.exports.Tokenizer = nativeBinding.Tokenizer
module.exports.SpecialTokenAction = nativeBinding.SpecialTokenAction
module.exports.SyncTokenizer = nativeBinding.SyncTokenizer
module.exports.getTokenizer = nativeBinding.getTokenizer
262 |
--------------------------------------------------------------------------------
/priompt/src/types.d.ts:
--------------------------------------------------------------------------------
1 |
2 | // First picks out the first child (in order) that is prioritized enough
3 |
4 | import { JSONSchema7 } from 'json-schema';
5 | import { ChatCompletionResponseMessage } from 'openai';
6 | import { UsableTokenizer } from './openai';
7 |
// A named function exposed to the model, described by a JSON schema.
export type FunctionBody = {
	name: string;
	description: string;
	parameters: JSONSchema7;
}

// It is a REQUIREMENT that the children have decreasing token counts
// Picks out the first child (in order) that is prioritized enough.
export type First = {
	type: 'first';
	children: Scope[];
	// called when this element is ejected from / included in the rendered prompt
	onEject?: () => void;
	onInclude?: () => void;
};

// Reserves `tokenCount` tokens without producing any content.
export type Empty = {
	type: 'empty';
	tokenCount: number;
};

// Marker element with no props; NOTE(review): presumably forces a token
// boundary at this point (cf. the PromptString comment below) — confirm in renderer.
export type BreakToken = {
	type: 'breaktoken';
};

// Carries response handlers (see CaptureProps).
export type Capture = {
	type: 'capture';
} & CaptureProps;

// Attaches request-level settings (see ConfigProps).
export type Config = {
	type: 'config';
} & ConfigProps;

export type ConfigProps = {
	maxResponseTokens?: number | "tokensReserved" | "tokensRemaining";
	// at most 4 of these
	stop?: string | string[];
}

// Subtree rendered with its own tokenLimit (see IsolateProps).
export type Isolate = {
	type: 'isolate';
	children: Node[];
	// cached result of a previous render of this subtree
	cachedRenderOutput?: RenderOutput;
} & IsolateProps;

// An image attached to the prompt (see ImageProps).
export type ChatImage = {
	type: 'image';
} & ImageProps;
55 | // TODO: make the Capture work for other kinds of completions that aren't chat and aren't openai
56 | export type CaptureProps = {
57 | onOutput?: OutputHandler;
58 | onStream?: OutputHandler>;
59 | }
60 |
export type IsolateProps = {
	// token budget for the isolated subtree
	tokenLimit: number;
}

export type ImageProps = {
	// raw image bytes; encoding is not constrained here
	bytes: Uint8Array;
	// resolution hint, same values as ImagePromptContent.image_url.detail
	detail: 'low' | 'high' | 'auto';
	dimensions: {
		width: number;
		height: number;
	};

}

// the scope will exist iff the final priority is lower than the priority here
// it shouldn't be the case that both the relative priority and the absolute priority is set
export type Scope = {
	type: 'scope';
	children: Node[];
	// absolute priority takes precedence over relative priority
	absolutePriority: number | undefined;
	// relativePriority is relative to the parent of this scope
	// it should always be negative (or else it will not be displayed)
	relativePriority: number | undefined;
	// called when the scope is ejected from / included in the rendered prompt
	onEject?: () => void;
	onInclude?: () => void;
};

// Chat message authored by the user or the system.
export type ChatUserSystemMessage = {
	type: 'chat';
	role: 'user' | 'system';
	name?: string;
	to?: string;
	children: Node[];
}

// Chat message authored by the assistant; may carry a function call.
export type ChatAssistantMessage = {
	type: 'chat';
	role: 'assistant';
	to?: string;
	children: Node[]; // can be empty!

	// the functionCall is provided by the assistant
	functionCall?: {
		name: string;
		arguments: string; // json string
	};
}

// Result of a function call, reported back to the model under the function's name.
export type ChatFunctionResultMessage = {
	type: 'chat';
	role: 'function';
	name: string;
	to?: string;
	children: Node[];
}

// Result of a tool call, reported back to the model under the tool's name.
export type ChatToolResultMessage = {
	type: 'chat';
	role: 'tool';
	name: string;
	to?: string;
	children: Node[];
}

export type ChatMessage = ChatUserSystemMessage | ChatFunctionResultMessage | ChatToolResultMessage | ChatAssistantMessage;

// Declares a callable function to the model (name + JSON-schema parameters).
export type FunctionDefinition = {
	type: 'functionDefinition';
	name: string;
	description: string;
	parameters: JSONSchema7;
}

// Any renderable node; plain strings/numbers and null/undefined/false are permitted.
export type Node = FunctionDefinition | BreakToken | First | Isolate | Capture | Config | Scope | Empty | ChatMessage | ChatImage | string | null | undefined | number | false;

export type PromptElement = Node[] | Node;

// Props accepted by every intrinsic element.
export type BaseProps = {
	// absolute priority takes precedence over relative priority
	// maximum supported priority level is 1e6
	p?: number;
	prel?: number;
	// TODO: add a max (token count) here. the max functions as follows:
	// first we optimize over the outest token count scope. if any max exceeds its token count, it is capped to the token count. once we have a global solution we seek the local solution
	// this works, but leads to something that may be a little bit weird: something of priority 1000 in a maxed out scope is not included while something with a priority of 0 outside the maxed out scope is included. but that's fine. i guess the whole point of the max is to break the global opptimization
	children?: PromptElement[] | PromptElement;
	onEject?: () => void;
	onInclude?: () => void;
};
151 |
152 | export type ReturnProps = {
153 | onReturn: OutputHandler;
154 | }
155 |
156 | type BasePromptProps> = (keyof T extends never ? BaseProps : BaseProps & T);
157 | export type PromptProps, ReturnT = never> = ([ReturnT] extends [never] ? BasePromptProps : BasePromptProps & ReturnProps);
158 |
159 | export namespace JSX {
160 | interface IntrinsicElements {
161 | scope: BaseProps;
162 | br: Omit;
163 | hr: Omit;
164 | breaktoken: Omit;
165 | // automatically use a certain number of tokens (useful for leaving space for the model to give its answer)
166 | empty: BaseProps & { tokens: number; };
167 | first: Omit, 'prel'>;
168 | capture: Omit & CaptureProps;
169 | isolate: BaseProps & IsolateProps;
170 | config: Omit & ConfigProps;
171 | }
172 | type Element = PromptElement;
173 | interface ElementAttributesProperty {
174 | props: BaseProps; // specify the property name to use
175 | }
176 | }
177 |
// if prompt string is a list of strings, then those strings should be tokenized independently
// this prevents tokens from crossing the boundary between strings, which is useful for things when you
// need exact copying
export type PromptString = string | string[];

// A rendered prompt body plus any images it references.
export type PromptContentWrapper = {
	type: 'prompt_content',
	content: PromptString;
	images?: ImagePromptContent[];
}

export type TextPromptContent = {
	type: 'text',
	text: string
}
export type ImagePromptContent = {
	type: 'image',
	image_url: {
		url: string;
		detail: 'low' | 'high' | 'auto';
		dimensions: {
			width: number;
			height: number;
		}
	}
}
export type PromptContent = TextPromptContent | ImagePromptContent;

// Fully rendered chat messages (content is already a PromptString, not Nodes).
export type ChatPromptSystemMessage = {
	role: 'system';
	name?: string;
	to?: string | undefined;
	content: PromptString;
}

export type ChatPromptUserMessage = {
	role: 'user';
	name?: string;
	to?: string | undefined;
	content: PromptString;
	images?: ImagePromptContent[];
}

export type ChatPromptAssistantMessage = {
	role: 'assistant';
	to?: string | undefined;
	// content is optional: an assistant message may consist solely of a function call
	content?: PromptString;
	functionCall?: {
		name: string;
		arguments: string; // json string
	}
}

export type ChatPromptFunctionResultMessage = {
	role: 'function';
	name: string;
	to?: string | undefined;
	content: PromptString;
};

export type ChatPromptToolResultMessage = {
	role: 'tool';
	name: string;
	to?: string | undefined;
	content: PromptString;
};

export type ChatPromptMessage = ChatPromptSystemMessage | ChatPromptUserMessage | ChatPromptAssistantMessage | ChatPromptFunctionResultMessage | ChatPromptToolResultMessage;

export type ChatPrompt = {
	type: 'chat';
	messages: ChatPromptMessage[];
}

export type TextPrompt = {
	type: 'text';
	text: PromptString;
}

// Function definition in the shape the chat/function-calling API expects.
export type ChatAndFunctionPromptFunction = {
	name: string;
	description: string;
	parameters: JSONSchema7;
}

export type FunctionPrompt = {
	functions: ChatAndFunctionPromptFunction[];
}
266 |
267 | // the p is used to specify the priority of the handler
268 | // higher priority handler will be called first in case there are multiple
269 | export type OutputHandler = (output: T, options?: { p?: number }) => Promise;
270 |
271 | export type RenderedPrompt = PromptString | ChatPrompt | (ChatPrompt & FunctionPrompt) | (TextPrompt & FunctionPrompt) | PromptContentWrapper;
272 |
273 | export type Prompt = (props: PromptProps) => (PromptElement | Promise);
274 | export type SynchronousPrompt = (props: PromptProps) => (PromptElement);
275 |
// TODO: should the components have access to the token limit?
// argument against: no, it should all be responsive to the token limit and we shouldn't need this
// argument for: CSS has media queries because it is very hard to have something that's fully responsive without changing any of the layout
// decision: wait for now, see if it is needed
export type RenderOptions = {
	tokenLimit: number;
	tokenizer: UsableTokenizer;
	// fast approximate counting; as the name warns, it may incorrectly report
	// that the prompt exceeds the token limit
	countTokensFast_UNSAFE_CAN_THROW_TOOMANYTOKENS_INCORRECTLY?: boolean;

	// if it is, then we need to count tokens differently
	lastMessageIsIncomplete?: boolean;
};
288 | export type RenderOutput = {
289 | prompt: RenderedPrompt;
290 | tokenCount: number;
291 | tokenLimit: number;
292 | tokenizer: UsableTokenizer;
293 | tokensReserved: number;
294 | priorityCutoff: number;
295 | outputHandlers: OutputHandler[];
296 | streamHandlers: OutputHandler>[];
297 | config: ConfigProps;
298 | durationMs?: number;
299 | };
300 |
--------------------------------------------------------------------------------
/priompt/src/components.test.tsx:
--------------------------------------------------------------------------------
1 | import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
2 | import * as Priompt from "./index";
3 | import {
4 | isChatPrompt,
5 | isPlainPrompt,
6 | promptHasFunctions,
7 | promptToOpenAIChatMessages,
8 | render,
9 | } from "./lib";
10 | import {
11 | AssistantMessage,
12 | Function,
13 | FunctionMessage,
14 | ToolResultMessage,
15 | ImageComponent,
16 | SystemMessage,
17 | UserMessage,
18 | } from "./components";
19 | import { PromptElement, PromptProps } from "./types";
20 |
21 | describe("SystemMessage", () => {
22 | function TestSystemMessage(props: PromptProps): PromptElement {
23 | return hi this is a system message;
24 | }
25 |
26 | it("should create a chat message", async () => {
27 | const rendered = await render(TestSystemMessage({}), {
28 | tokenLimit: 1000,
29 | tokenizer: "cl100k_base",
30 | });
31 | expect(isChatPrompt(rendered.prompt)).toBe(true);
32 | });
33 |
34 | function TestSystemMessageWithName(
35 | props: PromptProps<{ systemName?: string; userName?: string }>
36 | ): PromptElement {
37 | return (
38 | <>
39 |
40 | hi this is a system message
41 |
42 |
43 | hi this is a user message
44 |
45 | hi this is a user message
46 | >
47 | );
48 | }
49 |
50 | it("should test the 'name' field of the SystemMessage", async () => {
51 | const rendered = await render(
52 | TestSystemMessageWithName({ systemName: "TestName", userName: "carl" }),
53 | {
54 | tokenLimit: 1000,
55 | tokenizer: "cl100k_base",
56 | }
57 | );
58 | expect(isChatPrompt(rendered.prompt)).toBe(true);
59 | if (!isChatPrompt(rendered.prompt)) return;
60 | expect(rendered.prompt.messages[0].role === "system").toBe(true);
61 | if (rendered.prompt.messages[0].role !== "system") return;
62 | expect(rendered.prompt.messages[0].name).toBe("TestName");
63 | expect(rendered.prompt.messages[1].role === "user").toBe(true);
64 | if (rendered.prompt.messages[1].role !== "user") return;
65 | expect(rendered.prompt.messages[1].name).toBe("carl");
66 | expect(rendered.prompt.messages[2].role === "user").toBe(true);
67 | if (rendered.prompt.messages[2].role !== "user") return;
68 | expect(rendered.prompt.messages[2].name).toBeUndefined();
69 |
70 | const openAIPrompt = promptToOpenAIChatMessages(rendered.prompt);
71 | expect(openAIPrompt[0].role === "system").toBe(true);
72 | if (openAIPrompt[0].role !== "system") return;
73 | expect(openAIPrompt[0].name).toBe("TestName");
74 | expect(openAIPrompt[1].role === "user").toBe(true);
75 | if (openAIPrompt[1].role !== "user") return;
76 | expect(openAIPrompt[1].name).toBe("carl");
77 | expect(openAIPrompt[2].role === "user").toBe(true);
78 | if (openAIPrompt[2].role !== "user") return;
79 | expect(openAIPrompt[2].name).toBeUndefined();
80 | });
81 | });
82 |
83 | describe("Function", () => {
84 | function TestFunction(props: PromptProps): PromptElement {
85 | return (
86 | <>
87 |
101 | say hi
102 | >
103 | );
104 | }
105 |
106 | it("should create a function message", async () => {
107 | const rendered = await render(TestFunction({}), {
108 | tokenLimit: 1000,
109 | tokenizer: "cl100k_base",
110 | });
111 | expect(isChatPrompt(rendered.prompt)).toBe(true);
112 | expect(promptHasFunctions(rendered.prompt)).toBe(true);
113 | if (!promptHasFunctions(rendered.prompt)) return;
114 | expect(rendered.prompt.functions).toEqual([
115 | {
116 | name: "echo",
117 | description: "Echo a message to the user.",
118 | parameters: {
119 | type: "object",
120 | properties: {
121 | message: {
122 | type: "string",
123 | description: "The message to echo.",
124 | },
125 | },
126 | required: ["message"],
127 | },
128 | },
129 | ]);
130 | });
131 | });
132 |
133 | describe("All kinds of messages", () => {
134 | function TestAllMessages(props: PromptProps): PromptElement {
135 | return (
136 | <>
137 |
151 | System message
152 | User message
153 |
159 | this is a test echo
160 | {/* // eslint-disable-next-line @typescript-eslint/ban-ts-comment
161 | // @ts-ignore */}
162 | print("Hello world!")
163 | {/* // eslint-disable-next-line @typescript-eslint/ban-ts-comment
164 | // @ts-ignore */}
165 |
166 | Hello world!
167 |
168 | >
169 | );
170 | }
171 |
172 | it("should create all kinds of messages", async () => {
173 | const rendered = await render(TestAllMessages({}), {
174 | tokenLimit: 1000,
175 | tokenizer: "cl100k_base",
176 | });
177 | expect(isChatPrompt(rendered.prompt)).toBe(true);
178 | if (!isChatPrompt(rendered.prompt)) return;
179 | expect(promptHasFunctions(rendered.prompt)).toBe(true);
180 | if (!promptHasFunctions(rendered.prompt)) return;
181 | expect(rendered.prompt.functions).toEqual([
182 | {
183 | name: "echo",
184 | description: "Echo a message to the user.",
185 | parameters: {
186 | type: "object",
187 | properties: {
188 | message: {
189 | type: "string",
190 | description: "The message to echo.",
191 | },
192 | },
193 | required: ["message"],
194 | },
195 | },
196 | ]);
197 | expect(rendered.prompt.messages).toEqual([
198 | {
199 | role: "system",
200 | content: "System message",
201 | },
202 | {
203 | role: "user",
204 | content: "User message",
205 | },
206 | {
207 | role: "assistant",
208 | functionCall: {
209 | name: "echo",
210 | arguments: '{"message": "this is a test echo"}',
211 | },
212 | },
213 | {
214 | role: "function",
215 | name: "echo",
216 | content: "this is a test echo",
217 | },
218 | {
219 | role: "assistant",
220 | to: "python",
221 | content: 'print("Hello world!")',
222 | },
223 | {
224 | role: "tool",
225 | name: "python",
226 | to: "all",
227 | content: "Hello world!",
228 | },
229 | ]);
230 | const openaiMessage = promptToOpenAIChatMessages(rendered.prompt);
231 | expect(openaiMessage.length).toBe(6);
232 | expect(openaiMessage[0].role).toBe("system");
233 | expect(openaiMessage[1].role).toBe("user");
234 | expect(openaiMessage[2].role).toBe("assistant");
235 | expect(openaiMessage[3].role).toBe("function");
236 | expect(openaiMessage[4].role).toBe("assistant");
237 | // the tool shall not be sent to openai! it's unsupported
238 | expect(openaiMessage[5].role).toBe("system");
239 | // assert none of them contain "to"
240 | expect("to" in openaiMessage[4]).toBe(false);
241 | expect("to" in openaiMessage[5]).toBe(false);
242 | });
243 | });
244 |
// NOTE(review): the JSX in this block was destroyed by extraction — the
// <ImageComponent .../> props (original lines 253-257) and all surrounding
// tags are missing, and the `{""}` fragments were presumably `{"\n"}` given
// the expected content below. Do NOT edit from this copy; restore from VCS.
describe("Images", () => {
  function TestImageMessage(props: PromptProps): PromptElement {
    return (
      <>
      System message

      {/* // eslint-disable-next-line @typescript-eslint/ban-ts-comment
      // @ts-ignore */}

      If the instructions mention this image, use it to help you write the
      code with the utmost precision and detail.

      {""}

      TEST, THIS IS A TEST,

      {""}


      >
    );
  }

  it("should create all kinds of messages", async () => {
    const rendered = await render(TestImageMessage({}), {
      tokenLimit: 1000,
      tokenizer: "cl100k_base",
    });
    expect(isChatPrompt(rendered.prompt)).toBe(true);
    if (!isChatPrompt(rendered.prompt)) return;
    // make sure the prompt string part is in the right order
    expect(rendered.prompt.messages[1].content).toBe(
      "\n" +
      "If the instructions mention this image, use it to help you write the code with the utmost precision and detail.\n" +
      "\n" +
      "TEST, THIS IS A TEST,\n" +
      "\n"
    );
  });
});
290 |
--------------------------------------------------------------------------------
/tiktoken-node/src/lib.rs:
--------------------------------------------------------------------------------
1 | use anyhow::Context;
2 | use napi::bindgen_prelude::Error;
3 | use napi::bindgen_prelude::FromNapiValue;
4 | use napi::bindgen_prelude::ToNapiValue;
5 | use napi_derive::napi;
6 | use std::collections::HashMap;
7 |
8 | // we use the actor pattern to have good cache locality
9 | // this means that no tokenization requests will ever run in parallel, but i think that's almost certainly fine
10 | use napi::tokio::sync::{mpsc, oneshot};
11 |
/// Encodings this native module can serve. Explicit discriminants keep the
/// values stable across the napi boundary.
#[napi]
pub enum SupportedEncoding {
    Cl100k = 0,
}
16 |
17 | struct TokenizerActor {
18 | receiver: mpsc::Receiver,
19 | cl100k_encoding: tiktoken::Encoding,
20 | }
21 | enum TokenizerMessage {
22 | ExactNumTokens {
23 | respond_to: oneshot::Sender>,
24 | text: String,
25 | encoding: SupportedEncoding,
26 | special_token_handling: tiktoken::SpecialTokenHandling,
27 | },
28 | EncodeTokens {
29 | respond_to: oneshot::Sender>>,
30 | text: String,
31 | encoding: SupportedEncoding,
32 | special_token_handling: tiktoken::SpecialTokenHandling,
33 | },
34 | // always encodes all special tokens!
35 | EncodeSingleToken {
36 | respond_to: oneshot::Sender>,
37 | bytes: Vec,
38 | encoding: SupportedEncoding,
39 | },
40 | DecodeTokens {
41 | respond_to: oneshot::Sender>,
42 | tokens: Vec,
43 | encoding: SupportedEncoding,
44 | },
45 | DecodeTokenBytes {
46 | respond_to: oneshot::Sender>>,
47 | token: u32,
48 | encoding: SupportedEncoding,
49 | },
50 | ApproximateNumTokens {
51 | respond_to: oneshot::Sender>,
52 | text: String,
53 | encoding: SupportedEncoding,
54 | },
55 | }
56 |
57 | impl TokenizerActor {
58 | fn new(
59 | receiver: mpsc::Receiver,
60 | ) -> Result {
61 | let cl100k_encoding = tiktoken::EncodingFactory::cl100k_im()?;
62 | Ok(TokenizerActor { receiver, cl100k_encoding })
63 | }
64 | fn handle_message(&mut self, msg: TokenizerMessage) {
65 | match msg {
66 | TokenizerMessage::ExactNumTokens { respond_to, text, encoding, special_token_handling } => {
67 | let enc = match encoding {
68 | SupportedEncoding::Cl100k => &self.cl100k_encoding,
69 | };
70 |
71 | let tokens = enc.encode(&text, &special_token_handling).context("Error encoding string");
72 |
73 | let num_tokens = match tokens {
74 | Ok(t) => Ok(t.len() as i32),
75 | Err(e) => Err(e),
76 | };
77 |
78 | // The `let _ =` ignores any errors when sending.
79 | let _ = respond_to.send(num_tokens);
80 | }
81 | TokenizerMessage::EncodeTokens { respond_to, text, encoding, special_token_handling } => {
82 | let enc = match encoding {
83 | SupportedEncoding::Cl100k => &self.cl100k_encoding,
84 | };
85 |
86 | let tokens = enc.encode(&text, &special_token_handling).context("Error encoding string");
87 |
88 | let tokens = match tokens {
89 | Ok(t) => Ok(t.into_iter().map(|t| t as u32).collect()),
90 | Err(e) => Err(e),
91 | };
92 |
93 | // The `let _ =` ignores any errors when sending.
94 | let _ = respond_to.send(tokens);
95 | }
96 | TokenizerMessage::EncodeSingleToken { respond_to, bytes, encoding } => {
97 | let enc = match encoding {
98 | SupportedEncoding::Cl100k => &self.cl100k_encoding,
99 | };
100 |
101 | let token = enc.encode_single_token_bytes(&bytes);
102 |
103 | let token = match token {
104 | Ok(t) => Ok(t as u32),
105 | Err(_) => Err(anyhow::anyhow!("Token not recognized")),
106 | };
107 |
108 | // The `let _ =` ignores any errors when sending.
109 | let _ = respond_to.send(token);
110 | }
111 | TokenizerMessage::DecodeTokenBytes { respond_to, token, encoding } => {
112 | let enc = match encoding {
113 | SupportedEncoding::Cl100k => &self.cl100k_encoding,
114 | };
115 | let bytes = enc.decode_single_token_bytes(token as usize);
116 | let bytes = match bytes {
117 | Ok(b) => Ok(b),
118 | Err(e) => Err(anyhow::anyhow!(e)),
119 | };
120 | let _ = respond_to.send(bytes);
121 | }
122 | TokenizerMessage::DecodeTokens { respond_to, tokens, encoding } => {
123 | let enc = match encoding {
124 | SupportedEncoding::Cl100k => &self.cl100k_encoding,
125 | };
126 |
127 | let text = enc.decode(&tokens.into_iter().map(|t| t as usize).collect::>());
128 |
129 | // The `let _ =` ignores any errors when sending.
130 | let _ = respond_to.send(Ok(text));
131 | }
132 | TokenizerMessage::ApproximateNumTokens { respond_to, text, encoding } => {
133 | let enc = match encoding {
134 | SupportedEncoding::Cl100k => &self.cl100k_encoding,
135 | };
136 |
137 | let tokens = enc.estimate_num_tokens_no_special_tokens_fast(&text);
138 |
139 | // The `let _ =` ignores any errors when sending.
140 | let _ = respond_to.send(Ok(tokens as i32));
141 | }
142 | }
143 | }
144 | }
145 |
// Actor loop: drains the channel until every sender handle is dropped.
// blocking_recv is appropriate here — this is run on a spawn_blocking thread
// (see Tokenizer::new), not on the async runtime.
fn run_tokenizer_actor(mut actor: TokenizerActor) {
    while let Some(msg) = actor.receiver.blocking_recv() {
        actor.handle_message(msg);
    }
}
151 |
152 | #[napi]
153 | #[derive(Clone)]
154 | pub struct Tokenizer {
155 | sender: mpsc::Sender,
156 | }
157 |
/// How special tokens in the input text should be treated during encoding.
#[napi]
pub enum SpecialTokenAction {
    /// The special token is forbidden. If it is included in the string, an error will be returned.
    Forbidden = 0,
    /// The special token is tokenized as normal text.
    NormalText = 1,
    /// The special token is treated as the special token it is. If this is applied to a specific text and the text is NOT a special token then an error will be returned. If it is the default action no error will be returned, don't worry.
    Special = 2,
}

impl SpecialTokenAction {
    /// Maps the napi-exposed enum onto the equivalent tiktoken variant.
    pub fn to_tiktoken(&self) -> tiktoken::SpecialTokenAction {
        match self {
            SpecialTokenAction::Forbidden => tiktoken::SpecialTokenAction::Forbidden,
            SpecialTokenAction::NormalText => tiktoken::SpecialTokenAction::NormalText,
            SpecialTokenAction::Special => tiktoken::SpecialTokenAction::Special,
        }
    }
}
177 |
178 | #[napi]
179 | impl Tokenizer {
180 | pub fn new() -> Result {
181 | // we allow 100 outstanding requests before we fail
182 | // ideally we should never hit this limit... queueing up would be bad
183 | let (sender, receiver) = mpsc::channel(100);
184 | let actor = TokenizerActor::new(receiver)?;
185 | napi::tokio::task::spawn_blocking(move || run_tokenizer_actor(actor));
186 |
187 | Ok(Self { sender })
188 | }
189 |
190 | #[napi]
191 | pub async fn exact_num_tokens_cl100k_no_special_tokens(
192 | &self,
193 | text: String,
194 | ) -> Result {
195 | let (send, recv) = oneshot::channel();
196 | let msg = TokenizerMessage::ExactNumTokens {
197 | respond_to: send,
198 | text,
199 | encoding: SupportedEncoding::Cl100k,
200 | special_token_handling: tiktoken::SpecialTokenHandling {
201 | // no special tokens!! everything is normal text
202 | // this is how tokenization is handled in the chat model api
203 | default: tiktoken::SpecialTokenAction::NormalText,
204 | ..Default::default()
205 | },
206 | };
207 |
208 | // Ignore send errors. If this send fails, so does the
209 | // recv.await below. There's no reason to check for the
210 | // same failure twice.
211 | let _ = self.sender.send(msg).await;
212 | match recv.await {
213 | Ok(result) => result.map_err(|e| Error::from_reason(e.to_string())),
214 | Err(e) => Err(Error::from_reason(format!("Actor task has been killed: {}", e.to_string()))),
215 | }
216 | }
217 |
218 | #[napi]
219 | pub async fn exact_num_tokens(
220 | &self,
221 | text: String,
222 | encoding: SupportedEncoding,
223 | special_token_default_action: SpecialTokenAction,
224 | special_token_overrides: HashMap,
225 | ) -> Result {
226 | let (send, recv) = oneshot::channel();
227 | let msg = TokenizerMessage::ExactNumTokens {
228 | respond_to: send,
229 | text,
230 | encoding,
231 | special_token_handling: tiktoken::SpecialTokenHandling {
232 | // no special tokens!! everything is normal text
233 | // this is how tokenization is handled in the chat model api
234 | default: special_token_default_action.to_tiktoken(),
235 | overrides: special_token_overrides.into_iter().map(|(k, v)| (k, v.to_tiktoken())).collect(),
236 | },
237 | };
238 |
239 | // Ignore send errors. If this send fails, so does the
240 | // recv.await below. There's no reason to check for the
241 | // same failure twice.
242 | let _ = self.sender.send(msg).await;
243 | match recv.await {
244 | Ok(result) => result.map_err(|e| Error::from_reason(e.to_string())),
245 | Err(e) => Err(Error::from_reason(format!("Actor task has been killed: {}", e.to_string()))),
246 | }
247 | }
248 |
// Encodes `text` with the cl100k encoding, treating every special token as
// ordinary text (matching how the chat-completions API tokenizes input).
// NOTE(review): return type was mangled by the dump (`Result, Error>`) —
// likely `Result<Vec<u32>, Error>`; confirm against the repository.
249 | #[napi]
250 | pub async fn encode_cl100k_no_special_tokens(&self, text: String) -> Result, Error> {
251 | let (send, recv) = oneshot::channel();
252 | let msg = TokenizerMessage::EncodeTokens {
253 | respond_to: send,
254 | text,
255 | encoding: SupportedEncoding::Cl100k,
256 | special_token_handling: tiktoken::SpecialTokenHandling {
257 | // no special tokens!! everything is normal text
258 | // this is how tokenization is handled in the chat model api
259 | default: tiktoken::SpecialTokenAction::NormalText,
260 | ..Default::default()
261 | },
262 | };
263 |
264 | // Ignore send errors. If this send fails, so does the
265 | // recv.await below. There's no reason to check for the
266 | // same failure twice.
267 | let _ = self.sender.send(msg).await;
268 | match recv.await {
269 | Ok(result) => result.map_err(|e| Error::from_reason(e.to_string())),
270 | Err(e) => Err(Error::from_reason(format!("Actor task has been killed: {}", e.to_string()))),
271 | }
272 | }
273 |
// Fast, approximate token count for `text` (cheaper than exact_num_tokens);
// goes through the same actor channel. NOTE(review): return type generics were
// stripped by the dump — presumably `Result<i32>`; confirm against the repo.
274 | #[napi]
275 | pub async fn approx_num_tokens(
276 | &self,
277 | text: String,
278 | encoding: SupportedEncoding,
279 | ) -> Result {
280 | let (send, recv) = oneshot::channel();
281 | let msg = TokenizerMessage::ApproximateNumTokens { respond_to: send, text, encoding };
282 |
283 | // Ignore send errors. If this send fails, so does the
284 | // recv.await below. There's no reason to check for the
285 | // same failure twice.
286 | let _ = self.sender.send(msg).await;
287 | match recv.await {
288 | Ok(result) => result.map_err(|e| Error::from_reason(e.to_string())),
289 | Err(e) => Err(Error::from_reason(format!("Actor task has been killed: {}", e.to_string()))),
290 | }
291 | }
292 |
// Encodes `text` to token ids with caller-controlled special-token handling.
// NOTE(review): generics were stripped by the dump (`HashMap`, `Result, Error>`) —
// likely `HashMap<String, SpecialTokenAction>` and `Result<Vec<u32>, Error>`;
// confirm against the repository. Style nit (not changed here): `e.to_string()`
// inside `format!` is redundant — `{}` already uses Display.
293 | #[napi]
294 | pub async fn encode(
295 | &self,
296 | text: String,
297 | encoding: SupportedEncoding,
298 | special_token_default_action: SpecialTokenAction,
299 | special_token_overrides: HashMap,
300 | ) -> Result, Error> {
301 | let (send, recv) = oneshot::channel();
302 | let msg = TokenizerMessage::EncodeTokens {
303 | respond_to: send,
304 | text,
305 | encoding,
306 | special_token_handling: tiktoken::SpecialTokenHandling {
307 | // special-token handling is caller-controlled here: `default` applies to
308 | // any special token that has no explicit entry in `overrides`
309 | default: special_token_default_action.to_tiktoken(),
310 | overrides: special_token_overrides.into_iter().map(|(k, v)| (k, v.to_tiktoken())).collect(),
311 | },
312 | };
313 |
314 | // Ignore send errors. If this send fails, so does the
315 | // recv.await below. There's no reason to check for the
316 | // same failure twice.
317 | let _ = self.sender.send(msg).await;
318 | match recv.await {
319 | Ok(result) => result.map_err(|e| Error::from_reason(e.to_string())),
320 | Err(e) => Err(Error::from_reason(format!("Actor task has been killed: {}", e.to_string()))),
321 | }
322 | }
323 |
// Looks up the token id whose byte sequence is exactly `bytes`. The
// Uint8Array is copied into a Vec before crossing into the actor task.
// NOTE(review): return generics stripped by the dump — presumably
// `Result<u32>`; confirm against the repository.
324 | #[napi]
325 | pub async fn encode_single_token(
326 | &self,
327 | bytes: napi::bindgen_prelude::Uint8Array,
328 | encoding: SupportedEncoding,
329 | ) -> Result {
330 | let (send, recv) = oneshot::channel();
331 | let msg =
332 | TokenizerMessage::EncodeSingleToken { respond_to: send, bytes: bytes.to_vec(), encoding };
333 |
334 | // Ignore send errors. If this send fails, so does the
335 | // recv.await below. There's no reason to check for the
336 | // same failure twice.
337 | let _ = self.sender.send(msg).await;
338 | match recv.await {
339 | Ok(result) => result.map_err(|e| Error::from_reason(e.to_string())),
340 | Err(e) => Err(Error::from_reason(format!("Actor task has been killed: {}", e.to_string()))),
341 | }
342 | }
// Decodes a single cl100k token id back to its raw bytes, returned to JS as a
// Uint8Array. NOTE(review): return generics stripped by the dump — presumably
// `Result<napi::bindgen_prelude::Uint8Array>`; confirm against the repository.
343 | #[napi]
344 | pub async fn decode_cl100k_byte(
345 | &self,
346 | token: u32,
347 | ) -> Result {
348 | let (send, recv) = oneshot::channel();
349 | let msg = TokenizerMessage::DecodeTokenBytes {
350 | respond_to: send,
351 | token,
352 | encoding: SupportedEncoding::Cl100k,
353 | };
354 |
355 | // Ignore send errors. If this send fails, so does the
356 | // recv.await below. There's no reason to check for the
357 | // same failure twice.
358 | let _ = self.sender.send(msg).await;
359 | match recv.await {
360 | Ok(result) => result
361 | .map_err(|e| napi::Error::from_reason(e.to_string()))
362 | // wrap the raw byte Vec for the JS side
363 | .map(|v| napi::bindgen_prelude::Uint8Array::new(v.into())),
364 | Err(e) => Err(Error::from_reason(format!("Actor task has been killed: {}", e.to_string()))),
365 | }
366 | }
366 |
// Decodes a full sequence of cl100k token ids back into a string.
// NOTE(review): generics stripped by the dump (`Vec`, `Result`) — presumably
// `Vec<u32>` and `Result<String>`; confirm against the repository.
367 | #[napi]
368 | pub async fn decode_cl100k(&self, encoded_tokens: Vec) -> Result {
369 | let (send, recv) = oneshot::channel();
370 | let msg = TokenizerMessage::DecodeTokens {
371 | respond_to: send,
372 | tokens: encoded_tokens,
373 | encoding: SupportedEncoding::Cl100k,
374 | };
375 |
376 | // Ignore send errors. If this send fails, so does the
377 | // recv.await below. There's no reason to check for the
378 | // same failure twice.
379 | let _ = self.sender.send(msg).await;
380 | match recv.await {
381 | Ok(result) => result.map_err(|e| Error::from_reason(e.to_string())),
382 | Err(e) => Err(Error::from_reason(format!("Actor task has been killed: {}", e.to_string()))),
383 | }
384 | }
385 | }
386 |
// Synchronous (non-actor) tokenizer exposed to JS. Holds its own cl100k
// encoding so calls need no channel round-trip; only the fast approximate
// count is offered synchronously.
387 | #[napi]
388 | pub struct SyncTokenizer {
389 | // cl100k encoding built once in the constructor and reused for every call
390 | cl100k_encoding: tiktoken::Encoding,
391 | }
392 |
393 | #[napi]
394 | impl SyncTokenizer {
395 | #[napi(constructor)]
396 | pub fn new() -> Result {
397 | // NOTE(review): return generics stripped by the dump — presumably Result<Self>.
398 | let cl100k_encoding = tiktoken::EncodingFactory::cl100k_im().map_err(|e| {
399 | napi::Error::from_reason(format!("Error creating tokenizer: {}", e.to_string()))
400 | })?;
401 |
402 | Ok(Self { cl100k_encoding })
403 | }
404 |
405 | #[napi]
406 | pub fn approx_num_tokens(&self, text: String, encoding: SupportedEncoding) -> Result {
407 | // `encoding` currently has a single variant, so the match is total.
408 | let enc = match encoding {
409 | SupportedEncoding::Cl100k => &self.cl100k_encoding,
410 | };
411 | Ok(enc.estimate_num_tokens_no_special_tokens_fast(&text) as i32)
412 | }
413 | }
411 |
// JS-facing factory for the actor-backed Tokenizer. NOTE(review): return
// generics stripped by the dump — presumably Result<Tokenizer>.
412 | #[napi]
413 | pub fn get_tokenizer() -> Result {
414 | Tokenizer::new().map_err(|e| Error::from_reason(e.to_string()))
415 | }
416 |
// Smoke test: "hello, world" is 3 cl100k tokens. NOTE(review): it calls
// `exact_num_tokens_cl100k_no_special_tokens`, which is defined earlier in
// this file (outside this chunk) — confirm the name when editing.
417 | #[cfg(test)]
418 | mod tests {
419 | use super::*;
420 |
421 | #[tokio::test]
422 | async fn test_num_tokens() {
423 | let tokenizer = get_tokenizer().unwrap();
424 | let num_tokens = tokenizer
425 | .exact_num_tokens_cl100k_no_special_tokens("hello, world".to_string())
426 | .await
427 | .unwrap();
428 | assert_eq!(num_tokens, 3);
429 | }
430 | }
431 |
--------------------------------------------------------------------------------
/priompt/src/outputCatcher.ai.test.ts:
--------------------------------------------------------------------------------
1 |
2 | import { OutputCatcher, NewOutputCatcher } from "./outputCatcher.ai";
3 |
4 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
5 | // @cursor-agent:begin-test-plan
6 | // T01: Test that `onOutput` correctly adds an output with no priority to the list of outputs.
7 | // T02: Test that `onOutput` correctly adds an output with a priority to the list of outputs.
8 | // T03: Test that `getOutputs` returns a list of outputs in the correct order, with the highest priority first and then all the ones with no priority assigned, in the order they were added.
9 | // T04: Test that `getOutput` returns the first output in the list.
10 | // T05: Test that `getOutput` returns undefined if there are no outputs in the list.
11 | // T06: Test that `onOutput` correctly handles multiple outputs with the same priority.
12 | // T07: Test that `onOutput` correctly handles multiple outputs with different priorities.
13 | // T08: Test that `onOutput` correctly handles multiple outputs with no priority.
14 | // T09: Test that `getOutputs` correctly handles an empty list of outputs.
15 | // T10: Test that `getOutputs` correctly handles a list of outputs with only one output.
16 | // T11: Test that `getOutputs` correctly handles a list of outputs with multiple outputs with the same priority.
17 | // T12: Test that `getOutputs` correctly handles a list of outputs with multiple outputs with different priorities.
18 | // T13: Test that `getOutputs` correctly handles a list of outputs with multiple outputs with no priority.
19 | // T14: Test that `getOutput` correctly handles a list of outputs with only one output.
20 | // T15: Test that `getOutput` correctly handles a list of outputs with multiple outputs with the same priority.
21 | // T16: Test that `getOutput` correctly handles a list of outputs with multiple outputs with different priorities.
22 | // T17: Test that `getOutput` correctly handles a list of outputs with multiple outputs with no priority.
23 | // @cursor-agent:end-test-plan
24 |
25 | import { describe, it, expect } from "vitest";
26 |
27 |
28 | describe("OutputCatcher", () => {
29 |
30 | // @cursor-agent:add-tests-here
31 |
32 |
33 | // @cursor-agent:test-begin:T17
34 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
35 | // @cursor-agent {"dependsOn": "testPlan", "hash": "2fd5729af0825f1ab89a030c1f9b035ecaafc20906f0a7579240fc4a2ca69eda"}
36 | // @cursor-agent {"id": "T17"}
// Unprioritized outputs must keep insertion (FIFO) order for both getOutput and getOutputs.
37 | it('should correctly handle a list of outputs with multiple outputs with no priority', async () => {
38 | const outputCatcher = NewOutputCatcher();
39 |
40 | // Add outputs with no priority
41 | await outputCatcher.onOutput('output1');
42 | await outputCatcher.onOutput('output2');
43 | await outputCatcher.onOutput('output3');
44 |
45 | // Check if the first output is correct
46 | const firstOutput = outputCatcher.getOutput();
47 | expect(firstOutput).toBe('output1');
48 |
49 | // Check if the outputs are in the correct order
50 | const outputs = outputCatcher.getOutputs();
51 | expect(outputs).toEqual(['output1', 'output2', 'output3']);
52 | });
53 | // @cursor-agent:test-end:T17
54 |
55 |
56 |
57 | // @cursor-agent:test-begin:T16
58 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
59 | // @cursor-agent {"dependsOn": "testPlan", "hash": "705d3c64a82e431d967a6202dd705d4d73d542576a464fa50114834100012adb"}
60 | // @cursor-agent {"id": "T16"}
// Title disambiguated: it previously duplicated T12's title verbatim, which makes
// vitest reporter output ambiguous. This variant exercises getOutput (T12 covers getOutputs).
61 | it('getOutput should return the highest-priority output when priorities differ', async () => {
62 | const outputCatcher = NewOutputCatcher();
63 |
64 | await outputCatcher.onOutput('output1', { p: 2 });
65 | await outputCatcher.onOutput('output2', { p: 1 });
66 | await outputCatcher.onOutput('output3', { p: 3 });
67 |
68 | const firstOutput = outputCatcher.getOutput();
69 |
70 | expect(firstOutput).toBe('output3');
71 | });
72 | // @cursor-agent:test-end:T16
73 |
74 |
75 |
76 | // @cursor-agent:test-begin:T15
77 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
78 | // @cursor-agent {"dependsOn": "testPlan", "hash": "52e1cabe3ba083495b0f3585522dcaeef206f46efd571b9eb81455bf949d3551"}
79 | // @cursor-agent {"id": "T15"}
// Title disambiguated: it previously duplicated T11's title verbatim, which makes
// vitest reporter output ambiguous. This variant exercises getOutput (T11 covers getOutputs).
80 | it('getOutput should return the first-added output when all outputs share the same priority', async () => {
81 | const outputCatcher = NewOutputCatcher();
82 |
83 | // Add outputs with the same priority
84 | await outputCatcher.onOutput('output1', { p: 1 });
85 | await outputCatcher.onOutput('output2', { p: 1 });
86 | await outputCatcher.onOutput('output3', { p: 1 });
87 |
88 | // Check if the first output is correct
89 | const firstOutput = outputCatcher.getOutput();
90 | expect(firstOutput).toBe('output1');
91 |
92 | // Check if the outputs are sorted correctly
93 | const outputs = outputCatcher.getOutputs();
94 | expect(outputs).toEqual(['output1', 'output2', 'output3']);
95 | });
96 | // @cursor-agent:test-end:T15
97 |
98 |
99 |
100 | // @cursor-agent:test-begin:T14
101 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
102 | // @cursor-agent {"dependsOn": "testPlan", "hash": "2090cb75b222cb54f3319ec6bd2455703427a3a478979e3b3258aed0a969280f"}
103 | // @cursor-agent {"id": "T14"}
// Title disambiguated: it previously duplicated T10's title verbatim, which makes
// vitest reporter output ambiguous. This variant exercises getOutput (T10 covers getOutputs).
104 | it('getOutput should return the single output when only one output exists', async () => {
105 | const outputCatcher = NewOutputCatcher();
106 | const output = 'Test Output';
107 |
108 | await outputCatcher.onOutput(output);
109 |
110 | const firstOutput = outputCatcher.getOutput();
111 |
112 | expect(firstOutput).toBe(output);
113 | });
114 | // @cursor-agent:test-end:T14
115 |
116 |
117 |
118 | // @cursor-agent:test-begin:T13
119 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
120 | // @cursor-agent {"dependsOn": "testPlan", "hash": "30937a9d8dcb8df28feecd5c7e3b204b04e93c4f26e3b3a61c51578bbc201231"}
121 | // @cursor-agent {"id": "T13"}
// NOTE(review): despite the "no priority" title, this test mixes prioritized and
// unprioritized outputs — it actually verifies the combined ordering (priority
// descending first, then insertion order).
122 | it('should handle a list of outputs with multiple outputs with no priority correctly', async () => {
123 | const outputCatcher = NewOutputCatcher();
124 |
125 | // Add outputs with no priority
126 | await outputCatcher.onOutput('output1');
127 | await outputCatcher.onOutput('output2');
128 | await outputCatcher.onOutput('output3');
129 |
130 | // Add outputs with priority
131 | await outputCatcher.onOutput('output4', { p: 1 });
132 | await outputCatcher.onOutput('output5', { p: 2 });
133 |
134 | const outputs = outputCatcher.getOutputs();
135 |
136 | // Check that the outputs with priority are at the beginning of the list
137 | expect(outputs[0]).toBe('output5');
138 | expect(outputs[1]).toBe('output4');
139 |
140 | // Check that the outputs with no priority are in the order they were added
141 | expect(outputs[2]).toBe('output1');
142 | expect(outputs[3]).toBe('output2');
143 | expect(outputs[4]).toBe('output3');
144 | });
145 | // @cursor-agent:test-end:T13
146 |
147 |
148 |
149 |
150 |
151 |
152 | // @cursor-agent:test-begin:T12
153 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
154 | // @cursor-agent {"dependsOn": "testPlan", "hash": "2da0899bc14155cb0b63a83c0a8648d540232eb11e1d253f97739432a152fbdd"}
155 | // @cursor-agent {"id": "T12"}
// getOutputs: priorities sort descending (3, 2, 1), unprioritized outputs come last.
156 | it('should correctly handle a list of outputs with multiple outputs with different priorities', async () => {
157 | const outputCatcher = NewOutputCatcher();
158 |
159 | await outputCatcher.onOutput('output1', { p: 2 });
160 | await outputCatcher.onOutput('output2', { p: 1 });
161 | await outputCatcher.onOutput('output3', { p: 3 });
162 | await outputCatcher.onOutput('output4');
163 |
164 | const outputs = outputCatcher.getOutputs();
165 |
166 | expect(outputs).toEqual(['output3', 'output1', 'output2', 'output4']);
167 | });
168 | // @cursor-agent:test-end:T12
169 |
170 |
171 |
172 |
173 |
174 |
175 |
176 |
177 |
178 | // @cursor-agent:test-begin:T11
179 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
180 | // @cursor-agent {"dependsOn": "testPlan", "hash": "2ba7dc9f2ce28f32e9ba4a145c6cd71a54b533561b7b422883a75ea0d212b106"}
181 | // @cursor-agent {"id": "T11"}
// getOutputs: ties on priority keep insertion order (stable sort: 2,3 before 1,4).
182 | it('should correctly handle a list of outputs with multiple outputs with the same priority', async () => {
183 | const outputCatcher = NewOutputCatcher();
184 |
185 | await outputCatcher.onOutput('output1', { p: 1 });
186 | await outputCatcher.onOutput('output2', { p: 2 });
187 | await outputCatcher.onOutput('output3', { p: 2 });
188 | await outputCatcher.onOutput('output4', { p: 1 });
189 |
190 | const outputs = outputCatcher.getOutputs();
191 |
192 | expect(outputs).toEqual(['output2', 'output3', 'output1', 'output4']);
193 | });
194 | // @cursor-agent:test-end:T11
195 |
196 |
197 |
198 |
199 |
200 |
201 |
202 |
203 |
204 | // @cursor-agent:test-begin:T10
205 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
206 | // @cursor-agent {"dependsOn": "testPlan", "hash": "9647f55c33afc9626a08bb5bc9fc3ad3bff2a12769f2440bda746c0636cbea43"}
207 | // @cursor-agent {"id": "T10"}
// getOutputs: a single captured output is returned as a one-element list.
208 | it('should correctly handle a list of outputs with only one output', async () => {
209 | const outputCatcher = NewOutputCatcher();
210 | const output = 'Test Output';
211 | await outputCatcher.onOutput(output);
212 |
213 | const outputs = outputCatcher.getOutputs();
214 | expect(outputs).toHaveLength(1);
215 | expect(outputs[0]).toBe(output);
216 | });
217 | // @cursor-agent:test-end:T10
218 |
219 |
220 |
221 |
222 |
223 |
224 |
225 |
226 |
227 | // @cursor-agent:test-begin:T09
228 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
229 | // @cursor-agent {"dependsOn": "testPlan", "hash": "8c28351285c444e37a7251e9722b474d8e3f86290a1bc53c762f8504603c0438"}
230 | // @cursor-agent {"id": "T09"}
// getOutputs on a fresh catcher yields an empty array (not undefined).
231 | it('should handle an empty list of outputs correctly', async () => {
232 | const outputCatcher = NewOutputCatcher();
233 | const outputs = outputCatcher.getOutputs();
234 | expect(outputs).toEqual([]);
235 | });
236 | // @cursor-agent:test-end:T09
237 |
238 |
239 |
240 |
241 |
242 |
243 | // @cursor-agent:test-begin:T08
244 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
245 | // @cursor-agent {"dependsOn": "testPlan", "hash": "8ed95684475af78a10deba0840d25230f1242f8745c7eb53ed1476ce2c8a1f44"}
246 | // @cursor-agent {"id": "T08"}
// onOutput without a priority: outputs accumulate in insertion order.
247 | it('should correctly handle multiple outputs with no priority', async () => {
248 | const outputCatcher = NewOutputCatcher();
249 |
250 | await outputCatcher.onOutput('output1');
251 | await outputCatcher.onOutput('output2');
252 | await outputCatcher.onOutput('output3');
253 |
254 | const outputs = outputCatcher.getOutputs();
255 |
256 | expect(outputs).toEqual(['output1', 'output2', 'output3']);
257 | });
258 | // @cursor-agent:test-end:T08
259 |
260 |
261 |
262 |
263 |
264 |
265 | // @cursor-agent:test-begin:T07
266 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
267 | // @cursor-agent {"dependsOn": "testPlan", "hash": "ced51a023776d7532a37bf3d5ea669fe80508a21326c932163dde56fa6ddb5ef"}
268 | // @cursor-agent {"id": "T07"}
// onOutput with distinct priorities: getOutputs sorts descending by priority.
269 | it('should correctly handle multiple outputs with different priorities', async () => {
270 | const outputCatcher = NewOutputCatcher();
271 |
272 | await outputCatcher.onOutput('output1', { p: 2 });
273 | await outputCatcher.onOutput('output2', { p: 1 });
274 | await outputCatcher.onOutput('output3', { p: 3 });
275 |
276 | const outputs = outputCatcher.getOutputs();
277 |
278 | expect(outputs).toEqual(['output3', 'output1', 'output2']);
279 | });
280 | // @cursor-agent:test-end:T07
281 |
282 |
283 |
284 |
285 |
286 |
287 | // @cursor-agent:test-begin:T06
288 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
289 | // @cursor-agent {"dependsOn": "testPlan", "hash": "743746ca04d5b14b5e90f25c21d1adc87b7ea4d7d205c6899ca6143455869567"}
290 | // @cursor-agent {"id": "T06"}
// onOutput with equal priorities: insertion order is preserved among ties.
291 | it('should correctly handle multiple outputs with the same priority', async () => {
292 | const outputCatcher = NewOutputCatcher();
293 |
294 | await outputCatcher.onOutput('output1', { p: 1 });
295 | await outputCatcher.onOutput('output2', { p: 1 });
296 | await outputCatcher.onOutput('output3', { p: 1 });
297 |
298 | const outputs = outputCatcher.getOutputs();
299 |
300 | expect(outputs).toEqual(['output1', 'output2', 'output3']);
301 | });
302 | // @cursor-agent:test-end:T06
303 |
304 |
305 |
306 |
307 |
308 |
309 | // @cursor-agent:test-begin:T05
310 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
311 | // @cursor-agent {"dependsOn": "testPlan", "hash": "1cadfdeb3fd039e28d5ba302889d5f6e82f5504439e9fa7dcfa7bc81d1e42538"}
312 | // @cursor-agent {"id": "T05"}
// getOutput on a fresh catcher is undefined (no outputs captured yet).
313 | it('should return undefined if there are no outputs in the list', async () => {
314 | const outputCatcher = NewOutputCatcher();
315 | const output = outputCatcher.getOutput();
316 | expect(output).toBeUndefined();
317 | });
318 | // @cursor-agent:test-end:T05
319 |
320 |
321 |
322 |
323 |
324 |
325 | // @cursor-agent:test-begin:T04
326 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
327 | // @cursor-agent {"dependsOn": "testPlan", "hash": "a8944d34a7042b9cc995a1f9b4ffe600dd75484c5a5eff540737872a25994be3"}
328 | // @cursor-agent {"id": "T04"}
// Invariant: getOutput() always equals getOutputs()[0].
329 | it('should return the first output in the list when getOutput is called', async () => {
330 | const outputCatcher = NewOutputCatcher();
331 | await outputCatcher.onOutput('Test1', { p: 1 });
332 | await outputCatcher.onOutput('Test2', { p: 2 });
333 | await outputCatcher.onOutput('Test3', { p: 3 });
334 |
335 | const firstOutput = outputCatcher.getOutput();
336 | const outputs = outputCatcher.getOutputs();
337 | expect(firstOutput).toBe(outputs[0]);
338 | });
339 | // @cursor-agent:test-end:T04
340 |
341 |
342 |
343 |
344 |
345 |
346 | // @cursor-agent:test-begin:T03
347 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
348 | // @cursor-agent {"dependsOn": "testPlan", "hash": "d7a293c66b50c2f6ba6f251d1e49b9ac66d734199f4ea918e1e7305dc3140c54"}
349 | // @cursor-agent {"id": "T03"}
// Full ordering contract: priority descending first, then unprioritized in insertion order.
350 | it('should return a list of outputs in the correct order', async () => {
351 | const outputCatcher = NewOutputCatcher();
352 |
353 | await outputCatcher.onOutput('output1', { p: 2 });
354 | await outputCatcher.onOutput('output2');
355 | await outputCatcher.onOutput('output3', { p: 3 });
356 | await outputCatcher.onOutput('output4');
357 |
358 | const outputs = outputCatcher.getOutputs();
359 |
360 | expect(outputs).toEqual(['output3', 'output1', 'output2', 'output4']);
361 | });
362 | // @cursor-agent:test-end:T03
363 |
364 |
365 |
366 |
367 |
368 |
369 | // @cursor-agent:test-begin:T02
370 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
371 | // @cursor-agent {"dependsOn": "testPlan", "hash": "73c6b3f085b811b0f52b5651fe76b6e10a8cb3f92000cb916f2873d1932c26b7"}
372 | // @cursor-agent {"id": "T02"}
// A prioritized output is stored and retrievable via both getOutputs and getOutput.
373 | it('should correctly add an output with a priority to the list of outputs', async () => {
374 | const outputCatcher = NewOutputCatcher();
375 | const output = 'Test Output';
376 | const priority = 5;
377 |
378 | await outputCatcher.onOutput(output, { p: priority });
379 |
380 | const outputs = outputCatcher.getOutputs();
381 | expect(outputs).toContain(output);
382 |
383 | const firstOutput = outputCatcher.getOutput();
384 | expect(firstOutput).toBe(output);
385 | });
386 | // @cursor-agent:test-end:T02
387 |
388 |
389 |
390 |
391 |
392 |
393 | // @cursor-agent:test-begin:T01
394 | // @cursor-agent {"dependsOn": "interface", "hash": "7034e4452cc668449b0b967116683a95303c4509d263ed535851b081164751bb"}
395 | // @cursor-agent {"dependsOn": "testPlan", "hash": "6c19bf21fe639eaa6f3404b1a3087f9e94d29bbd8abe44c027087b0bf88d9ad0"}
396 | // @cursor-agent {"id": "T01"}
// An unprioritized output is stored and visible in getOutputs.
397 | it('should correctly add an output with no priority to the list of outputs', async () => {
398 | const outputCatcher = NewOutputCatcher();
399 | const output = 'Test Output';
400 |
401 | await outputCatcher.onOutput(output);
402 |
403 | const outputs = outputCatcher.getOutputs();
404 | expect(outputs).toContain(output);
405 | });
406 | // @cursor-agent:test-end:T01
407 |
408 |
409 |
410 |
411 | })
412 |
413 |
--------------------------------------------------------------------------------
/priompt/src/preview.ts:
--------------------------------------------------------------------------------
1 | import { render } from './lib';
2 | import { Prompt, PromptElement, PromptProps, RenderOutput, SynchronousPrompt } from './types';
3 | import * as fs from 'fs';
4 | import * as path from 'path';
5 | import * as yaml from 'js-yaml';
6 | import { StreamChatCompletionResponse, UsableTokenizer } from './openai';
7 | import { ChatCompletionResponseMessage, CreateChatCompletionResponse } from 'openai';
8 | import { NewOutputCatcher, OutputCatcher } from './outputCatcher.ai';
9 |
// Request shapes for the priompt preview server endpoints.
10 | export type PreviewManagerGetPromptQuery = {
11 | promptId: string;
12 | propsId: string;
13 | tokenLimit: number;
14 | tokenizer: UsableTokenizer;
15 | };
// Same as above but the props come inline as a serialized dump rather than a saved propsId.
16 | export type PreviewManagerGetRemotePromptQuery = {
17 | promptId: string;
18 | promptDump: string;
19 | tokenLimit: number;
20 | tokenizer: UsableTokenizer;
21 | };
22 |
23 | export type PreviewManagerGetRemotePropsQuery = {
24 | promptId: string;
25 | promptDump: string
26 | }
27 |
// `completion` may be a single message or a batch; `stream` selects which
// handler set is invoked (see getPromptOutput).
28 | export type PreviewManagerGetPromptOutputQuery = {
29 | promptId: string;
30 | propsId: string;
31 | tokenLimit: number;
32 | tokenizer: UsableTokenizer;
33 | completion: ChatCompletionResponseMessage | ChatCompletionResponseMessage[];
34 | stream: boolean;
35 | };
36 |
37 | export type PreviewManagerLiveModeQuery = {
38 | alreadySeenLiveModeId?: string;
39 | }
40 |
41 | export type PreviewManagerLiveModeResultQuery = {
42 | output: string;
43 | }
44 |
// Server-side contract for the preview manager. NOTE(review): generic
// parameters appear stripped by the dump (`PreviewConfig`, `Record;`,
// `Promise;`) — confirm the original signatures against the repository.
45 | export interface IPreviewManager {
46 | registerConfig(config: PreviewConfig): void;
47 | dump(config: PreviewConfig, props: T): void;
48 |
49 | // these two methods need to be implemented on the server for priompt to work
50 | getPreviews(): Record;
51 | getPrompt(query: PreviewManagerGetPromptQuery): Promise;
52 | getPromptFromRemote(query: PreviewManagerGetRemotePromptQuery): Promise;
53 | }
54 |
// Handle returned to a live-mode poller; the id pairs requests with results.
55 | type LiveModeOutput = {
56 | liveModeId: string;
57 | };
58 |
// Most recent live-mode prompt, cached so getElement('liveModePromptId') can serve it.
59 | type LiveModeData = {
60 | liveModeId: string;
61 | promptElement: PromptElement;
62 | };
63 |
64 | function getProjectRoot(): string {
65 | if (process.env.PRIOMPT_PREVIEW_BASE_RELATIVE_PATH !== undefined) {
66 | return path.join(process.cwd(), process.env.PRIOMPT_PREVIEW_BASE_RELATIVE_PATH);
67 | }
68 | // just do cwd / priompt for now
69 | return process.cwd();
70 | }
71 |
// Builds a default PreviewConfig from a prompt function, keyed by the
// function's runtime `name`. NOTE(review): generics stripped by the dump.
72 | export function configFromPrompt(prompt: Prompt): PreviewConfig {
73 | return {
74 | id: prompt.name,
75 | prompt,
76 | };
77 | }
// Synchronous-prompt counterpart of configFromPrompt; same name-as-id scheme.
78 | export function configFromSynchronousPrompt(prompt: SynchronousPrompt): SynchronousPreviewConfig {
79 | return {
80 | id: prompt.name,
81 | prompt,
82 | };
83 | }
84 |
// Serializes props for a preview dump, excluding any key prefixed with
// 'DO_NOT_DUMP'. Uses config.dump when provided, else YAML.
// NOTE(review): generics stripped by the dump (`Omit`). Style nit (unchanged):
// the first loop could `break` on the first match.
85 | export function dumpProps(config: PreviewConfig, props: Omit): string {
86 | let hasNoDump = false;
87 | for (const key in props) {
88 | if (key.startsWith('DO_NOT_DUMP')) {
89 | hasNoDump = true;
90 | }
91 | }
// Only copy (and allocate) when something actually needs filtering out.
92 | let objectToDump = props;
93 | if (hasNoDump) {
94 | objectToDump = {} as Omit;
95 | for (const key in props) {
96 | if (!key.startsWith('DO_NOT_DUMP')) {
97 | // eslint-disable-next-line @typescript-eslint/ban-ts-comment
98 | // @ts-ignore
99 | objectToDump[key] = props[key];
100 | }
101 | }
102 | }
103 |
104 | const dump = config.dump
105 | ? config.dump(objectToDump)
106 | : yaml.dump(objectToDump, {
107 | indent: 2,
// lineWidth -1 disables js-yaml's line wrapping so long strings stay intact
108 | lineWidth: -1,
109 | });
110 | return dump;
111 | }
112 |
// Registration record for a prompt preview. `dump`/`hydrate` override the
// default YAML (de)serialization of props. NOTE(review): generics stripped by
// the dump; the two declarations on one line (dump + hydrate) look like a
// formatting artifact of the extraction, not intent.
113 | export type PreviewConfig = {
114 | id: string;
115 | prompt: Prompt;
116 | // defaults to yaml but can be overridden
117 | dump?: (props: Omit) => string; hydrate?: (dump: string) => PropsT;
118 | }
119 |
// Same record for synchronous prompts.
120 | export type SynchronousPreviewConfig = {
121 | id: string;
122 | prompt: SynchronousPrompt;
123 | // defaults to yaml but can be overridden
124 | dump?: (props: Omit) => string; hydrate?: (dump: string) => PropsT;
125 |
126 | }
127 |
128 | class PreviewManagerImpl implements IPreviewManager {
129 |
// Dumping is on by default only in development; callers can toggle it at runtime.
130 | private _shouldDump: boolean = process.env.NODE_ENV === 'development';
131 |
132 | get shouldDump(): boolean {
133 | return this._shouldDump;
134 | }
135 |
136 | set shouldDump(value: boolean) {
137 | this._shouldDump = value;
138 | }
139 |
// Registry of all preview configs, keyed by prompt id. NOTE(review): value
// generics stripped by the dump (`Record>`).
140 | private readonly previews: Record> = {};
141 |
// Returns undefined for unregistered ids (no existence check here).
142 | getConfig(promptId: string) {
143 | return this.previews[promptId];
144 | }
145 |
// Lists saved prompt/props yaml files per registered prompt id. Side effect:
// creates the per-prompt directories on first access. NOTE(review):
// `.replace('.yaml', '')` replaces the FIRST occurrence, not necessarily the
// suffix — a name containing '.yaml' mid-string would be mangled; consider
// `f.slice(0, -'.yaml'.length)` if that ever matters.
146 | getPreviews() {
147 | return Object.keys(this.previews).reduce((acc: Record, promptId) => {
148 | const promptPath = path.join(getProjectRoot(), 'priompt', promptId);
149 | const dumpsPath = path.join(promptPath, 'dumps');
150 | // if they don't exist, make the dirs
151 | if (!fs.existsSync(promptPath)) {
152 | fs.mkdirSync(promptPath, { recursive: true });
153 | }
154 | if (!fs.existsSync(dumpsPath)) {
155 | fs.mkdirSync(dumpsPath, { recursive: true });
156 | }
157 | const propsIds = fs.readdirSync(dumpsPath).filter((f) => f.endsWith('.yaml')).map((f) => f.replace('.yaml', ''))
158 | const savedIds = fs.readdirSync(promptPath).filter((f) => f.endsWith('.yaml')).map((f) => f.replace('.yaml', ''));
159 | return {
160 | ...acc,
161 | [promptId]: {
162 | dumps: propsIds,
163 | saved: savedIds,
164 | }
165 | };
166 | }, {});
167 | }
168 |
// Renders the prompt identified by (promptId, propsId) within the given token
// budget. `getElement` may return a PromptElement or a Promise of one, hence
// the instanceof check. NOTE(review): return generics stripped by the dump
// (`Promise {`) — presumably Promise<RenderOutput>.
169 | async getPrompt(query: PreviewManagerGetPromptQuery): Promise {
170 | let element = PreviewManager.getElement(query.promptId, query.propsId);
171 | if (element instanceof Promise) {
172 | element = await element;
173 | }
174 |
175 | const rendered = await render(element, { tokenizer: query.tokenizer, tokenLimit: query.tokenLimit });
176 |
177 | return rendered;
178 | }
179 |
// Renders the prompt with an OutputCatcher wired in (via an injected
// `onReturn` prop, see getElement), feeds the supplied completion(s) to the
// rendered handlers, and returns the first captured output.
// NOTE(review): in the non-stream path, when `completion` is an array only
// completion[0] is delivered to every handler — the rest are dropped; confirm
// that is intended.
180 | async getPromptOutput(query: PreviewManagerGetPromptOutputQuery): Promise {
181 | const outputCatcher = NewOutputCatcher();
182 |
183 | let element = PreviewManager.getElement(query.promptId, query.propsId, outputCatcher);
184 | if (element instanceof Promise) {
185 | element = await element;
186 | }
187 |
188 | const rendered = await render(element, { tokenizer: query.tokenizer, tokenLimit: query.tokenLimit });
189 |
190 | if (!query.stream) {
191 | // call all of them and wait all of them in parallel
192 | await Promise.all(
193 | rendered.outputHandlers.map((handler) => handler(Array.isArray(query.completion) ? query.completion[0] : query.completion))
194 | );
195 |
196 | // now return the first output
197 | const firstOutput = outputCatcher.getOutput();
198 |
199 | return firstOutput;
200 | } else {
// Stream path: wrap the completion(s) in an async generator for each handler.
201 | await Promise.all(
202 | rendered.streamHandlers.map((handler) => handler((async function* () {
203 | for (const completion of Array.isArray(query.completion) ? query.completion : [query.completion]) {
204 | yield completion;
205 | }
206 | })())
207 | ));
208 |
209 | // now return the first output
210 | const firstOutput = outputCatcher.getOutput();
211 |
// NOTE(review): assumes the captured output is async-iterable; the `as any`
// cast hides that contract — a non-iterable output would throw here.
212 | // let's just put it in an array
213 | const a = [];
214 | // eslint-disable-next-line @typescript-eslint/no-explicit-any
215 | for await (const x of (firstOutput as any)) {
216 | a.push(x);
217 | }
218 |
219 | return a;
220 | }
221 |
222 | }
223 |
// Renders a prompt from an inline props dump (no saved propsId required).
224 | async getPromptFromRemote(query: PreviewManagerGetRemotePromptQuery) {
225 | let element = this.getRemoteElement(query.promptId, query.promptDump);
226 | if (element instanceof Promise) {
227 | element = await element;
228 | }
229 | return this.getPromptFromRemoteElement(query, element);
230 | }
231 |
// Deserializes props from an inline dump using the prompt's hydrate config.
// NOTE(review): no existence check — an unregistered promptId reaches
// hydrate with an undefined config.
232 | async getPropsFromRemote(query: PreviewManagerGetRemotePropsQuery) {
233 | const promptId = query.promptId;
234 | const promptDump = query.promptDump;
235 | const config = this.previews[promptId];
236 | const baseProps = this.hydrate(config, promptDump);
237 | return baseProps;
238 | }
// Returns the registered prompt function itself (query.promptDump is unused here).
239 | getPromptFunctionFromRemote(query: PreviewManagerGetRemotePropsQuery) {
240 | const promptId = query.promptId;
241 | const config = this.previews[promptId];
242 | return config.prompt;
243 | }
244 |
// Renders an already-constructed element under the query's token budget.
// NOTE(review): parameter generics stripped by the dump (`Omit,`).
245 | async getPromptFromRemoteElement(query: Omit, element: PromptElement) {
246 | const rendered = await render(element, { tokenizer: query.tokenizer, tokenLimit: query.tokenLimit });
247 | return rendered
248 | }
249 |
// Hydrates props from an inline dump and invokes the prompt to get its element.
250 | getRemoteElement(promptId: string, promptDump: string) {
251 | const config = this.previews[promptId];
252 | const baseProps = this.hydrate(config, promptDump);
253 | const element = config.prompt(baseProps as PromptProps);
254 | return element;
255 | }
256 |
257 |
// Builds the prompt element for (promptId, propsId). 'liveModePromptId' is a
// reserved id that serves the cached live-mode element. When an OutputCatcher
// is supplied, an `onReturn` prop is spliced into the props so the rendered
// prompt can report its output (note: it overrides any hydrated `onReturn`).
258 | getElement(promptId: string, propsId: string, outputCatcher?: OutputCatcher): PromptElement | Promise {
259 | if (promptId === 'liveModePromptId') {
260 | if (this.lastLiveModeData === null) {
261 | throw new Error('live mode prompt not found')
262 | }
263 | return this.lastLiveModeData.promptElement;
264 | }
265 | if (!Object.keys(this.previews).includes(promptId)) {
266 | throw new Error(`preview promptId ${promptId} not registered`);
267 | }
268 | const config = this.previews[promptId];
269 |
270 | const baseProps = this.hydrate(config, this.getDump(promptId, propsId));
271 |
272 | let realProps: unknown = baseProps;
273 | if (outputCatcher !== undefined) {
274 | const captureProps: unknown = {
275 | onReturn: (x: unknown) => outputCatcher.onOutput(x),
276 | };
277 | realProps = {
278 | // eslint-disable-next-line @typescript-eslint/no-explicit-any
279 | ...(baseProps as any),
280 | // eslint-disable-next-line @typescript-eslint/no-explicit-any
281 | ...(captureProps as any),
282 | };
283 | }
284 |
285 | return config.prompt(realProps as PromptProps);
286 | }
287 |
/**
 * Registers a preview config under its id. Re-registration is normally an
 * error, but is downgraded to a warning when ALLOW_PROMPT_REREGISTRATION
 * is set — sort of sketchy, but may be fine in ESM HMR land, where modules
 * re-execute and we just overwrite.
 */
registerConfig(config: PreviewConfig) {
  const alreadyRegistered = Object.keys(this.previews).includes(config.id);
  if (alreadyRegistered) {
    if (process.env.ALLOW_PROMPT_REREGISTRATION !== "true") {
      throw new Error(`preview id ${config.id} already registered`);
    }
    console.warn(`preview id ${config.id} already registered`);
  }
  this.previews[config.id] = config;
}
// Convenience wrapper: derives a preview config from the prompt and
// registers it.
register(prompt: Prompt) {
  this.registerConfig(configFromPrompt(prompt));
}
304 |
// Delegates to the module-level configFromSynchronousPrompt helper (the
// bare identifier resolves to the free function, not this method).
configFromSynchronousPrompt(prompt: SynchronousPrompt): SynchronousPreviewConfig {
  const config = configFromSynchronousPrompt(prompt);
  return config;
}
308 |
/**
 * Deserializes a props dump. Uses the config's custom `hydrate` hook when
 * provided, otherwise parses the dump as YAML.
 *
 * NOTE(review): the generic parameter was lost in this copy and has been
 * reconstructed from usage (`T` appears undeclared in the original) —
 * confirm the signature against the original source.
 */
hydrate<T>(config: PreviewConfig<T>, dump: string): T {
  if (config.hydrate) {
    return config.hydrate(dump);
  }
  // yaml.load returns an untyped value; callers are trusted to have dumped
  // a T-shaped object here.
  const yamlData = yaml.load(dump);
  return yamlData as T;
}
317 |
// Dumps the props for a prompt to disk, but only when dumping is enabled
// via this.shouldDump; otherwise a no-op.
// NOTE(review): `Omit` is missing its type arguments in this copy —
// presumably something like Omit<T, 'onReturn'>; confirm against the
// original source.
maybeDump(prompt: Prompt, props: Omit) {
  if (!this.shouldDump) {
    return;
  }
  const config = configFromPrompt(prompt);
  this.dump(config, props);
}
325 |
/**
 * Serializes `props` and writes them to
 * `priompt/<config.id>/dumps/<timestamp>.yaml` under the project root.
 *
 * On first use for a prompt id (directory does not exist yet) it also
 * writes `priompt/<config.id>/example01.yaml` so other users can see what
 * the prompt's props are supposed to look like.
 *
 * The dumps directory is capped: once it holds more than 2000 files, the
 * oldest 1000 are deleted. Lexicographic order equals chronological order
 * here because file names are ISO-8601 timestamps.
 *
 * NOTE(review): the generic parameter was lost in this copy (`T` appears
 * undeclared in the original) — reconstructed as dump<T>; confirm against
 * the original source.
 */
dump<T>(config: PreviewConfig<T>, props: T) {
  const dump = dumpProps(config, props);
  const priomptPath = path.join(getProjectRoot(), 'priompt', config.id);
  console.debug("PRIOMPT PATH: ", priomptPath);
  const dumpsPath = path.join(priomptPath, 'dumps');

  if (!fs.existsSync(priomptPath)) {
    fs.mkdirSync(priomptPath, { recursive: true });

    // First dump for this prompt id: also write an example file so other
    // users can see what the prompt is supposed to look like.
    const filePath = path.join(priomptPath, `example01.yaml`);
    fs.writeFileSync(filePath, dump);
  }

  if (!fs.existsSync(dumpsPath)) {
    fs.mkdirSync(dumpsPath, { recursive: true });
  }

  // If there are more than 2000 files, delete the oldest 1000 of them.
  // Best-effort: a failure here must not prevent writing the new dump.
  try {
    const files = fs.readdirSync(dumpsPath);
    if (files.length > 2000) {
      const sortedFiles = files.sort((a, b) => a.localeCompare(b));
      for (let i = 0; i < 1000; i++) {
        fs.unlinkSync(path.join(dumpsPath, sortedFiles[i]));
      }
    }
  } catch (e) {
    console.warn({ error: e }, "failed to remove old priompt dumps")
  }

  // Human-readable propsId derived from the current time; ':' and '.' are
  // replaced because they are not filesystem-safe everywhere.
  const propsId = new Date().toISOString().replace(/[:.]/g, '-');
  const filePath = path.join(dumpsPath, `${propsId}.yaml`);
  fs.writeFileSync(filePath, dump);
}
362 |
363 | private lastLiveModeData: LiveModeData | null = null;
364 | private lastLiveModeOutputPromise: Promise;
365 | private resolveLastLiveModeOutputPromise: () => void = () => { };
366 |
367 | private liveModeResultPromise: Promise;
368 | private resolveLiveModeResult: (s: string) => void = () => { };
369 |
370 | constructor() {
371 | this.lastLiveModeOutputPromise = new Promise((resolve) => {
372 | this.resolveLastLiveModeOutputPromise = resolve;
373 | });
374 | this.liveModeResultPromise = new Promise((resolve) => {
375 | this.resolveLiveModeResult = resolve;
376 | });
377 | }
378 |
/**
 * Serves one live-mode completion request: publishes the prompt element so
 * a waiting `liveMode()` poller can pick it up, then blocks until
 * `liveModeResult()` supplies the output text, which is wrapped in a
 * chat-completion-shaped response.
 *
 * NOTE(review): the return type argument was lost in this copy —
 * reconstructed as Promise<CreateChatCompletionResponse> from the body;
 * confirm against the original source.
 */
async getLiveModePromptCompletion(promptElement: PromptElement, options: { model: string, abortSignal?: AbortSignal }): Promise<CreateChatCompletionResponse> {
  const liveModeData: LiveModeData = {
    liveModeId: randomString(),
    promptElement,
  };
  this.lastLiveModeData = liveModeData;

  // Wake any liveMode() pollers, then re-arm the notification promise for
  // the next request.
  this.resolveLastLiveModeOutputPromise();
  this.lastLiveModeOutputPromise = new Promise((resolve) => {
    this.resolveLastLiveModeOutputPromise = resolve;
  });

  // Blocks until liveModeResult() delivers the user-provided output.
  const result = await this.liveModeResultPromise;

  const output: CreateChatCompletionResponse = {
    'id': liveModeData.liveModeId,
    // NOTE(review): 'text_completion' and a millisecond `created` do not
    // match the real chat-completion wire format (which uses
    // 'chat.completion' and epoch seconds) — presumably fine for preview
    // purposes; confirm before reusing elsewhere.
    'object': 'text_completion',
    'created': Date.now(),
    'model': options.model,
    'choices': [
      {
        'message': {
          'role': 'assistant',
          'content': result,
        }
      }
    ]
  };

  return output;
}
407 |
408 | async *streamLiveModePromptCompletion(promptElement: PromptElement, options: { model: string, abortSignal?: AbortSignal }): AsyncGenerator {
409 | const output: StreamChatCompletionResponse = await this.getLiveModePromptCompletion(promptElement, options);
410 |
411 | output.choices[0].delta = output.choices[0].message;
412 |
413 | yield output;
414 | }
415 |
416 | async liveMode(query: PreviewManagerLiveModeQuery, abortSignal?: AbortSignal): Promise {
417 | while (true) {
418 | const result = await Promise.race([
419 | this.lastLiveModeOutputPromise,
420 | new Promise((_, reject) => {
421 | if (abortSignal) {
422 | abortSignal.addEventListener('abort', () => reject(new Error('Aborted')));
423 | }
424 | }),
425 | ]);
426 |
427 | if (result instanceof Error) {
428 | throw result;
429 | }
430 |
431 | if (this.lastLiveModeData === null) {
432 | continue;
433 | }
434 | if (this.lastLiveModeData.liveModeId === query.alreadySeenLiveModeId) {
435 | continue;
436 | }
437 | return this.lastLiveModeData;
438 | }
439 | }
440 |
/**
 * Delivers the output for the pending live-mode completion, then re-arms
 * the result promise for the next request.
 */
liveModeResult(query: PreviewManagerLiveModeResultQuery) {
  const deliver = this.resolveLiveModeResult;
  this.liveModeResultPromise = new Promise((resolve) => {
    this.resolveLiveModeResult = resolve;
  });
  // Resolve the previous generation's promise with the provided output.
  deliver(query.output);
}
447 |
448 |
449 | private getDump(promptId: string, propsId: string): string {
450 | const priomptPath = path.join(getProjectRoot(), 'priompt', promptId);
451 | const dumpsPath = path.join(priomptPath, 'dumps');
452 |
453 | const filePathInPromptId = path.join(priomptPath, `${propsId}.yaml`);
454 | const filePathInDumps = path.join(dumpsPath, `${propsId}.yaml`);
455 |
456 | if (fs.existsSync(filePathInPromptId)) {
457 | return fs.readFileSync(filePathInPromptId, 'utf-8');
458 | } else if (fs.existsSync(filePathInDumps)) {
459 | return fs.readFileSync(filePathInDumps, 'utf-8');
460 | } else {
461 | throw new Error(`No dump found for promptId ${promptId} and propsId ${propsId}`);
462 | }
463 | }
464 | }
465 |
// GLOBALS FTW. i love globals.
// Module-level singleton: the whole preview system routes through this one
// shared PreviewManagerImpl instance.
export const PreviewManager = new PreviewManagerImpl();
468 |
469 | // Decorator
470 | export function register() {
471 | // eslint-disable-next-line @typescript-eslint/no-explicit-any
472 | function registerPrompt(target: any, propertyKey: string, descriptor: TypedPropertyDescriptor>) {
473 | if (descriptor.value === undefined) {
474 | throw new Error(`@registerPrompt can only be used on methods, not ${target.constructor.name}.${propertyKey}`);
475 | } else {
476 | PreviewManager.register(descriptor.value);
477 | }
478 | }
479 | return registerPrompt;
480 | }
481 |
482 | // export function register(prompt: Prompt) {
483 | // PreviewManager.register(prompt);
484 | // }
485 |
486 |
487 |
488 |
489 |
490 | function randomString() {
491 | let s = '';
492 | for (let i = 0; i < 10; i++) {
493 | s += Math.floor(Math.random() * 10);
494 | }
495 | return s;
496 | }
--------------------------------------------------------------------------------