├── .github └── workflows │ └── ci.yml ├── .gitignore ├── LICENSE ├── README.md ├── deno.json ├── examples ├── chatCompletion.ts ├── chatCompletionFunction.ts ├── chatCompletionStream.ts ├── completion.ts ├── completionStream.ts ├── edit.ts ├── files.ts ├── image.ts ├── imageEdit.ts ├── imageEditWithFiles.ts ├── imageVariation.ts ├── imageVariationWithFiles.ts ├── testdata │ ├── example.jsonl │ └── jfk.wav └── transcription.ts ├── mod.ts └── src ├── deps.ts ├── openai.ts ├── types.ts └── util.ts /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | pull_request: 7 | branches: [master] 8 | 9 | jobs: 10 | build: 11 | name: tests (${{ matrix.os }}) 12 | runs-on: ${{ matrix.os }} 13 | strategy: 14 | matrix: 15 | os: [ubuntu-latest, windows-latest, macOS-latest] 16 | steps: 17 | - uses: actions/checkout@v2 18 | - name: download deno 19 | uses: denoland/setup-deno@v1 20 | with: 21 | deno-version: v1.x 22 | 23 | - name: check format 24 | if: matrix.os == 'ubuntu-latest' 25 | run: deno fmt --check --ignore=README.md 26 | 27 | - name: check linting 28 | if: matrix.os == 'ubuntu-latest' 29 | run: deno lint 30 | 31 | # TODO: Testing 32 | # - name: run tests 33 | # run: deno task test -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Dean Srebnik 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Unofficial Deno wrapper for the Open AI API 2 | 3 | [![Tags](https://img.shields.io/github/release/load1n9/openai)](https://github.com/load1n9/openai/releases) 4 | [![Doc](https://doc.deno.land/badge.svg)](https://doc.deno.land/https/deno.land/x/openai/mod.ts) 5 | [![Checks](https://github.com/load1n9/openai/actions/workflows/ci.yml/badge.svg)](https://github.com/load1n9/openai/actions/workflows/ci.yml) 6 | [![License](https://img.shields.io/github/license/load1n9/openai)](https://github.com/load1n9/openai/blob/master/LICENSE) 7 | 8 | ## Usage 9 | 10 | Your Open AI Api key ([found here](https://beta.openai.com/account/api-keys)) is 11 | needed for this library to work. We recommend setting it as an environment 12 | variable. Here is a configuration example. 
13 | 14 | ```ts 15 | import { OpenAI } from "https://deno.land/x/openai/mod.ts"; 16 | 17 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 18 | ``` 19 | 20 | ### Completion 21 | 22 | ```ts 23 | import { OpenAI } from "https://deno.land/x/openai/mod.ts"; 24 | 25 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 26 | 27 | const completion = await openAI.createCompletion({ 28 | model: "davinci", 29 | prompt: "The meaning of life is", 30 | }); 31 | 32 | console.log(completion.choices); 33 | ``` 34 | 35 | ### Chat Completion 36 | 37 | ```ts 38 | import { OpenAI } from "https://deno.land/x/openai/mod.ts"; 39 | 40 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 41 | 42 | const chatCompletion = await openAI.createChatCompletion({ 43 | model: "gpt-3.5-turbo", 44 | messages: [ 45 | { "role": "system", "content": "You are a helpful assistant." }, 46 | { "role": "user", "content": "Who won the world series in 2020?" }, 47 | { 48 | "role": "assistant", 49 | "content": "The Los Angeles Dodgers won the World Series in 2020.", 50 | }, 51 | { "role": "user", "content": "Where was it played?" 
}, 52 | ], 53 | }); 54 | 55 | console.log(chatCompletion); 56 | ``` 57 | 58 | ### Image 59 | 60 | ```ts 61 | import { OpenAI } from "https://deno.land/x/openai/mod.ts"; 62 | 63 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 64 | 65 | const image = await openAI.createImage({ 66 | prompt: "A unicorn in space", 67 | }); 68 | 69 | console.log(image); 70 | ``` 71 | 72 | ### Edit 73 | 74 | ```ts 75 | import { OpenAI } from "https://deno.land/x/openai/mod.ts"; 76 | 77 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 78 | 79 | const edit = await openAI.createEdit({ 80 | model: "text-davinci-edit-001", 81 | input: "What day of the wek is it?", 82 | instruction: "Fix the spelling mistakes", 83 | }); 84 | 85 | console.log(edit); 86 | ``` 87 | 88 | ### Image Edit 89 | 90 | ```ts 91 | import { OpenAI } from "https://deno.land/x/openai/mod.ts"; 92 | 93 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 94 | 95 | const imageEdit = await openAI.createImageEdit({ 96 | image: "@otter.png", 97 | mask: "@mask.png", 98 | prompt: "A cute baby sea otter wearing a beret", 99 | n: 2, 100 | size: "1024x1024", 101 | }); 102 | 103 | console.log(imageEdit); 104 | ``` 105 | 106 | ### Image Variation 107 | 108 | ```ts 109 | import { OpenAI } from "https://deno.land/x/openai/mod.ts"; 110 | 111 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 112 | 113 | const imageVariation = await openAI.createImageVariation({ 114 | image: "@otter.png", 115 | n: 2, 116 | size: "1024x1024", 117 | }); 118 | 119 | console.log(imageVariation); 120 | ``` 121 | 122 | ### Audio Transcription 123 | 124 | ```ts 125 | import { OpenAI } from "https://deno.land/x/openai/mod.ts"; 126 | 127 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 128 | 129 | const transcription = await openAI.createTranscription({ 130 | model: "whisper-1", 131 | file: "/path/to/your/audio/file.mp3", 132 | }); 133 | 134 | console.log(transcription); 135 | ``` 136 | 137 | ## Maintainers 138 | 139 | 
- Dean Srebnik ([@load1n9](https://github.com/load1n9)) 140 | - Lino Le Van ([@lino-levan](https://github.com/lino-levan)) 141 | 142 | ## License 143 | 144 | MIT 145 | -------------------------------------------------------------------------------- /deno.json: -------------------------------------------------------------------------------- 1 | { 2 | "tasks": {}, 3 | "lock": false 4 | } 5 | -------------------------------------------------------------------------------- /examples/chatCompletion.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | const chatCompletion = await openAI.createChatCompletion({ 6 | model: "gpt-3.5-turbo", 7 | messages: [ 8 | { "role": "system", "content": "You are a helpful assistant." }, 9 | { "role": "user", "content": "Who won the world series in 2020?" }, 10 | { 11 | "role": "assistant", 12 | "content": "The Los Angeles Dodgers won the World Series in 2020.", 13 | }, 14 | { "role": "user", "content": "Where was it played?" }, 15 | ], 16 | }); 17 | 18 | console.log(chatCompletion); 19 | -------------------------------------------------------------------------------- /examples/chatCompletionFunction.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | const chatCompletion = await openAI.createChatCompletion({ 6 | model: "gpt-3.5-turbo", 7 | messages: [ 8 | { "role": "user", "content": "What is the weather like in Boston?" 
}, 9 | ], 10 | function_call: { name: "get_current_weather" }, 11 | functions: [ 12 | { 13 | "name": "get_current_weather", 14 | "description": "Get the current weather in a given location", 15 | "parameters": { 16 | "type": "object", 17 | "properties": { 18 | "location": { 19 | "type": "string", 20 | "description": "The city and state, e.g. San Francisco, CA", 21 | }, 22 | "unit": { 23 | "type": "string", 24 | "enum": ["celsius", "fahrenheit"], 25 | }, 26 | }, 27 | "required": ["location"], 28 | }, 29 | }, 30 | ], 31 | }); 32 | 33 | console.log(chatCompletion); 34 | -------------------------------------------------------------------------------- /examples/chatCompletionStream.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | await openAI.createChatCompletionStream({ 6 | model: "gpt-3.5-turbo", 7 | messages: [ 8 | { "role": "system", "content": "You are a helpful assistant." }, 9 | { "role": "user", "content": "Who won the world series in 2020?" }, 10 | { 11 | "role": "assistant", 12 | "content": "The Los Angeles Dodgers won the World Series in 2020.", 13 | }, 14 | { "role": "user", "content": "Where was it played?" 
}, 15 | ], 16 | }, (chunk) => { 17 | console.log(chunk); 18 | }); 19 | -------------------------------------------------------------------------------- /examples/completion.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | const completion = await openAI.createCompletion({ 6 | model: "davinci", 7 | prompt: "The meaning of life is", 8 | }); 9 | 10 | console.log(completion); 11 | -------------------------------------------------------------------------------- /examples/completionStream.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | openAI.createCompletionStream({ 6 | model: "davinci", 7 | prompt: "The meaning of life is", 8 | }, (chunk) => { 9 | console.log(chunk); 10 | }); 11 | -------------------------------------------------------------------------------- /examples/edit.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | const edit = await openAI.createEdit({ 6 | model: "text-davinci-edit-001", 7 | input: "What day of the wek is it?", 8 | instruction: "Fix the spelling mistakes", 9 | }); 10 | 11 | console.log(edit); 12 | -------------------------------------------------------------------------------- /examples/files.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | // TODO: Do this more portably 6 | console.log(await openAI.uploadFile("./testdata/example.jsonl", "fine-tune")); 7 | -------------------------------------------------------------------------------- 
/examples/image.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | const image = await openAI.createImage({ 6 | prompt: "A unicorn in space", 7 | }); 8 | 9 | console.log(image); 10 | -------------------------------------------------------------------------------- /examples/imageEdit.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | const imageEdit = await openAI.createImageEdit({ 6 | image: "@otter.png", 7 | mask: "@mask.png", 8 | prompt: "A cute baby sea otter wearing a beret", 9 | n: 2, 10 | size: "1024x1024", 11 | }); 12 | 13 | console.log(imageEdit); 14 | -------------------------------------------------------------------------------- /examples/imageEditWithFiles.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | const imageBuffer = await Deno.open("@otter.png", { read: true }); 6 | const maskBuffer = await Deno.open("@mask.png", { read: true }); 7 | const imageEdit = await openAI.createImageEdit({ 8 | image: imageBuffer, 9 | mask: maskBuffer, 10 | prompt: "A cute baby sea otter wearing a beret", 11 | n: 2, 12 | size: "1024x1024", 13 | }); 14 | 15 | console.log(imageEdit); 16 | -------------------------------------------------------------------------------- /examples/imageVariation.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | const imageVariation = await openAI.createImageVariation({ 6 | image: "@otter.png", 7 | n: 2, 8 | size: "1024x1024", 9 | }); 10 | 11 | 
console.log(imageVariation); 12 | -------------------------------------------------------------------------------- /examples/imageVariationWithFiles.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | const imageBuffer = await Deno.open("@otter.png", { read: true }); 6 | const imageVariation = await openAI.createImageVariation({ 7 | image: imageBuffer, 8 | n: 2, 9 | size: "1024x1024", 10 | }); 11 | 12 | console.log(imageVariation); 13 | -------------------------------------------------------------------------------- /examples/testdata/example.jsonl: -------------------------------------------------------------------------------- 1 | {"prompt": "red", "completion": "angry"} 2 | {"prompt": "orange", "completion": "angry"} 3 | {"prompt": "blue", "completion": "calm"} 4 | {"prompt": "purple", "completion": "calm"} -------------------------------------------------------------------------------- /examples/testdata/jfk.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/load1n9/openai/42504235c39687b82e4e9ca03969470ea609ca37/examples/testdata/jfk.wav -------------------------------------------------------------------------------- /examples/transcription.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "../mod.ts"; 2 | 3 | const openAI = new OpenAI(Deno.env.get("YOUR_API_KEY")!); 4 | 5 | const transcription = await openAI.createTranscription({ 6 | model: "whisper-1", 7 | file: "./testdata/jfk.wav", // TODO: Do this more portably 8 | }); 9 | 10 | console.log(transcription); 11 | -------------------------------------------------------------------------------- /mod.ts: -------------------------------------------------------------------------------- 1 | export { OpenAI } from "./src/openai.ts"; 2 | 3 | 
export * from "./src/types.ts"; 4 | -------------------------------------------------------------------------------- /src/deps.ts: -------------------------------------------------------------------------------- 1 | export { TextDelimiterStream } from "https://deno.land/std@0.204.0/streams/mod.ts"; 2 | export { basename } from "https://deno.land/std@0.204.0/path/mod.ts"; 3 | -------------------------------------------------------------------------------- /src/openai.ts: -------------------------------------------------------------------------------- 1 | import { basename } from "./deps.ts"; 2 | import { decodeStream, throwError } from "./util.ts"; 3 | import type { 4 | ChatCompletion, 5 | ChatCompletionOptions, 6 | ChatCompletionStream, 7 | Completion, 8 | CompletionOptions, 9 | CompletionStream, 10 | DeletedFile, 11 | DeletedFineTune, 12 | Edit, 13 | EditOptions, 14 | Embedding, 15 | EmbeddingsOptions, 16 | FileInstance, 17 | FileList, 18 | FileSpecifier, 19 | FineTune, 20 | FineTuneEvent, 21 | FineTuneEventList, 22 | FineTuneList, 23 | FineTuneOptions, 24 | Image, 25 | ImageEditOptions, 26 | ImageOptions, 27 | ImageVariationOptions, 28 | Model, 29 | ModelList, 30 | Moderation, 31 | Transcription, 32 | TranscriptionOptions, 33 | Translation, 34 | TranslationOptions, 35 | } from "./types.ts"; 36 | 37 | const defaultBaseUrl = "https://api.openai.com/v1"; 38 | 39 | export class OpenAI { 40 | #privateKey: string; 41 | #baseUrl: string; 42 | 43 | constructor(privateKey: string, options?: { baseUrl?: string }) { 44 | this.#privateKey = privateKey; 45 | this.#baseUrl = options?.baseUrl ?? defaultBaseUrl; 46 | } 47 | 48 | async #request( 49 | url: string, 50 | // deno-lint-ignore no-explicit-any 51 | body: any, 52 | options?: { method?: string; noContentType?: boolean }, 53 | ) { 54 | const response = await fetch( 55 | `${this.#baseUrl}${url}`, 56 | { 57 | body: options?.noContentType 58 | ? body 59 | : (body ? 
JSON.stringify(body) : undefined), 60 | headers: { 61 | Authorization: `Bearer ${this.#privateKey}`, 62 | ...( 63 | options?.noContentType ? {} : { 64 | "Content-Type": "application/json", 65 | } 66 | ), 67 | }, 68 | method: options?.method ?? "POST", 69 | }, 70 | ); 71 | const data = await response.json(); 72 | 73 | throwError(data); 74 | 75 | return data; 76 | } 77 | 78 | /** 79 | * Lists the currently available models, and provides basic information about each one such as the owner and availability. 80 | * 81 | * https://platform.openai.com/docs/api-reference/models/list 82 | */ 83 | async listModels(): Promise { 84 | return await this.#request("/models", undefined, { method: "GET" }); 85 | } 86 | 87 | /** 88 | * Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 89 | * 90 | * https://platform.openai.com/docs/api-reference/models/retrieve 91 | */ 92 | async getModel(model: string): Promise { 93 | return await this.#request(`/models/${model}`, undefined, { 94 | method: "GET", 95 | }); 96 | } 97 | 98 | /** 99 | * Creates a completion for the provided prompt and parameters 100 | * 101 | * https://platform.openai.com/docs/api-reference/completions/create 102 | */ 103 | async createCompletion(options: CompletionOptions): Promise { 104 | return await this.#request(`/completions`, { 105 | model: options.model, 106 | prompt: options.prompt, 107 | suffix: options.suffix, 108 | max_tokens: options.maxTokens, 109 | temperature: options.temperature, 110 | top_p: options.topP, 111 | n: options.n, 112 | logprobs: options.logprobs, 113 | echo: options.echo, 114 | stop: options.stop, 115 | presence_penalty: options.presencePenalty, 116 | frequency_penalty: options.frequencyPenalty, 117 | best_of: options.bestOf, 118 | logit_bias: options.logitBias, 119 | user: options.user, 120 | }); 121 | } 122 | 123 | /** 124 | * Creates a completion stream for the provided prompt and parameters 125 | * 126 | * 
https://platform.openai.com/docs/api-reference/completions/create 127 | */ 128 | async createCompletionStream( 129 | options: Omit, 130 | callback: (chunk: CompletionStream) => void, 131 | ): Promise { 132 | const res = await fetch( 133 | `${this.#baseUrl}/completions`, 134 | { 135 | method: "POST", 136 | headers: { 137 | Authorization: `Bearer ${this.#privateKey}`, 138 | "Content-Type": "application/json", 139 | }, 140 | body: JSON.stringify({ 141 | model: options.model, 142 | prompt: options.prompt, 143 | suffix: options.suffix, 144 | max_tokens: options.maxTokens, 145 | temperature: options.temperature, 146 | top_p: options.topP, 147 | n: options.n, 148 | stream: true, 149 | logprobs: options.logprobs, 150 | echo: options.echo, 151 | stop: options.stop, 152 | presence_penalty: options.presencePenalty, 153 | frequency_penalty: options.frequencyPenalty, 154 | logit_bias: options.logitBias, 155 | user: options.user, 156 | }), 157 | }, 158 | ); 159 | 160 | await decodeStream(res, callback); 161 | } 162 | 163 | /** 164 | * Creates a completion for the chat message 165 | * 166 | * https://platform.openai.com/docs/api-reference/chat/create 167 | */ 168 | async createChatCompletion( 169 | options: ChatCompletionOptions, 170 | ): Promise { 171 | const resp = await this.#request(`/chat/completions`, { 172 | model: options.model, 173 | messages: options.messages, 174 | temperature: options.temperature, 175 | top_p: options.topP, 176 | n: options.n, 177 | stop: options.stop, 178 | max_tokens: options.maxTokens, 179 | presence_penalty: options.presencePenalty, 180 | frequency_penalty: options.frequencyPenalty, 181 | logit_bias: options.logitBias, 182 | user: options.user, 183 | functions: options.functions, 184 | function_call: options.function_call, 185 | }) as ChatCompletion; 186 | 187 | // null coalesce content to empty string as discussed in PR #17 188 | resp?.choices?.forEach( 189 | (choice) => (choice.message.content = choice.message.content ?? 
""), 190 | ); 191 | return resp; 192 | } 193 | 194 | /** 195 | * Creates a completion stream for the chat message 196 | * 197 | * https://platform.openai.com/docs/api-reference/chat/create 198 | */ 199 | async createChatCompletionStream( 200 | options: ChatCompletionOptions, 201 | callback: (chunk: ChatCompletionStream) => void, 202 | ): Promise { 203 | const res = await fetch( 204 | `${this.#baseUrl}/chat/completions`, 205 | { 206 | method: "POST", 207 | headers: { 208 | Authorization: `Bearer ${this.#privateKey}`, 209 | "Content-Type": "application/json", 210 | }, 211 | body: JSON.stringify({ 212 | model: options.model, 213 | messages: options.messages, 214 | temperature: options.temperature, 215 | top_p: options.topP, 216 | n: options.n, 217 | stream: true, 218 | stop: options.stop, 219 | max_tokens: options.maxTokens, 220 | presence_penalty: options.presencePenalty, 221 | frequency_penalty: options.frequencyPenalty, 222 | logit_bias: options.logitBias, 223 | user: options.user, 224 | functions: options.functions, 225 | function_call: options.function_call, 226 | }), 227 | }, 228 | ); 229 | 230 | await decodeStream(res, callback); 231 | } 232 | 233 | /** 234 | * Creates a new edit for the provided input, instruction, and parameters. 235 | * 236 | * https://platform.openai.com/docs/api-reference/edits/create 237 | */ 238 | async createEdit(options: EditOptions): Promise { 239 | return await this.#request(`/edits`, { 240 | model: options.model, 241 | input: options.input, 242 | instruction: options.instruction, 243 | n: options.n, 244 | temperature: options.temperature, 245 | top_p: options.topP, 246 | }); 247 | } 248 | 249 | /** 250 | * Creates an image given a prompt. 
251 | * 252 | * https://platform.openai.com/docs/api-reference/images/create 253 | */ 254 | async createImage(options: ImageOptions): Promise { 255 | return await this.#request(`/images/generations`, { 256 | prompt: options.prompt, 257 | n: options.n, 258 | size: options.size, 259 | response_format: options.responseFormat, 260 | user: options.user, 261 | }); 262 | } 263 | 264 | /** 265 | * Creates an edited or extended image given an original image and a prompt. 266 | * 267 | * https://platform.openai.com/docs/api-reference/images/create-edit 268 | */ 269 | async createImageEdit(options: ImageEditOptions): Promise { 270 | const formData = new FormData(); 271 | 272 | // Model specified 273 | formData.append("image", options.image); 274 | 275 | // File data 276 | if (typeof options.image === "string") { 277 | const fileData = await Deno.readFile(options.image); 278 | 279 | formData.append( 280 | "image", 281 | new File([fileData], basename(options.image)), 282 | ); 283 | } else { 284 | // Deno types are wrong 285 | formData.append("image", options.image as unknown as Blob); 286 | } 287 | 288 | if (options.n) formData.append("n", options.n); 289 | if (options.mask) formData.append("mask", options.mask); 290 | if (options.prompt) formData.append("prompt", options.prompt); 291 | if (options.size) formData.append("size", options.size); 292 | if (options.user) formData.append("user", options.user); 293 | if (options.responseFormat) { 294 | formData.append("response_format", options.responseFormat); 295 | } 296 | 297 | return await this.#request(`/images/edits`, formData, { 298 | noContentType: true, 299 | method: "POST", 300 | }); 301 | } 302 | 303 | /** 304 | * Creates a variation of a given image. 
305 | * 306 | * https://platform.openai.com/docs/api-reference/images/create-variation 307 | */ 308 | async createImageVariation(options: ImageVariationOptions): Promise { 309 | const formData = new FormData(); 310 | 311 | // Model specified 312 | formData.append("image", options.image); 313 | 314 | // File data 315 | if (typeof options.image === "string") { 316 | const fileData = await Deno.readFile(options.image); 317 | 318 | formData.append( 319 | "image", 320 | new File([fileData], basename(options.image)), 321 | ); 322 | } else { 323 | // Deno types are wrong 324 | formData.append("image", options.image as unknown as Blob); 325 | } 326 | 327 | if (options.n) formData.append("n", options.n); 328 | if (options.size) formData.append("size", options.size); 329 | if (options.user) formData.append("user", options.user); 330 | if (options.responseFormat) { 331 | formData.append("response_format", options.responseFormat); 332 | } 333 | 334 | return await this.#request(`/images/variations`, formData, { 335 | noContentType: true, 336 | method: "POST", 337 | }); 338 | } 339 | 340 | /** 341 | * Creates an embedding vector representing the input text. 342 | * 343 | * https://platform.openai.com/docs/api-reference/embeddings/create 344 | */ 345 | async createEmbeddings(options: EmbeddingsOptions): Promise { 346 | return await this.#request(`/embeddings`, options); 347 | } 348 | 349 | /** 350 | * Transcribes audio into the input language. 
351 | * 352 | * https://platform.openai.com/docs/api-reference/audio/create 353 | */ 354 | async createTranscription( 355 | options: TranscriptionOptions, 356 | ): Promise { 357 | const formData = new FormData(); 358 | 359 | // Model specified 360 | formData.append("model", options.model); 361 | 362 | // File data 363 | if (typeof options.file === "string") { 364 | const file = await Deno.readFile(options.file); 365 | 366 | formData.append( 367 | "file", 368 | new File([file], basename(options.file)), 369 | ); 370 | } else { 371 | // Deno types are wrong 372 | formData.append("file", options.file as unknown as Blob); 373 | } 374 | 375 | if (options.prompt) { 376 | formData.append("prompt", options.prompt); 377 | } 378 | if (options.responseFormat) { 379 | formData.append("response_format", options.responseFormat); 380 | } 381 | if (options.temperature) { 382 | formData.append("temperature", options.temperature.toString()); 383 | } 384 | if (options.language) { 385 | formData.append("language", options.language); 386 | } 387 | 388 | return await this.#request(`/audio/transcriptions`, formData, { 389 | noContentType: true, 390 | }); 391 | } 392 | 393 | /** 394 | * Translates audio into into English. 
395 | * 396 | * https://platform.openai.com/docs/api-reference/audio/create 397 | */ 398 | async createTranslation(options: TranslationOptions): Promise { 399 | const formData = new FormData(); 400 | 401 | // Model specified 402 | formData.append("model", options.model); 403 | 404 | // File data 405 | if (typeof options.file === "string") { 406 | const file = await Deno.readFile(options.file); 407 | 408 | formData.append( 409 | "file", 410 | new File([file], basename(options.file)), 411 | ); 412 | } else { 413 | // Deno types are wrong 414 | formData.append("file", options.file as unknown as Blob); 415 | } 416 | 417 | if (options.prompt) { 418 | formData.append("prompt", options.prompt); 419 | } 420 | if (options.responseFormat) { 421 | formData.append("response_format", options.responseFormat); 422 | } 423 | if (options.temperature) { 424 | formData.append("temperature", options.temperature.toString()); 425 | } 426 | 427 | return await this.#request(`/audio/translations`, formData, { 428 | noContentType: true, 429 | }); 430 | } 431 | 432 | /** 433 | * Returns a list of files that belong to the user's organization. 434 | * 435 | * https://platform.openai.com/docs/api-reference/files/list 436 | */ 437 | async listFiles(): Promise { 438 | return await this.#request(`/files`, undefined, { method: "GET" }); 439 | } 440 | 441 | /** 442 | * Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. 
443 | * 444 | * https://platform.openai.com/docs/api-reference/files/upload 445 | */ 446 | async uploadFile( 447 | file: FileSpecifier, 448 | purpose: string, 449 | ): Promise { 450 | const formData = new FormData(); 451 | 452 | // Model specified 453 | formData.append("file", file); 454 | 455 | // File data 456 | if (typeof file === "string") { 457 | const fileData = await Deno.readFile(file); 458 | 459 | formData.append( 460 | "file", 461 | new File([fileData], basename(file)), 462 | ); 463 | } else { 464 | // Deno types are wrong 465 | formData.append("file", file as unknown as Blob); 466 | } 467 | 468 | formData.append("purpose", purpose); 469 | 470 | return await this.#request(`/files`, formData, { 471 | noContentType: true, 472 | method: "POST", 473 | }); 474 | } 475 | 476 | /** 477 | * Delete a file. 478 | * 479 | * https://platform.openai.com/docs/api-reference/files/delete 480 | */ 481 | async deleteFile(fileId: string): Promise { 482 | return await this.#request(`/files/${fileId}`, undefined, { 483 | method: "DELETE", 484 | }); 485 | } 486 | 487 | /** 488 | * Returns information about a specific file. 489 | * 490 | * https://platform.openai.com/docs/api-reference/files/retrieve 491 | */ 492 | async retrieveFile(fileId: string): Promise { 493 | return await this.#request(`/files/${fileId}`, undefined, { 494 | method: "GET", 495 | }); 496 | } 497 | 498 | /** 499 | * Returns the contents of the specified file 500 | * 501 | * https://platform.openai.com/docs/api-reference/files/retrieve-content 502 | */ 503 | async retrieveFileContent(fileId: string) { 504 | const response = await fetch( 505 | `${this.#baseUrl}/files/${fileId}/content`, 506 | { 507 | headers: { 508 | Authorization: `Bearer ${this.#privateKey}`, 509 | "Content-Type": "application/json", 510 | }, 511 | }, 512 | ); 513 | return response.body; 514 | } 515 | 516 | /** 517 | * Creates a job that fine-tunes a specified model from a given dataset. 
518 | * 519 | * https://platform.openai.com/docs/api-reference/fine-tunes/create 520 | */ 521 | async createFineTune( 522 | options: FineTuneOptions, 523 | ): Promise<(FineTune & { events: FineTuneEvent[] })> { 524 | return await this.#request(`/fine-tunes`, { 525 | training_file: options.trainingFile, 526 | validation_file: options.validationFile, 527 | model: options.model, 528 | n_epochs: options.nEpochs, 529 | batch_size: options.batchSize, 530 | learning_rate_multiplier: options.learningRateMultiplier, 531 | prompt_loss_weight: options.promptLossWeight, 532 | compute_classification_metrics: options.computeClassificationMetrics, 533 | classification_n_classes: options.classificationNClasses, 534 | classification_positive_class: options.classificationPositiveClass, 535 | classification_betas: options.classificationBetas, 536 | suffix: options.suffix, 537 | }); 538 | } 539 | 540 | /** 541 | * List your organization's fine-tuning jobs 542 | * 543 | * https://platform.openai.com/docs/api-reference/fine-tunes/list 544 | */ 545 | async listFineTunes(): Promise { 546 | return await this.#request(`/fine-tunes`, undefined, { method: "GET" }); 547 | } 548 | 549 | /** 550 | * Gets info about the fine-tune job. 551 | * 552 | * https://platform.openai.com/docs/api-reference/fine-tunes/retrieve 553 | */ 554 | async retrieveFineTune( 555 | fineTuneId: string, 556 | ): Promise<(FineTune & { events: FineTuneEvent[] })> { 557 | return await this.#request(`/fine-tunes/${fineTuneId}`, undefined, { 558 | method: "GET", 559 | }); 560 | } 561 | 562 | /** 563 | * Immediately cancel a fine-tune job. 564 | * 565 | * https://platform.openai.com/docs/api-reference/fine-tunes/cancel 566 | */ 567 | async cancelFineTune( 568 | fineTuneId: string, 569 | ): Promise<(FineTune & { events: FineTuneEvent[] })> { 570 | return await this.#request(`/fine-tunes/${fineTuneId}/cancel`, undefined); 571 | } 572 | 573 | /** 574 | * Get fine-grained status updates for a fine-tune job. 
575 | * 576 | * https://platform.openai.com/docs/api-reference/fine-tunes/events 577 | */ 578 | async listFineTuneEvents(fineTuneId: string): Promise { 579 | // TODO: stream query parameter 580 | return await this.#request( 581 | `/fine-tunes/${fineTuneId}/events`, 582 | undefined, 583 | { method: "GET" }, 584 | ); 585 | } 586 | 587 | /** 588 | * Delete a fine-tuned model. You must have the Owner role in your organization. 589 | * 590 | * https://platform.openai.com/docs/api-reference/fine-tunes/delete-model 591 | */ 592 | async deleteFineTuneModel(model: string): Promise { 593 | return await this.#request(`/models/${model}`, undefined, { 594 | method: "DELETE", 595 | }); 596 | } 597 | 598 | /** 599 | * Classifies if text violates OpenAI's Content Policy 600 | * 601 | * https://platform.openai.com/docs/api-reference/moderations/create 602 | */ 603 | async createModeration( 604 | input: string | string[], 605 | model?: string, 606 | ): Promise { 607 | return await this.#request(`/moderations`, { 608 | input, 609 | model, 610 | }); 611 | } 612 | } 613 | -------------------------------------------------------------------------------- /src/types.ts: -------------------------------------------------------------------------------- 1 | export type FileSpecifier = string | File; 2 | 3 | export interface CompletionOptions { 4 | /** 5 | * ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them. 6 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-model 7 | */ 8 | model: string; 9 | 10 | /** 11 | * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 12 | * Note that <|endoftext|> is the document separator that the model sees during training, 13 | * so if a prompt is not specified the model will generate as if from the beginning of a new document. 
14 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-prompt 15 | */ 16 | prompt: string | string[]; 17 | 18 | /** 19 | * The suffix that comes after a completion of inserted text. 20 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-suffix 21 | */ 22 | suffix?: string; 23 | 24 | /** 25 | * The maximum number of tokens to generate in the completion. 26 | * The token count of your prompt plus max_tokens cannot exceed the model's context length. 27 | * Most models have a context length of 2048 tokens (except for the newest models, which support 4096). 28 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-max_tokens 29 | */ 30 | maxTokens?: number; 31 | 32 | /** 33 | * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, 34 | * while lower values like 0.2 will make it more focused and deterministic. 35 | * We generally recommend altering this or top_p but not both. 36 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature 37 | */ 38 | temperature?: number; 39 | 40 | /** 41 | * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 42 | * So 0.1 means only the tokens comprising the top 10% probability mass are considered. 43 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-top_p 44 | */ 45 | topP?: number; 46 | 47 | /** 48 | * How many completions to generate for each prompt. 49 | * Note: Because this parameter generates many completions, it can quickly consume your token quota. 50 | * Use carefully and ensure that you have reasonable settings for max_tokens and stop. 
51 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-n 52 | */ 53 | n?: number; 54 | 55 | /** 56 | * Include the log probabilities on the logprobs most likely tokens, as well the chosen tokens. 57 | * For example, if logprobs is 5, the API will return a list of the 5 most likely tokens. 58 | * The API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response. 59 | * The maximum value for logprobs is 5. If you need more than this, please contact us through our Help center and describe your use case. 60 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-logprobs 61 | */ 62 | logprobs?: number; 63 | 64 | /** 65 | * Echo back the prompt in addition to the completion 66 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-echo 67 | */ 68 | echo?: boolean; 69 | 70 | /** 71 | * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 72 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-stop 73 | */ 74 | stop?: string | string[]; 75 | 76 | /** 77 | * Number between -2.0 and 2.0. 78 | * Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 79 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-presence_penalty 80 | */ 81 | presencePenalty?: number; 82 | 83 | /** 84 | * Number between -2.0 and 2.0. 85 | * Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 
86 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-frequency_penalty 87 | */ 88 | frequencyPenalty?: number; 89 | 90 | /** 91 | * Generates best_of completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. 92 | * When used with n, best_of controls the number of candidate completions and n specifies how many to return – best_of must be greater than n. 93 | * Note: Because this parameter generates many completions, it can quickly consume your token quota. 94 | * Use carefully and ensure that you have reasonable settings for max_tokens and stop. 95 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-best_of 96 | */ 97 | bestOf?: number; 98 | 99 | /** 100 | * Modify the likelihood of specified tokens appearing in the completion. 101 | * Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. 102 | * You can use this tokenizer tool (which works for both GPT-2 and GPT-3) to convert text to token IDs. 103 | * Mathematically, the bias is added to the logits generated by the model prior to sampling. 104 | * The exact effect will vary per model, 105 | * but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 106 | * As an example, you can pass {"50256": -100} to prevent the <|endoftext|> token from being generated. 107 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-logit_bias 108 | */ 109 | logitBias?: Record; 110 | 111 | /** 112 | * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
113 | * https://platform.openai.com/docs/api-reference/completions/create#completions/create-user 114 | */ 115 | user?: string; 116 | } 117 | 118 | export interface ChatCompletionOptions { 119 | /** 120 | * ID of the model to use. See the 121 | * [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) 122 | * table for details on which models work with the Chat API. 123 | */ 124 | model: 125 | | (string & {}) 126 | | "gpt-4" 127 | | "gpt-4-0314" 128 | | "gpt-4-0613" 129 | | "gpt-4-32k" 130 | | "gpt-4-32k-0314" 131 | | "gpt-4-32k-0613" 132 | | "gpt-3.5-turbo" 133 | | "gpt-3.5-turbo-16k" 134 | | "gpt-3.5-turbo-0301" 135 | | "gpt-3.5-turbo-0613" 136 | | "gpt-3.5-turbo-16k-0613"; 137 | 138 | /** 139 | * The messages to generate chat completions for, in the chat format. 140 | * https://platform.openai.com/docs/api-reference/chat/create#chat/create-messages 141 | */ 142 | messages: ChatCompletionMessage[]; 143 | 144 | /** 145 | * What sampling temperature to use, between 0 and 2. 146 | * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 147 | * We generally recommend altering this or top_p but not both. 148 | * https://platform.openai.com/docs/api-reference/chat/create#chat/create-temperature 149 | */ 150 | temperature?: number; 151 | 152 | /** 153 | * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 154 | * So 0.1 means only the tokens comprising the top 10% probability mass are considered. 155 | * We generally recommend altering this or temperature but not both. 156 | * https://platform.openai.com/docs/api-reference/chat/create#chat/create-top_p 157 | */ 158 | topP?: number; 159 | 160 | /** 161 | * How many chat completion choices to generate for each input message. 
162 | * https://platform.openai.com/docs/api-reference/chat/create#chat/create-n 163 | */ 164 | n?: number; 165 | 166 | /** 167 | * Up to 4 sequences where the API will stop generating further tokens. 168 | * https://platform.openai.com/docs/api-reference/chat/create#chat/create-stop 169 | */ 170 | stop?: string | string[]; 171 | 172 | /** 173 | * The maximum number of tokens allowed for the generated answer. 174 | * By default, the number of tokens the model can return will be (4096 - prompt tokens). 175 | * https://platform.openai.com/docs/api-reference/chat/create#chat/create-max_tokens 176 | */ 177 | maxTokens?: number; 178 | 179 | /** 180 | * Number between -2.0 and 2.0. 181 | * Positive values penalize new tokens based on whether they appear in the text so far, 182 | * increasing the model's likelihood to talk about new topics. 183 | * https://platform.openai.com/docs/api-reference/chat/create#chat/create-presence_penalty 184 | */ 185 | presencePenalty?: number; 186 | 187 | /** 188 | * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, 189 | * decreasing the model's likelihood to repeat the same line verbatim. 190 | * https://platform.openai.com/docs/api-reference/chat/create#chat/create-frequency_penalty 191 | */ 192 | frequencyPenalty?: number; 193 | 194 | /** 195 | * Modify the likelihood of specified tokens appearing in the completion. 196 | * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. 197 | * Mathematically, the bias is added to the logits generated by the model prior to sampling. 198 | * The exact effect will vary per model, 199 | * but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 
200 | * https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias 201 | */ 202 | logitBias?: Record; 203 | 204 | /** 205 | * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 206 | * https://platform.openai.com/docs/api-reference/chat/create#chat/create-user 207 | */ 208 | user?: string; 209 | 210 | /** 211 | * A list of functions the model may generate JSON inputs for. 212 | * https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions 213 | */ 214 | functions?: ChatCompletionOptionsFunction[]; 215 | 216 | /** 217 | * Controls how the model responds to function calls. 218 | * "none" means the model does not call a function, and responds to the end-user. 219 | * "auto" means the model can pick between an end-user or calling a function. 220 | * Specifying a particular function via {"name":\ "my_function"} forces the model to call that function. 221 | * "none" is the default when no functions are present. "auto" is the default if functions are present. 
222 | * https://platform.openai.com/docs/api-reference/chat/create#chat/create-function_call 223 | */ 224 | function_call?: "none" | "auto" | { name: string }; 225 | } 226 | 227 | export type ChatCompletionOptionsFunction = { 228 | name: string; 229 | description: string; 230 | parameters: ObjectSchema; 231 | }; 232 | 233 | export interface SystemCompletionMessage { 234 | content: string; 235 | name?: string; 236 | role: "system"; 237 | } 238 | 239 | export interface UserCompletionMessage { 240 | content: string; 241 | name?: string; 242 | role: "user"; 243 | } 244 | 245 | export interface AssistantCompletionMessage { 246 | content: string; 247 | name?: string; 248 | role: "assistant"; 249 | } 250 | 251 | export interface FunctionAwareAssistantCompletionMessage { 252 | content: string | null; 253 | role: "assistant"; 254 | function_call?: { 255 | "name": string; 256 | "arguments": string; 257 | }; 258 | } 259 | 260 | export interface FunctionCompletionMessage { 261 | content: string; 262 | role: "function"; 263 | name: string; 264 | } 265 | 266 | export type ChatCompletionMessage = 267 | | SystemCompletionMessage 268 | | UserCompletionMessage 269 | | FunctionAwareAssistantCompletionMessage 270 | | FunctionCompletionMessage 271 | | AssistantCompletionMessage; 272 | 273 | type JSONSchema = 274 | & ( 275 | | ObjectSchema 276 | | StringSchema 277 | | NumberSchema 278 | | BooleanSchema 279 | | ArraySchema 280 | ) 281 | & { description?: string }; 282 | 283 | type ObjectSchema = { 284 | type: "object"; 285 | properties: Record; 286 | required: string[]; 287 | }; 288 | 289 | type ArraySchema = { 290 | type: "array"; 291 | items: JSONSchema; 292 | }; 293 | 294 | type StringSchema = { 295 | type: "string"; 296 | enum?: string[]; 297 | }; 298 | 299 | type NumberSchema = { 300 | type: "number"; 301 | minimum?: number; 302 | maximum?: number; 303 | }; 304 | 305 | type BooleanSchema = { 306 | type: "boolean"; 307 | }; 308 | 309 | export interface EditOptions { 310 | /** 311 | * 
ID of the model to use. You can use the text-davinci-edit-001 or code-davinci-edit-001 model with this endpoint. 312 | * https://platform.openai.com/docs/api-reference/edits/create#edits/create-model 313 | */ 314 | model: string; 315 | 316 | /** 317 | * The input text to use as a starting point for the edit. 318 | * https://platform.openai.com/docs/api-reference/edits/create#edits/create-input 319 | */ 320 | input?: string; 321 | 322 | /** 323 | * The instruction that tells the model how to edit the prompt. 324 | * https://platform.openai.com/docs/api-reference/edits/create#edits/create-instruction 325 | */ 326 | instruction: string; 327 | 328 | /** 329 | * How many edits to generate for the input and instruction. 330 | * https://platform.openai.com/docs/api-reference/edits/create#edits/create-n 331 | */ 332 | n?: number; 333 | 334 | /** 335 | * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, 336 | * while lower values like 0.2 will make it more focused and deterministic. 337 | * We generally recommend altering this or top_p but not both. 338 | * https://platform.openai.com/docs/api-reference/edits/create#edits/create-temperature 339 | */ 340 | temperature?: number; 341 | 342 | /** 343 | * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 344 | * So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. 345 | * https://platform.openai.com/docs/api-reference/edits/create#edits/create-top_p 346 | */ 347 | topP?: number; 348 | } 349 | 350 | export interface ImageOptions { 351 | /** 352 | * A text description of the desired image(s). The maximum length is 1000 characters. 
353 | * https://platform.openai.com/docs/api-reference/images/create#images/create-prompt 354 | */ 355 | prompt: string; 356 | 357 | /** 358 | * The number of images to generate. Must be between 1 and 10. 359 | * https://platform.openai.com/docs/api-reference/images/create#images/create-n 360 | */ 361 | n?: number; 362 | 363 | /** 364 | * The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. 365 | * https://platform.openai.com/docs/api-reference/images/create#images/create-size 366 | */ 367 | size?: "256x256" | "512x512" | "1024x1024"; 368 | 369 | /** 370 | * The format in which the generated images are returned. Must be one of url or b64_json. 371 | * https://platform.openai.com/docs/api-reference/images/create#images/create-response_format 372 | */ 373 | responseFormat?: "url" | "b64_json"; 374 | 375 | /** 376 | * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 377 | * https://platform.openai.com/docs/api-reference/images/create#images/create-user 378 | */ 379 | user?: string; 380 | } 381 | 382 | export interface ImageEditOptions { 383 | /** 384 | * The image to edit. Must be a valid PNG file, less than 4MB, and square. 385 | * If mask is not provided, image must have transparency, which will be used as the mask. 386 | * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-image 387 | */ 388 | image: FileSpecifier; 389 | 390 | /** 391 | * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. 392 | * Must be a valid PNG file, less than 4MB, and have the same dimensions as image. 393 | * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-mask 394 | */ 395 | mask?: string; 396 | 397 | /** 398 | * A text description of the desired image(s). The maximum length is 1000 characters. 
399 | * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-prompt 400 | */ 401 | prompt: string; 402 | 403 | /** 404 | * The number of images to generate. Must be between 1 and 10. 405 | * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-n 406 | */ 407 | n?: number; 408 | 409 | /** 410 | * The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. 411 | * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-size 412 | */ 413 | size?: "256x256" | "512x512" | "1024x1024"; 414 | 415 | /** 416 | * The format in which the generated images are returned. Must be one of url or b64_json. 417 | * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-response_format 418 | */ 419 | responseFormat?: "url" | "b64_json"; 420 | 421 | /** 422 | * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 423 | * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-user 424 | */ 425 | user?: string; 426 | } 427 | 428 | export interface ImageVariationOptions { 429 | /** 430 | * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. 431 | * https://platform.openai.com/docs/api-reference/images/create-variation#images/create-variation-image 432 | */ 433 | image: FileSpecifier; 434 | 435 | /** 436 | * The number of images to generate. Must be between 1 and 10. 437 | * https://platform.openai.com/docs/api-reference/images/create-variation#images/create-variation-n 438 | */ 439 | n?: number; 440 | 441 | /** 442 | * The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. 443 | * https://platform.openai.com/docs/api-reference/images/create-variation#images/create-variation-size 444 | */ 445 | size?: "256x256" | "512x512" | "1024x1024"; 446 | 447 | /** 448 | * The format in which the generated images are returned. 
Must be one of url or b64_json. 449 | * https://platform.openai.com/docs/api-reference/images/create-variation#images/create-variation-response_format 450 | */ 451 | responseFormat?: "url" | "b64_json"; 452 | 453 | /** 454 | * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 455 | * https://platform.openai.com/docs/api-reference/images/create-variation#images/create-variation-user 456 | */ 457 | user?: string; 458 | } 459 | 460 | export interface EmbeddingsOptions { 461 | /** 462 | * ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them. 463 | * https://platform.openai.com/docs/api-reference/embeddings/create#embeddings/create-model 464 | */ 465 | model: string; 466 | 467 | /** 468 | * Input text to get embeddings for, encoded as a string or array of tokens. 469 | * To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. 470 | * Each input must not exceed 8192 tokens in length. 471 | * https://platform.openai.com/docs/api-reference/embeddings/create#embeddings/create-input 472 | */ 473 | input: string | string[]; 474 | 475 | /** 476 | * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 477 | * https://platform.openai.com/docs/api-reference/embeddings/create#embeddings/create-user 478 | */ 479 | user?: string; 480 | } 481 | 482 | export interface TranscriptionOptions { 483 | /** 484 | * The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. 485 | * https://platform.openai.com/docs/api-reference/audio/create#audio/create-file 486 | */ 487 | file: FileSpecifier; 488 | 489 | /** 490 | * ID of the model to use. Only whisper-1 is currently available. 
491 | * https://platform.openai.com/docs/api-reference/audio/create#audio/create-model 492 | */ 493 | model: string; 494 | 495 | /** 496 | * An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language. 497 | * https://platform.openai.com/docs/api-reference/audio/create#audio/create-prompt 498 | */ 499 | prompt?: string; 500 | 501 | /** 502 | * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. 503 | * https://platform.openai.com/docs/api-reference/audio/create#audio/create-response_format 504 | */ 505 | responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt"; 506 | 507 | /** 508 | * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 509 | * If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. 510 | * https://platform.openai.com/docs/api-reference/audio/create#audio/create-temperature 511 | */ 512 | temperature?: number; 513 | 514 | /** 515 | * The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency. 516 | * https://platform.openai.com/docs/api-reference/audio/create#audio/create-language 517 | */ 518 | language?: string; 519 | } 520 | 521 | export interface TranslationOptions { 522 | /** 523 | * The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. 524 | * https://platform.openai.com/docs/api-reference/audio/create#audio/create-file 525 | */ 526 | file: FileSpecifier; 527 | 528 | /** 529 | * ID of the model to use. Only whisper-1 is currently available. 
530 | * https://platform.openai.com/docs/api-reference/audio/create#audio/create-model 531 | */ 532 | model: string; 533 | 534 | /** 535 | * An optional text to guide the model's style or continue a previous audio segment. The prompt should be in English. 536 | * https://platform.openai.com/docs/api-reference/audio/create#audio/create-prompt 537 | */ 538 | prompt?: string; 539 | 540 | /** 541 | * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. 542 | * https://platform.openai.com/docs/api-reference/audio/create#audio/create-response_format 543 | */ 544 | responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt"; 545 | 546 | /** 547 | * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 548 | * If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. 549 | * https://platform.openai.com/docs/api-reference/audio/create#audio/create-temperature 550 | */ 551 | temperature?: number; 552 | 553 | /** 554 | * The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency. 555 | * https://platform.openai.com/docs/api-reference/audio/create#audio/create-language 556 | */ 557 | language?: string; 558 | } 559 | 560 | export interface FineTuneOptions { 561 | /** 562 | * The ID of an uploaded file that contains training data. 563 | * Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys "prompt" and "completion". 564 | * Additionally, you must upload your file with the purpose fine-tune. 565 | * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-training_file 566 | */ 567 | trainingFile: string; 568 | 569 | /** 570 | * The ID of an uploaded file that contains validation data. 
571 | * If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. 572 | * These metrics can be viewed in the fine-tuning results file. Your train and validation data should be mutually exclusive. 573 | * Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys "prompt" and "completion". 574 | * Additionally, you must upload your file with the purpose fine-tune. 575 | * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-training_file 576 | */ 577 | validationFile: string; 578 | 579 | /** 580 | * The name of the base model to fine-tune. 581 | * You can select one of "ada", "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. 582 | * To learn more about these models, see the Models documentation. 583 | * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-model 584 | */ 585 | model: string; 586 | 587 | /** 588 | * The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. 589 | * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-n_epochs 590 | */ 591 | nEpochs?: number; 592 | 593 | /** 594 | * The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass. 595 | * By default, the batch size will be dynamically configured to be ~0.2% of the number of examples in the training set, capped at 256 - in general, 596 | * we've found that larger batch sizes tend to work better for larger datasets. 597 | */ 598 | batchSize?: number; 599 | 600 | /** 601 | * The learning rate multiplier to use for training. The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value. 
602 | * By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final batch_size (larger learning rates tend to perform better with larger batch sizes). 603 | * We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results. 604 | * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-learning_rate_multiplier 605 | */ 606 | learningRateMultiplier?: number; 607 | 608 | /** 609 | * The weight to use for loss on the prompt tokens. 610 | * This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), 611 | * and can add a stabilizing effect to training when completions are short. 612 | * If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt. 613 | * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-prompt_loss_weight 614 | */ 615 | promptLossWeight?: number; 616 | 617 | /** 618 | * If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch. 619 | * These metrics can be viewed in the results file. 620 | * In order to compute classification metrics, you must provide a validation_file. 621 | * Additionally, you must specify classification_n_classes for multiclass classification or classification_positive_class for binary classification. 622 | * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-compute_classification_metrics 623 | */ 624 | computeClassificationMetrics?: boolean; 625 | 626 | /** 627 | * The number of classes in a classification task. 628 | * This parameter is required for multiclass classification. 
   * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_n_classes
   */
  classificationNClasses?: number;

  /**
   * The positive class in binary classification.
   * This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification.
   * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_positive_class
   */
  classificationPositiveClass?: string;

  /**
   * If this is provided, we calculate F-beta scores at the specified beta values.
   * The F-beta score is a generalization of F-1 score. This is only used for binary classification.
   * With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight.
   * A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall.
   * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_betas
   */
  classificationBetas?: number[];

  /**
   * A string of up to 40 characters that will be added to your fine-tuned model name.
   * For example, a suffix of "custom-model-name" would produce a model name like ada:ft-your-org:custom-model-name-2022-02-15-04-21-04.
   * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-suffix
   */
  suffix?: string;
}

/**
 * An OpenAI model, including the permission entries describing what the
 * caller is allowed to do with it.
 * https://platform.openai.com/docs/api-reference/models
 */
export interface Model {
  id: string;
  object: "model";
  created: number;
  owned_by: string;
  permission: {
    id: string;
    object: "model_permission";
    created: number;
    allow_create_engine: boolean;
    allow_sampling: boolean;
    allow_logprobs: boolean;
    allow_search_indices: boolean;
    allow_view: boolean;
    allow_fine_tuning: boolean;
    organization: string;
    group: null | string;
    is_blocking: boolean;
  }[];
  root: string;
  parent: null | string;
}

/** Response shape returned when listing available models. */
export interface ModelList {
  object: "list";
  data: Model[];
}

/**
 * A (non-streamed) text completion response, including token usage totals.
 * https://platform.openai.com/docs/api-reference/completions
 */
export interface Completion {
  id: string;
  object: "text_completion";
  created: number;
  model: string;
  choices: {
    text: string;
    index: number;
    logprobs: number | null;
    finish_reason: string;
  }[];
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}

/**
 * One chunk of a streamed text completion. Unlike `Completion`, streamed
 * chunks carry no `usage` totals.
 */
export interface CompletionStream {
  id: string;
  object: "text_completion";
  created: number;
  model: string;
  choices: {
    text: string;
    index: number;
    logprobs: number | null;
    finish_reason: string;
  }[];
}

/**
 * A (non-streamed) chat completion response.
 * https://platform.openai.com/docs/api-reference/chat
 */
export interface ChatCompletion {
  id: string;
  object: "chat.completion";
  created: number;
  choices: {
    index: number;
    message: ChatCompletionMessage;
    finish_reason: string;
  }[];
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}

/**
 * The incremental part of a streamed chat message. Every field is optional
 * because each chunk carries only what changed since the previous chunk.
 */
export interface ChatCompletionStreamDelta {
  name?: string;
  role?: "system" | "assistant" | "user";
  content?: string | null;
  function_call?: {
    name?: string;
    arguments: string;
  };
}

/** One chunk of a streamed chat completion. */
export interface ChatCompletionStream {
  id: string;
  object: "chat.completion.chunk";
  created: number;
  choices: {
    index: number;
    delta: ChatCompletionStreamDelta;
    finish_reason: string | null;
  }[];
}

/**
 * Response from the edits endpoint.
 * https://platform.openai.com/docs/api-reference/edits
 */
export interface Edit {
  object: "edit";
  created: number;
  choices: {
    text: string;
    index: number;
  }[];
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}

/**
 * Response from the image endpoints; each entry carries either a URL or
 * base64 JSON depending on the requested response format.
 */
export interface Image {
  created: number;
  data: {
    url: string;
    b64_json: string;
  }[];
}

/**
 * Response from the embeddings endpoint.
 * https://platform.openai.com/docs/api-reference/embeddings
 */
export interface Embedding {
  object: "list";
  data: {
    object: "embedding";
    embedding: number[];
    index: number;
  }[];
  model: string;
  usage: {
    prompt_tokens: number;
    total_tokens: number;
  };
}

/** Response from the audio transcription endpoint. */
export interface Transcription {
  text: string;
}

/** Response from the audio translation endpoint. */
export interface Translation {
  text: string;
}

/**
 * A file stored with the API.
 * https://platform.openai.com/docs/api-reference/files
 */
export interface FileInstance {
  id: string;
  object: "file";
  bytes: number;
  created_at: number;
  filename: string;
  purpose: string;
}

/** Response shape returned when listing uploaded files. */
export interface FileList {
  data: FileInstance[];
  object: "list";
}

/** Confirmation returned after deleting a file. */
export interface DeletedFile {
  id: string;
  object: "file";
  deleted: boolean;
}

/** A single progress event emitted by a fine-tune job. */
export interface FineTuneEvent {
  object: "fine-tune-event";
  created_at: number;
  level: string;
  message: string;
}

/** Response shape returned when listing a fine-tune job's events. */
export interface FineTuneEventList {
  object: "list";
  data: FineTuneEvent[];
}

/**
 * A fine-tune job and its hyperparameters, files, and status.
 * https://platform.openai.com/docs/api-reference/fine-tunes
 */
export interface FineTune {
  id: string;
  object: "fine-tune";
  model: string;
  created_at: number;
  fine_tuned_model: null |
string; 835 | hyperparams: { 836 | batch_size: number; 837 | learning_rate_multiplier: number; 838 | n_epochs: number; 839 | prompt_loss_weight: number; 840 | }; 841 | organization_id: string; 842 | result_files: FileInstance[]; 843 | status: "pending" | "succeeded" | "cancelled"; 844 | validation_files: FileInstance[]; 845 | training_files: FileInstance[]; 846 | updated_at: number; 847 | } 848 | 849 | export interface FineTuneList { 850 | object: "list"; 851 | data: FineTune[]; 852 | } 853 | 854 | export interface DeletedFineTune { 855 | id: string; 856 | object: "model"; 857 | deleted: boolean; 858 | } 859 | 860 | export interface Moderation { 861 | id: string; 862 | model: string; 863 | results: { 864 | categories: { 865 | hate: boolean; 866 | "hate/threatening": boolean; 867 | "self-harm": boolean; 868 | sexual: boolean; 869 | "sexual/minors": boolean; 870 | violence: boolean; 871 | "violence/graphic": boolean; 872 | }; 873 | category_scores: { 874 | hate: number; 875 | "hate/threatening": number; 876 | "self-harm": number; 877 | sexual: number; 878 | "sexual/minors": number; 879 | violence: number; 880 | "violence/graphic": number; 881 | }; 882 | flagged: boolean; 883 | }[]; 884 | } 885 | -------------------------------------------------------------------------------- /src/util.ts: -------------------------------------------------------------------------------- 1 | import { TextDelimiterStream } from "./deps.ts"; 2 | 3 | export function throwError( 4 | data: { error?: { type: string; message: string; code: string } }, 5 | ) { 6 | if (data.error) { 7 | let errorMessage = `${data.error.type}`; 8 | if (data.error.message) { 9 | errorMessage += ": " + data.error.message; 10 | } 11 | if (data.error.code) { 12 | errorMessage += ` (${data.error.code})`; 13 | } 14 | // console.log(data.error); 15 | throw new Error(errorMessage); 16 | } 17 | } 18 | 19 | // deno-lint-ignore no-explicit-any 20 | export async function decodeStream( 21 | res: Response, 22 | callback: 
(data: any) => void, 23 | ) { 24 | const chunks = res.body! 25 | .pipeThrough(new TextDecoderStream()) 26 | .pipeThrough(new TextDelimiterStream("\n\n")); 27 | 28 | for await (const chunk of chunks) { 29 | let data; 30 | try { 31 | data = JSON.parse(chunk); 32 | } catch { 33 | // no-op (just checking if error message) 34 | } 35 | if (data) throwError(data); 36 | 37 | if (chunk === "data: [DONE]") break; 38 | callback(JSON.parse(chunk.slice(6))); 39 | } 40 | } 41 | --------------------------------------------------------------------------------