├── examples ├── config.nims └── simpleApp.nim ├── src ├── openaiClient.nim └── openaiClient │ └── client.nim ├── openaiClient.nimble ├── LICENSE.md ├── README.md └── spec.yaml /examples/config.nims: -------------------------------------------------------------------------------- 1 | --d:ssl -------------------------------------------------------------------------------- /src/openaiClient.nim: -------------------------------------------------------------------------------- 1 | import openaiClient/[client] 2 | 3 | export client -------------------------------------------------------------------------------- /openaiClient.nimble: -------------------------------------------------------------------------------- 1 | # Package 2 | 3 | version = "0.1.2" 4 | author = "Cletus" 5 | description = "Openai API client For Nim" 6 | license = "MIT" 7 | srcDir = "src" 8 | 9 | 10 | # Dependencies 11 | 12 | requires "nim >= 2.0.0" 13 | 14 | -------------------------------------------------------------------------------- /examples/simpleApp.nim: -------------------------------------------------------------------------------- 1 | import openaiClient 2 | import httpclient, json 3 | 4 | template Json(body: untyped): untyped = 5 | `%*`(body) 6 | 7 | let 8 | env = loadEnvFile(".env") 9 | api_key = env.get("API_KEY") 10 | openai = newOpenAiClient(api_key = api_key) 11 | 12 | let baz = Json { 13 | "image": "pic.png", 14 | "mask": "pic.png", 15 | "prompt": "A Nice Tesla For Asiwaju", 16 | "n": 2, 17 | "size": "512x512", 18 | } 19 | 20 | let foo = openai.createImageEdit(baz) 21 | echo foo.body() 22 | 23 | # import nim_openai/[client] 24 | # import asyncdispatch 25 | 26 | # let 27 | # env = loadEnvFile(".env") 28 | # api_key = env.get("API_KEY") 29 | # openai = newAsyncOpenAiClient(api_key = api_key) 30 | 31 | # let foo = await openai.listmodels() 32 | 33 | # echo foo 34 | -------------------------------------------------------------------------------- /LICENSE.md: 
-------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Cletus Igwe 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### OpenAI api client for Nim Lang 2 | 3 | This is a simple implementation of a Nim lang client for the openai api spec (as found in the spec.yaml file above). This client has support for asynchronous requests and parameters are passed as json. 
4 | 
5 | ### Installation
6 | 
7 | ```console
8 | nimble install openaiclient
9 | ```
10 | 
11 | ### Usage
12 | 
13 | [1] Create a file to hold your apikeys and other environmental variables and construct a new openai client
14 | 
15 | ```nim
16 | import openaiClient
17 | 
18 | let
19 |   env = loadEnvFile(".env")
20 |   apiKey = env.get("API_KEY")
21 |   openai = newOpenAiClient(api_key = apiKey)
22 | ## if you need to use an asynchronous client then:
23 | ##import asyncdispatch
24 | ##let openai = newAsyncOpenAiClient(api_key = apiKey)
25 | 
26 | ```
27 | 
28 | [2] Setup your parameters as json and pass them to the [openai functions](https://platform.openai.com/docs/api-reference) you wish to call
29 | 
30 | ```nim
31 | import json
32 | 
33 | let imageEditRequestParams = %*{"image": "pic.png", "mask": "pic.png", "prompt": "A Nice Tesla For Asiwaju", "n": 2, "size": "512x512"}
34 | 
35 | let imageEditResponse = openai.createImageEdit(imageEditRequestParams)
36 | 
37 | #use the response for whatever cool stuff you are trying to do
38 | 
39 | echo imageEditResponse
40 | 
41 | ```
42 | 
43 | ### Contributions
44 | 
45 | Contributions to the OpenAI Nim client are welcome. If you find a bug or have a suggestion for a new feature, please open an issue on the GitHub repository. If you would like to contribute code, please fork the repository and submit a pull request.
46 | -------------------------------------------------------------------------------- /src/openaiClient/client.nim: -------------------------------------------------------------------------------- 1 | #[ 2 | This Module implements an OpenAI REST API Client In Nim: 3 | openapi: 3.0.0 4 | info: 5 | title: OpenAI API 6 | description: APIs for sampling from and fine-tuning language models 7 | version: '1.3.0' 8 | ]# 9 | 10 | import std/[tables, parsecfg, streams] 11 | 12 | type 13 | Env = object 14 | data: OrderedTableRef[string, string] 15 | 16 | EnvWrongFormatError = object of CatchableError 17 | 18 | func initEnv(): Env {.inline.} = 19 | ## Initializes an `Env`. 20 | Env(data: newOrderedTable[string, string]()) 21 | 22 | func get*(env: Env, key: string): string {.inline.} = 23 | ## Retrieves a value of `key` in `Env`. 24 | result = env.data[key] 25 | 26 | proc loadEnvFile*(filename: string): Env = 27 | ##loads the env file 28 | result = initEnv() 29 | var f = newFileStream(filename, fmRead) 30 | 31 | if f != nil: 32 | var p: CfgParser 33 | open(p, f, filename) 34 | while true: 35 | var e = p.next 36 | case e.kind 37 | of cfgEof: 38 | break 39 | of cfgKeyValuePair: 40 | result.data[e.key] = e.value 41 | else: 42 | raise newException( 43 | EnvWrongFormatError, ".env files only support key-value pairs" 44 | ) 45 | f.close() 46 | p.close() 47 | 48 | import std/[httpclient, asyncdispatch, json, strformat, sets] 49 | 50 | const OpenAI_BASEURL* = "https://api.openai.com/v1" 51 | 52 | type 53 | OpenAi_Client* = ref object 54 | API_KEY: string 55 | organization: string 56 | client: HttpClient 57 | 58 | Async_OpenAi_Client* = ref object 59 | API_KEY: string 60 | organization: string 61 | client: AsyncHttpClient 62 | 63 | proc defaultHeader(api_key, organization: string): HttpHeaders = 64 | if organization != "": 65 | result = newHttpHeaders( 66 | [ 67 | ("Authorization", fmt"Bearer {api_key}"), 68 | ("OpenAI_Organization", organization), 69 | ] 70 | ) 71 | else: 72 | result = 
newHttpHeaders([("Authorization", fmt"Bearer {api_key}")]) 73 | 74 | proc newOpenAiClient*(api_key: string, organization = ""): OpenAi_Client = 75 | result = OpenAi_Client( 76 | API_KEY: api_key, 77 | client: newHttpClient(headers = defaultHeader(api_key, organization)), 78 | ) 79 | 80 | proc newAsyncOpenAiClient*(api_key: string, organization = ""): Async_OpenAi_Client = 81 | result = Async_OpenAi_Client( 82 | API_KEY: api_key, 83 | client: newAsyncHttpClient(headers = defaultHeader(api_key, organization)), 84 | ) 85 | 86 | template getFromOpenAi( 87 | client: HttpClient | AsyncHttpClient, relativePath: string 88 | ): untyped = 89 | get(client, OpenAI_BASEURL & relativePath) 90 | 91 | template postToOpenAi( 92 | client: HttpClient | AsyncHttpClient, 93 | relativePath: string, 94 | requestBody: string = "", 95 | multipart: MultipartData = nil, 96 | ): untyped = 97 | post(client, OpenAI_BASEURL & relativePath, requestBody, multipart) 98 | 99 | template deleteFromOpenAi( 100 | client: HttpClient | AsyncHttpClient, relativePath: string 101 | ): untyped = 102 | delete(client, OpenAI_BASEURL & relativePath) 103 | 104 | template verifyRequestParams( 105 | procName, procType: untyped, requiredParams, optionalParams: seq[string] 106 | ): untyped = 107 | ## The parameter verifications will only be done in development mode, to help speed up production code 108 | ## 109 | proc `procName`(body: JsonNode): `procType` = 110 | when defined(release): 111 | return body 112 | 113 | result = %*{} 114 | var 115 | required = toHashSet(`requiredParams`) 116 | optional = toHashSet(`optionalParams`) 117 | allPossibleParams = required + optional 118 | 119 | for key in body.keys: 120 | if key in allPossibleParams: 121 | allPossibleParams.excl(key) 122 | result[key] = body[key] 123 | else: 124 | echo key, " is not a valid key in the ", `procType`, " schema" 125 | quit(1) 126 | 127 | let omittedRequiredParams = allPossibleParams - optional 128 | 129 | if omittedRequiredParams.len > 0: 130 | 
echo omittedRequiredParams,
131 |       " is a required Parameter in the ", `procType`,
132 |       " schema but has not been provided"
133 |     quit(1)
134 | 
135 | type
136 |   CompletionRequest = JsonNode
137 | 
138 |   ChatCompletionRequest = JsonNode
139 | 
140 |   EditRequest = JsonNode
141 | 
142 |   ImageRequest = JsonNode
143 | 
144 |   ImageEditRequest = JsonNode
145 | 
146 |   ImageVariationRequest = JsonNode
147 | 
148 |   EmbeddingRequest = JsonNode
149 | 
150 |   TranscriptionRequest = JsonNode
151 | 
152 |   TranslationRequest = JsonNode
153 | 
154 |   SearchRequest = JsonNode
155 | 
156 |   FileRequest = JsonNode
157 | 
158 |   AnswerRequest = JsonNode
159 | 
160 |   ClassificationRequest = JsonNode
161 | 
162 |   FineTuneRequest = JsonNode
163 | 
164 |   ModerationRequest = JsonNode
165 | 
166 | verifyRequestParams(
167 |   parseCompletionRequest,
168 |   CompletionRequest,
169 |   @["model"],
170 |   @[
171 |     "prompt", "suffix", "max_tokens", "temperature", "top_p", "n", "stream",
172 |     "logprobs", "echo", "stop", "presence_penalty", "frequency_penalty", "best_of",
173 |     "logit_bias", "user",
174 |   ],
175 | )
176 | 
177 | verifyRequestParams(
178 |   parseChatCompletionRequest,
179 |   ChatCompletionRequest,
180 |   @["model", "messages"],
181 |   @[
182 |     "functions", "function_call", "temperature", "top_p", "n", "stream", "stop",
183 |     "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user",
184 |   ],
185 | )
186 | 
187 | verifyRequestParams(
188 |   parseEditRequest,
189 |   EditRequest,
190 |   @["model", "instruction"],
191 |   @["input", "n", "temperature", "top_p"],  # was @["instruction", ...]: "instruction" is already required; "input" is the optional field in the OpenAI edits schema, and omitting it made createEdit reject any body carrying "input"
192 | )
193 | 
194 | verifyRequestParams(
195 |   parseImageRequest,
196 |   ImageRequest,
197 |   @["prompt"],
198 |   @["n", "size", "response_format", "user"],
199 | )
200 | 
201 | verifyRequestParams(
202 |   parseImageEditRequest,
203 |   ImageEditRequest,
204 |   @["prompt", "image"],
205 |   @["mask", "n", "size", "response_format", "user"],
206 | )
207 | 
208 | verifyRequestParams(
209 |   parseImageVariationRequest,
210 |   ImageVariationRequest,
211 | 
@["image"], 212 | @["n", "size", "response_format", "user"], 213 | ) 214 | 215 | verifyRequestParams(parseModerationRequest, ModerationRequest, @["input"], @["model"]) 216 | 217 | verifyRequestParams( 218 | parseSearchRequest, 219 | SearchRequest, 220 | @["query"], 221 | @["documents", "file", "max_rerank", "user"], 222 | ) 223 | 224 | verifyRequestParams(parseFileRequest, FileRequest, @["file", "purpose"], @["empty"]) 225 | #the empty optionalParams is just so the compiler will shut up 226 | 227 | verifyRequestParams( 228 | parseAnswerRequest, 229 | AnswerRequest, 230 | @["model", "question", "examples", "examples_context"], 231 | @[ 232 | "documents", "file", "search_model", "max_rerank", "temperature", "logprobs", 233 | "max_tokens", "stop", "n", "logit_bias", "return_metadata", "return_prompt", 234 | "expand", "user", 235 | ], 236 | ) 237 | 238 | verifyRequestParams( 239 | parseClassificationRequest, 240 | ClassificationRequest, 241 | @["model", "query"], 242 | @[ 243 | "examples", "file", "labels", "search_model", "temperature", "logprobs", 244 | "max_examples", "logit_bias", "return_prompt", "return_metadata", "expand", 245 | "user", 246 | ], 247 | ) 248 | 249 | verifyRequestParams( 250 | parseFineTuneRequest, 251 | FineTuneRequest, 252 | @["training_file"], 253 | @[ 254 | "validation_file", "model", "n_epochs", "batch_size", 255 | "learning_rate_multiplier", "prompt_loss_weight", 256 | "compute_classification_metrics", "classification_n_classes", 257 | "classification_positive_class", "classification_betas", "suffix", 258 | ], 259 | ) 260 | 261 | verifyRequestParams( 262 | parseEmbeddingRequest, EmbeddingRequest, @["model", "input"], @["user"] 263 | ) 264 | 265 | verifyRequestParams( 266 | parseTranscriptionRequest, 267 | TranscriptionRequest, 268 | @["file", "model"], 269 | @["prompt", "response_format", "temperature", "language"], 270 | ) 271 | 272 | verifyRequestParams( 273 | parseTranslationRequest, 274 | TranslationRequest, 275 | @["file", "model"], 276 
| @["prompt", "response_format", "temperature"], 277 | ) 278 | 279 | proc createMultiPartData( 280 | body: JsonNode, 281 | parseBody: proc(body: JsonNode): JsonNode, 282 | multipartFields: openArray[string], 283 | ): MultipartData = 284 | ## Procedure to make creating multipart/form-data content-types easier 285 | ## 286 | let 287 | verifiedBody = parseBody(body) 288 | multipartBody = newMultipartData() 289 | 290 | for key in verifiedBody.keys: 291 | if key in multipartFields: 292 | let fileName = verifiedBody[key].getStr() 293 | multipartBody.addFiles([(key, fileName)]) 294 | else: 295 | multipartBody[key] = verifiedBody[key].getStr() 296 | result = multipartBody 297 | 298 | proc createCompletion*( 299 | apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode 300 | ): Future[Response | AsyncResponse] {.multisync.} = 301 | ## Creates a completion for the provided prompt and parameters 302 | 303 | let verifiedBody = parseCompletionRequest(body) 304 | apiConfig.client.headers["Content-Type"] = "application/json" 305 | result = await postToOpenAi(apiConfig.client, "/completions", $verifiedBody) 306 | 307 | proc createChatCompletion*( 308 | apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode 309 | ): Future[Response | AsyncResponse] {.multisync.} = 310 | ## Creates a completion for the chat message 311 | let verifiedBody = parseChatCompletionRequest(body) 312 | apiConfig.client.headers["Content-Type"] = "application/json" 313 | result = await postToOpenAi(apiConfig.client, "/chat/completions", $verifiedBody) 314 | 315 | proc createEdit*( 316 | apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode 317 | ): Future[Response | AsyncResponse] {.multisync.} = 318 | ## Creates a new edit for the provided input, instruction, and parameters. 
319 | let verifiedBody = parseEditRequest(body) 320 | apiConfig.client.headers["Content-Type"] = "application/json" 321 | result = await postToOpenAi(apiConfig.client, "/edits", $verifiedBody) 322 | 323 | proc createImage*( 324 | apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode 325 | ): Future[Response | AsyncResponse] {.multisync.} = 326 | ## Creates an image given a prompt. 327 | let verifiedBody = parseImageRequest(body) 328 | apiConfig.client.headers["Content-Type"] = "application/json" 329 | result = await postToOpenAi(apiConfig.client, "/images/generations", $verifiedbody) 330 | 331 | proc createImageEdit*( 332 | apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode 333 | ): Future[Response | AsyncResponse] {.multisync.} = 334 | ## Creates an edited or extended image given an original image and a prompt. 335 | 336 | apiConfig.client.headers["Content-Type"] = "multipart/form-data" 337 | result = await postToOpenAi( 338 | apiConfig.client, 339 | "/images/edits", 340 | multipart = createMultiPartData(body, parseImageEditRequest, ["image", "mask"]), 341 | ) 342 | 343 | proc createImageVariation*( 344 | apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode 345 | ): Future[Response | AsyncResponse] {.multisync.} = 346 | ## Creates a variation of a given image. 347 | 348 | apiConfig.client.headers["Content-Type"] = "multipart/form-data" 349 | result = await postToOpenAi( 350 | apiConfig.client, 351 | "/images/variations", 352 | multipart = createMultiPartData(body, parseImageVariationRequest, ["image"]), 353 | ) 354 | 355 | proc createEmbedding*( 356 | apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode 357 | ): Future[Response | AsyncResponse] {.multisync.} = 358 | ## Creates an embedding vector representing the input text. 
359 | 
360 |   let verifiedBody = parseEmbeddingRequest(body)
361 |   apiConfig.client.headers["Content-Type"] = "application/json"
362 |   result = await postToOpenAi(apiConfig.client, "/embeddings", $verifiedBody)
363 | 
364 | proc createTranscription*(
365 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode
366 | ): Future[Response | AsyncResponse] {.multisync.} =
367 |   ## Transcribes audio into the input language.
368 | 
369 |   apiConfig.client.headers["Content-Type"] = "multipart/form-data"
370 |   result = await postToOpenAi(
371 |     apiConfig.client,
372 |     "/audio/transcriptions",
373 |     multipart = createMultiPartData(body, parseTranscriptionRequest, ["file"]),
374 |   )
375 | 
376 | proc createTranslation*(
377 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode
378 | ): Future[Response | AsyncResponse] {.multisync.} =
379 |   ## Translates audio into English.
380 | 
381 |   apiConfig.client.headers["Content-Type"] = "multipart/form-data"
382 |   result = await postToOpenAi(
383 |     apiConfig.client,
384 |     "/audio/translations",
385 |     multipart = createMultiPartData(body, parseTranslationRequest, ["file"]),
386 |   )
387 | 
388 | proc createSearch*(
389 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, engineId: string, body: JsonNode
390 | ): Future[Response | AsyncResponse] {.multisync.} =
391 |   ## The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them.
392 |   ##
393 |   ## To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time.
394 |   ## When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents.
395 |   ## These documents will be returned along with their search scores.
396 |   ##
397 |   ## The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
398 | 
399 |   let verifiedBody = parseSearchRequest(body)
400 |   apiConfig.client.headers["Content-Type"] = "application/json"
401 |   result = await postToOpenAi(
402 |     apiConfig.client, fmt"/engines/{engineId}/search", $verifiedBody
403 |   )
404 | 
405 | proc listFiles*(
406 |     apiConfig: OpenAi_Client | Async_OpenAi_Client
407 | ): Future[Response | AsyncResponse] {.multisync.} =
408 |   ## Returns a list of files that belong to the user's organization.
409 |   apiConfig.client.headers["Content-Type"] = "application/json"
410 |   result = await getFromOpenAi(apiConfig.client, "/files")
411 | 
412 | proc createFile*(
413 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode
414 | ): Future[Response | AsyncResponse] {.multisync.} =
415 |   ## Upload a file that contains document(s) to be used across various endpoints/features.
416 |   ## Currently, the size of all the files uploaded by one organization can be up to 1 GB.
417 |   ## Please contact us if you need to increase the storage limit.
418 | 
419 |   apiConfig.client.headers["Content-Type"] = "multipart/form-data"
420 |   result = await postToOpenAi(
421 |     apiConfig.client,
422 |     "/files",  # was "/file": the OpenAI upload endpoint is POST /files (matches listFiles/deleteFile)
423 |     multipart = createMultiPartData(body, parseFileRequest, ["file"]),
424 |   )
425 | 
426 | proc deleteFile*(
427 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, fileId: string
428 | ): Future[Response | AsyncResponse] {.multisync.} =
429 |   ## Delete a file.
430 |   result = await deleteFromOpenAi(apiConfig.client, fmt"/files/{fileId}")
431 | 
432 | proc retrieveFile*(
433 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, fileId: string
434 | ): Future[Response | AsyncResponse] {.multisync.} =
435 |   ## Returns information about a specific file.
436 |   result = await getFromOpenAi(apiConfig.client, fmt"/files/{fileId}")  # was "/file/{fileId}": endpoint is GET /files/{file_id}
437 | 
438 | proc downloadFile*(
439 |     apiConfig: OpenAi_Client | Async_OpenAi_Client,
440 |     fileId: string,
441 |     saveToFileName = fileId,
442 | ): Future[void] {.multisync.} =
443 |   ## Downloads the contents of the specified file (GET /files/{file_id}/content) into `saveToFileName`.
444 |   # Was: `result = await httpclient.downloadFile(...)` with no {.async.}/{.multisync.} pragma
445 |   # (so `await` could not compile) and a relative path missing OPENAI_BASEURL.
446 |   await httpclient.downloadFile(
447 |     apiConfig.client, OpenAI_BASEURL & fmt"/files/{fileId}/content", saveToFileName
448 |   )
449 | 
450 | proc createAnswer*(
451 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode
452 | ): Future[Response | AsyncResponse] {.multisync.} =
453 |   ## Answers the specified question using the provided documents and examples.
454 |   ## The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context.
455 |   ## The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
456 | 
457 |   let verifiedBody = parseAnswerRequest(body)
458 |   apiConfig.client.headers["Content-Type"] = "application/json"
459 |   result = await postToOpenAi(apiConfig.client, "/answers", $verifiedBody)
460 | 
461 | proc createClassification*(
462 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode
463 | ): Future[Response | AsyncResponse] {.multisync.} =
464 |   ## Classifies the specified `query` using provided examples.
465 |   ##
466 |   ## The endpoint first [searches](/docs/api-reference/searches) over the labeled examples
467 |   ## to select the ones most relevant for the particular query. Then, the relevant examples
468 |   ## are combined with the query to construct a prompt to produce the final label via the
469 |   ## [completions](/docs/api-reference/completions) endpoint.
470 |   ##
471 |   ## Labeled examples can be provided via an uploaded `file`, or explicitly listed in the
472 |   ## request using the `examples` parameter for quick tests and small scale use cases.
471 | 
472 |   let verifiedBody = parseClassificationRequest(body)
473 |   apiConfig.client.headers["Content-Type"] = "application/json"
474 |   result = await postToOpenAi(apiConfig.client, "/classifications", $verifiedBody)
475 | 
476 | proc createFineTune*(
477 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode
478 | ): Future[Response | AsyncResponse] {.multisync.} =
479 |   ## Creates a job that fine-tunes a specified model from a given dataset.
480 |   ##
481 |   ## Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete.
482 |   ##
483 |   ## [Learn more about Fine-tuning](/docs/guides/fine-tuning)
484 | 
485 |   let verifiedBody = parseFineTuneRequest(body)
486 |   apiConfig.client.headers["Content-Type"] = "application/json"
487 |   result = await postToOpenAi(apiConfig.client, "/fine-tunes", $verifiedBody)
488 | 
489 | proc listFineTunes*(
490 |     apiConfig: OpenAi_Client | Async_OpenAi_Client
491 | ): Future[Response | AsyncResponse] {.multisync.} =
492 |   ## List your organization's fine-tuning jobs
493 | 
494 |   result = await getFromOpenAi(apiConfig.client, "/fine-tunes")
495 | 
496 | proc retrieveFineTune*(
497 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, fineTuneId: string
498 | ): Future[Response | AsyncResponse] {.multisync.} =  # was `: Response | Future[AsyncResponse] =` with no pragma; `await` in the body requires {.multisync.}, matching every other proc in this module
499 |   ## Gets info about the fine-tune job.
500 |   ##
501 |   ## [Learn more about Fine-tuning](/docs/guides/fine-tuning)
502 | 
503 |   result = await getFromOpenAi(apiConfig.client, fmt"/fine-tunes/{fineTuneId}")
504 | 
505 | proc cancelFineTune*(
506 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, fineTuneId: string
507 | ): Future[Response | AsyncResponse] {.multisync.} =  # same fix as retrieveFineTune above
508 |   ## Immediately cancel a fine-tune job.
509 |   result = await postToOpenAi(apiConfig.client, fmt"/fine-tunes/{fineTuneId}/cancel")  # was "fines-tunes": typo yields a 404 from the API
510 | 
511 | proc listFineTuneEvents*(
512 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, fineTuneId: string
513 | ): Future[Response | AsyncResponse] {.multisync.} =  # was `: Response | Future[AsyncResponse] =` with no pragma; `await` requires {.multisync.}
514 |   ## Get fine-grained status updates for a fine-tune job.
515 |   result =
516 |     await getFromOpenAi(apiConfig.client, fmt"/fine-tunes/{fineTuneId}/events")  # was "fines-tunes"
517 | 
518 | proc listModels*(
519 |     apiConfig: OpenAi_Client | Async_OpenAi_Client
520 | ): Future[Response | AsyncResponse] {.multisync.} =
521 |   ## Lists the currently available models, and provides basic information about each one such as the owner and availability.
522 |   result = await getFromOpenAi(apiConfig.client, "/models")
523 | 
524 | proc retrieveModel*(
525 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, model: string
526 | ): Future[Response | AsyncResponse] {.multisync.} =
527 |   ## Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
528 |   result = await getFromOpenAi(apiConfig.client, fmt"/models/{model}")
529 | 
530 | proc deleteModel*(
531 |     apiConfig: OpenAi_Client | Async_OpenAi_Client, model: string
532 | ): Future[Response | AsyncResponse] {.multisync.} =
533 |   ## Delete a fine-tuned model. You must have the Owner role in your organization.
534 | result = await deleteFromOpenAi(apiConfig.client, fmt"/models/{model}") 535 | 536 | proc createModeration*( 537 | apiConfig: OpenAi_Client | Async_OpenAi_Client, body: JsonNode 538 | ): Future[Response | AsyncResponse] {.multisync.} = 539 | ## Classifies if text violates OpenAI's Content Policy 540 | let verifiedBody = parseModerationRequest(body) 541 | apiConfig.client.headers["Content-Type"] = "application/json" 542 | result = await postToOpenAi(apiConfig.client, "/moderations", $verifiedBody) 543 | -------------------------------------------------------------------------------- /spec.yaml: -------------------------------------------------------------------------------- 1 | openapi: 3.0.0 2 | info: 3 | title: OpenAI API 4 | description: APIs for sampling from and fine-tuning language models 5 | version: '1.3.0' 6 | servers: 7 | - url: https://api.openai.com/v1 8 | tags: 9 | - name: OpenAI 10 | description: The OpenAI REST API 11 | paths: 12 | /engines: 13 | get: 14 | operationId: listEngines 15 | deprecated: true 16 | tags: 17 | - OpenAI 18 | summary: Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. 
19 | responses: 20 | "200": 21 | description: OK 22 | content: 23 | application/json: 24 | schema: 25 | $ref: '#/components/schemas/ListEnginesResponse' 26 | x-oaiMeta: 27 | name: List engines 28 | group: engines 29 | path: list 30 | examples: 31 | curl: | 32 | curl https://api.openai.com/v1/engines \ 33 | -H "Authorization: Bearer $OPENAI_API_KEY" 34 | python: | 35 | import os 36 | import openai 37 | openai.api_key = os.getenv("OPENAI_API_KEY") 38 | openai.Engine.list() 39 | node.js: | 40 | const { Configuration, OpenAIApi } = require("openai"); 41 | const configuration = new Configuration({ 42 | apiKey: process.env.OPENAI_API_KEY, 43 | }); 44 | const openai = new OpenAIApi(configuration); 45 | const response = await openai.listEngines(); 46 | response: | 47 | { 48 | "data": [ 49 | { 50 | "id": "engine-id-0", 51 | "object": "engine", 52 | "owner": "organization-owner", 53 | "ready": true 54 | }, 55 | { 56 | "id": "engine-id-2", 57 | "object": "engine", 58 | "owner": "organization-owner", 59 | "ready": true 60 | }, 61 | { 62 | "id": "engine-id-3", 63 | "object": "engine", 64 | "owner": "openai", 65 | "ready": false 66 | }, 67 | ], 68 | "object": "list" 69 | } 70 | 71 | /engines/{engine_id}: 72 | get: 73 | operationId: retrieveEngine 74 | deprecated: true 75 | tags: 76 | - OpenAI 77 | summary: Retrieves a model instance, providing basic information about it such as the owner and availability. 
78 | parameters: 79 | - in: path 80 | name: engine_id 81 | required: true 82 | schema: 83 | type: string 84 | # ideally this will be an actual ID, so this will always work from browser 85 | example: 86 | davinci 87 | description: &engine_id_description > 88 | The ID of the engine to use for this request 89 | responses: 90 | "200": 91 | description: OK 92 | content: 93 | application/json: 94 | schema: 95 | $ref: '#/components/schemas/Engine' 96 | x-oaiMeta: 97 | name: Retrieve engine 98 | group: engines 99 | path: retrieve 100 | examples: 101 | curl: | 102 | curl https://api.openai.com/v1/engines/VAR_model_id \ 103 | -H "Authorization: Bearer $OPENAI_API_KEY" 104 | python: | 105 | import os 106 | import openai 107 | openai.api_key = os.getenv("OPENAI_API_KEY") 108 | openai.Engine.retrieve("VAR_model_id") 109 | node.js: | 110 | const { Configuration, OpenAIApi } = require("openai"); 111 | const configuration = new Configuration({ 112 | apiKey: process.env.OPENAI_API_KEY, 113 | }); 114 | const openai = new OpenAIApi(configuration); 115 | const response = await openai.retrieveEngine("VAR_model_id"); 116 | response: | 117 | { 118 | "id": "VAR_model_id", 119 | "object": "engine", 120 | "owner": "openai", 121 | "ready": true 122 | } 123 | 124 | /chat/completions: 125 | post: 126 | operationId: createChatCompletion 127 | tags: 128 | - OpenAI 129 | summary: Creates a model response for the given chat conversation. 
130 | requestBody: 131 | required: true 132 | content: 133 | application/json: 134 | schema: 135 | $ref: '#/components/schemas/CreateChatCompletionRequest' 136 | responses: 137 | "200": 138 | description: OK 139 | content: 140 | application/json: 141 | schema: 142 | $ref: '#/components/schemas/CreateChatCompletionResponse' 143 | 144 | x-oaiMeta: 145 | name: Create chat completion 146 | group: chat 147 | path: create 148 | beta: true 149 | examples: 150 | curl: | 151 | curl https://api.openai.com/v1/chat/completions \ 152 | -H "Content-Type: application/json" \ 153 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 154 | -d '{ 155 | "model": "gpt-3.5-turbo", 156 | "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Hello!"}] 157 | }' 158 | python: | 159 | import os 160 | import openai 161 | openai.api_key = os.getenv("OPENAI_API_KEY") 162 | 163 | completion = openai.ChatCompletion.create( 164 | model="gpt-3.5-turbo", 165 | messages=[ 166 | {"role": "system", "content": "You are a helpful assistant."}, 167 | {"role": "user", "content": "Hello!"} 168 | ] 169 | ) 170 | 171 | print(completion.choices[0].message) 172 | node.js: | 173 | const { Configuration, OpenAIApi } = require("openai"); 174 | 175 | const configuration = new Configuration({ 176 | apiKey: process.env.OPENAI_API_KEY, 177 | }); 178 | const openai = new OpenAIApi(configuration); 179 | 180 | const completion = await openai.createChatCompletion({ 181 | model: "gpt-3.5-turbo", 182 | messages: [{"role": "system", "content": "You are a helpful assistant."}, {role: "user", content: "Hello world"}], 183 | }); 184 | console.log(completion.data.choices[0].message); 185 | parameters: | 186 | { 187 | "model": "gpt-3.5-turbo", 188 | "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Hello!"}] 189 | } 190 | response: | 191 | { 192 | "id": "chatcmpl-123", 193 | "object": "chat.completion", 194 | "created": 1677652288, 
195 | "choices": [{ 196 | "index": 0, 197 | "message": { 198 | "role": "assistant", 199 | "content": "\n\nHello there, how may I assist you today?", 200 | }, 201 | "finish_reason": "stop" 202 | }], 203 | "usage": { 204 | "prompt_tokens": 9, 205 | "completion_tokens": 12, 206 | "total_tokens": 21 207 | } 208 | } 209 | /completions: 210 | post: 211 | operationId: createCompletion 212 | tags: 213 | - OpenAI 214 | summary: Creates a completion for the provided prompt and parameters. 215 | requestBody: 216 | required: true 217 | content: 218 | application/json: 219 | schema: 220 | $ref: '#/components/schemas/CreateCompletionRequest' 221 | responses: 222 | "200": 223 | description: OK 224 | content: 225 | application/json: 226 | schema: 227 | $ref: '#/components/schemas/CreateCompletionResponse' 228 | x-oaiMeta: 229 | name: Create completion 230 | group: completions 231 | path: create 232 | examples: 233 | curl: | 234 | curl https://api.openai.com/v1/completions \ 235 | -H "Content-Type: application/json" \ 236 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 237 | -d '{ 238 | "model": "VAR_model_id", 239 | "prompt": "Say this is a test", 240 | "max_tokens": 7, 241 | "temperature": 0 242 | }' 243 | python: | 244 | import os 245 | import openai 246 | openai.api_key = os.getenv("OPENAI_API_KEY") 247 | openai.Completion.create( 248 | model="VAR_model_id", 249 | prompt="Say this is a test", 250 | max_tokens=7, 251 | temperature=0 252 | ) 253 | node.js: | 254 | const { Configuration, OpenAIApi } = require("openai"); 255 | const configuration = new Configuration({ 256 | apiKey: process.env.OPENAI_API_KEY, 257 | }); 258 | const openai = new OpenAIApi(configuration); 259 | const response = await openai.createCompletion({ 260 | model: "VAR_model_id", 261 | prompt: "Say this is a test", 262 | max_tokens: 7, 263 | temperature: 0, 264 | }); 265 | parameters: | 266 | { 267 | "model": "VAR_model_id", 268 | "prompt": "Say this is a test", 269 | "max_tokens": 7, 270 | "temperature": 0, 
271 | "top_p": 1, 272 | "n": 1, 273 | "stream": false, 274 | "logprobs": null, 275 | "stop": "\n" 276 | } 277 | response: | 278 | { 279 | "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", 280 | "object": "text_completion", 281 | "created": 1589478378, 282 | "model": "VAR_model_id", 283 | "choices": [ 284 | { 285 | "text": "\n\nThis is indeed a test", 286 | "index": 0, 287 | "logprobs": null, 288 | "finish_reason": "length" 289 | } 290 | ], 291 | "usage": { 292 | "prompt_tokens": 5, 293 | "completion_tokens": 7, 294 | "total_tokens": 12 295 | } 296 | } 297 | /edits: 298 | post: 299 | operationId: createEdit 300 | tags: 301 | - OpenAI 302 | summary: Creates a new edit for the provided input, instruction, and parameters. 303 | requestBody: 304 | required: true 305 | content: 306 | application/json: 307 | schema: 308 | $ref: '#/components/schemas/CreateEditRequest' 309 | responses: 310 | "200": 311 | description: OK 312 | content: 313 | application/json: 314 | schema: 315 | $ref: '#/components/schemas/CreateEditResponse' 316 | x-oaiMeta: 317 | name: Create edit 318 | group: edits 319 | path: create 320 | examples: 321 | curl: | 322 | curl https://api.openai.com/v1/edits \ 323 | -H "Content-Type: application/json" \ 324 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 325 | -d '{ 326 | "model": "VAR_model_id", 327 | "input": "What day of the wek is it?", 328 | "instruction": "Fix the spelling mistakes" 329 | }' 330 | python: | 331 | import os 332 | import openai 333 | openai.api_key = os.getenv("OPENAI_API_KEY") 334 | openai.Edit.create( 335 | model="VAR_model_id", 336 | input="What day of the wek is it?", 337 | instruction="Fix the spelling mistakes" 338 | ) 339 | node.js: | 340 | const { Configuration, OpenAIApi } = require("openai"); 341 | const configuration = new Configuration({ 342 | apiKey: process.env.OPENAI_API_KEY, 343 | }); 344 | const openai = new OpenAIApi(configuration); 345 | const response = await openai.createEdit({ 346 | model: "VAR_model_id", 347 | input: "What 
day of the wek is it?", 348 | instruction: "Fix the spelling mistakes", 349 | }); 350 | parameters: | 351 | { 352 | "model": "VAR_model_id", 353 | "input": "What day of the wek is it?", 354 | "instruction": "Fix the spelling mistakes" 355 | } 356 | response: | 357 | { 358 | "object": "edit", 359 | "created": 1589478378, 360 | "choices": [ 361 | { 362 | "text": "What day of the week is it?", 363 | "index": 0 364 | } 365 | ], 366 | "usage": { 367 | "prompt_tokens": 25, 368 | "completion_tokens": 32, 369 | "total_tokens": 57 370 | } 371 | } 372 | 373 | /images/generations: 374 | post: 375 | operationId: createImage 376 | tags: 377 | - OpenAI 378 | summary: Creates an image given a prompt. 379 | requestBody: 380 | required: true 381 | content: 382 | application/json: 383 | schema: 384 | $ref: '#/components/schemas/CreateImageRequest' 385 | responses: 386 | "200": 387 | description: OK 388 | content: 389 | application/json: 390 | schema: 391 | $ref: '#/components/schemas/ImagesResponse' 392 | x-oaiMeta: 393 | name: Create image 394 | group: images 395 | path: create 396 | beta: true 397 | examples: 398 | curl: | 399 | curl https://api.openai.com/v1/images/generations \ 400 | -H "Content-Type: application/json" \ 401 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 402 | -d '{ 403 | "prompt": "A cute baby sea otter", 404 | "n": 2, 405 | "size": "1024x1024" 406 | }' 407 | python: | 408 | import os 409 | import openai 410 | openai.api_key = os.getenv("OPENAI_API_KEY") 411 | openai.Image.create( 412 | prompt="A cute baby sea otter", 413 | n=2, 414 | size="1024x1024" 415 | ) 416 | node.js: | 417 | const { Configuration, OpenAIApi } = require("openai"); 418 | const configuration = new Configuration({ 419 | apiKey: process.env.OPENAI_API_KEY, 420 | }); 421 | const openai = new OpenAIApi(configuration); 422 | const response = await openai.createImage({ 423 | prompt: "A cute baby sea otter", 424 | n: 2, 425 | size: "1024x1024", 426 | }); 427 | parameters: | 428 | { 429 |
"prompt": "A cute baby sea otter", 430 | "n": 2, 431 | "size": "1024x1024" 432 | } 433 | response: | 434 | { 435 | "created": 1589478378, 436 | "data": [ 437 | { 438 | "url": "https://..." 439 | }, 440 | { 441 | "url": "https://..." 442 | } 443 | ] 444 | } 445 | 446 | /images/edits: 447 | post: 448 | operationId: createImageEdit 449 | tags: 450 | - OpenAI 451 | summary: Creates an edited or extended image given an original image and a prompt. 452 | requestBody: 453 | required: true 454 | content: 455 | multipart/form-data: 456 | schema: 457 | $ref: '#/components/schemas/CreateImageEditRequest' 458 | responses: 459 | "200": 460 | description: OK 461 | content: 462 | application/json: 463 | schema: 464 | $ref: '#/components/schemas/ImagesResponse' 465 | x-oaiMeta: 466 | name: Create image edit 467 | group: images 468 | path: create-edit 469 | beta: true 470 | examples: 471 | curl: | 472 | curl https://api.openai.com/v1/images/edits \ 473 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 474 | -F image="@otter.png" \ 475 | -F mask="@mask.png" \ 476 | -F prompt="A cute baby sea otter wearing a beret" \ 477 | -F n=2 \ 478 | -F size="1024x1024" 479 | python: | 480 | import os 481 | import openai 482 | openai.api_key = os.getenv("OPENAI_API_KEY") 483 | openai.Image.create_edit( 484 | image=open("otter.png", "rb"), 485 | mask=open("mask.png", "rb"), 486 | prompt="A cute baby sea otter wearing a beret", 487 | n=2, 488 | size="1024x1024" 489 | ) 490 | node.js: | 491 | const { Configuration, OpenAIApi } = require("openai"); 492 | const configuration = new Configuration({ 493 | apiKey: process.env.OPENAI_API_KEY, 494 | }); 495 | const openai = new OpenAIApi(configuration); 496 | const response = await openai.createImageEdit( 497 | fs.createReadStream("otter.png"), 498 | fs.createReadStream("mask.png"), 499 | "A cute baby sea otter wearing a beret", 500 | 2, 501 | "1024x1024" 502 | ); 503 | response: | 504 | { 505 | "created": 1589478378, 506 | "data": [ 507 | { 508 | "url": 
"https://..." 509 | }, 510 | { 511 | "url": "https://..." 512 | } 513 | ] 514 | } 515 | 516 | /images/variations: 517 | post: 518 | operationId: createImageVariation 519 | tags: 520 | - OpenAI 521 | summary: Creates a variation of a given image. 522 | requestBody: 523 | required: true 524 | content: 525 | multipart/form-data: 526 | schema: 527 | $ref: '#/components/schemas/CreateImageVariationRequest' 528 | responses: 529 | "200": 530 | description: OK 531 | content: 532 | application/json: 533 | schema: 534 | $ref: '#/components/schemas/ImagesResponse' 535 | x-oaiMeta: 536 | name: Create image variation 537 | group: images 538 | path: create-variation 539 | beta: true 540 | examples: 541 | curl: | 542 | curl https://api.openai.com/v1/images/variations \ 543 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 544 | -F image="@otter.png" \ 545 | -F n=2 \ 546 | -F size="1024x1024" 547 | python: | 548 | import os 549 | import openai 550 | openai.api_key = os.getenv("OPENAI_API_KEY") 551 | openai.Image.create_variation( 552 | image=open("otter.png", "rb"), 553 | n=2, 554 | size="1024x1024" 555 | ) 556 | node.js: | 557 | const { Configuration, OpenAIApi } = require("openai"); 558 | const configuration = new Configuration({ 559 | apiKey: process.env.OPENAI_API_KEY, 560 | }); 561 | const openai = new OpenAIApi(configuration); 562 | const response = await openai.createImageVariation( 563 | fs.createReadStream("otter.png"), 564 | 2, 565 | "1024x1024" 566 | ); 567 | response: | 568 | { 569 | "created": 1589478378, 570 | "data": [ 571 | { 572 | "url": "https://..." 573 | }, 574 | { 575 | "url": "https://..." 576 | } 577 | ] 578 | } 579 | 580 | /embeddings: 581 | post: 582 | operationId: createEmbedding 583 | tags: 584 | - OpenAI 585 | summary: Creates an embedding vector representing the input text. 
586 | requestBody: 587 | required: true 588 | content: 589 | application/json: 590 | schema: 591 | $ref: '#/components/schemas/CreateEmbeddingRequest' 592 | responses: 593 | "200": 594 | description: OK 595 | content: 596 | application/json: 597 | schema: 598 | $ref: '#/components/schemas/CreateEmbeddingResponse' 599 | x-oaiMeta: 600 | name: Create embeddings 601 | group: embeddings 602 | path: create 603 | examples: 604 | curl: | 605 | curl https://api.openai.com/v1/embeddings \ 606 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 607 | -H "Content-Type: application/json" \ 608 | -d '{ 609 | "input": "The food was delicious and the waiter...", 610 | "model": "text-embedding-ada-002" 611 | }' 612 | python: | 613 | import os 614 | import openai 615 | openai.api_key = os.getenv("OPENAI_API_KEY") 616 | openai.Embedding.create( 617 | model="text-embedding-ada-002", 618 | input="The food was delicious and the waiter..." 619 | ) 620 | node.js: | 621 | const { Configuration, OpenAIApi } = require("openai"); 622 | const configuration = new Configuration({ 623 | apiKey: process.env.OPENAI_API_KEY, 624 | }); 625 | const openai = new OpenAIApi(configuration); 626 | const response = await openai.createEmbedding({ 627 | model: "text-embedding-ada-002", 628 | input: "The food was delicious and the waiter...", 629 | }); 630 | parameters: | 631 | { 632 | "model": "text-embedding-ada-002", 633 | "input": "The food was delicious and the waiter..." 634 | } 635 | response: | 636 | { 637 | "object": "list", 638 | "data": [ 639 | { 640 | "object": "embedding", 641 | "embedding": [ 642 | 0.0023064255, 643 | -0.009327292, 644 | .... 
(1536 floats total for ada-002) 645 | -0.0028842222, 646 | ], 647 | "index": 0 648 | } 649 | ], 650 | "model": "text-embedding-ada-002", 651 | "usage": { 652 | "prompt_tokens": 8, 653 | "total_tokens": 8 654 | } 655 | } 656 | 657 | /audio/transcriptions: 658 | post: 659 | operationId: createTranscription 660 | tags: 661 | - OpenAI 662 | summary: Transcribes audio into the input language. 663 | requestBody: 664 | required: true 665 | content: 666 | multipart/form-data: 667 | schema: 668 | $ref: '#/components/schemas/CreateTranscriptionRequest' 669 | responses: 670 | "200": 671 | description: OK 672 | content: 673 | application/json: 674 | schema: 675 | $ref: '#/components/schemas/CreateTranscriptionResponse' 676 | x-oaiMeta: 677 | name: Create transcription 678 | group: audio 679 | path: create 680 | beta: true 681 | examples: 682 | curl: | 683 | curl https://api.openai.com/v1/audio/transcriptions \ 684 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 685 | -H "Content-Type: multipart/form-data" \ 686 | -F file="@/path/to/file/audio.mp3" \ 687 | -F model="whisper-1" 688 | python: | 689 | import os 690 | import openai 691 | openai.api_key = os.getenv("OPENAI_API_KEY") 692 | audio_file = open("audio.mp3", "rb") 693 | transcript = openai.Audio.transcribe("whisper-1", audio_file) 694 | node: | 695 | const { Configuration, OpenAIApi } = require("openai"); 696 | const configuration = new Configuration({ 697 | apiKey: process.env.OPENAI_API_KEY, 698 | }); 699 | const openai = new OpenAIApi(configuration); 700 | const resp = await openai.createTranscription( 701 | fs.createReadStream("audio.mp3"), 702 | "whisper-1" 703 | ); 704 | parameters: | 705 | { 706 | "file": "audio.mp3", 707 | "model": "whisper-1" 708 | } 709 | response: | 710 | { 711 | "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that." 
712 | } 713 | 714 | /audio/translations: 715 | post: 716 | operationId: createTranslation 717 | tags: 718 | - OpenAI 719 | summary: Translates audio into English. 720 | requestBody: 721 | required: true 722 | content: 723 | multipart/form-data: 724 | schema: 725 | $ref: '#/components/schemas/CreateTranslationRequest' 726 | responses: 727 | "200": 728 | description: OK 729 | content: 730 | application/json: 731 | schema: 732 | $ref: '#/components/schemas/CreateTranslationResponse' 733 | x-oaiMeta: 734 | name: Create translation 735 | group: audio 736 | path: create 737 | beta: true 738 | examples: 739 | curl: | 740 | curl https://api.openai.com/v1/audio/translations \ 741 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 742 | -H "Content-Type: multipart/form-data" \ 743 | -F file="@/path/to/file/german.m4a" \ 744 | -F model="whisper-1" 745 | python: | 746 | import os 747 | import openai 748 | openai.api_key = os.getenv("OPENAI_API_KEY") 749 | audio_file = open("german.m4a", "rb") 750 | transcript = openai.Audio.translate("whisper-1", audio_file) 751 | node: | 752 | const { Configuration, OpenAIApi } = require("openai"); 753 | const configuration = new Configuration({ 754 | apiKey: process.env.OPENAI_API_KEY, 755 | }); 756 | const openai = new OpenAIApi(configuration); 757 | const resp = await openai.createTranslation( 758 | fs.createReadStream("audio.mp3"), 759 | "whisper-1" 760 | ); 761 | parameters: | 762 | { 763 | "file": "german.m4a", 764 | "model": "whisper-1" 765 | } 766 | response: | 767 | { 768 | "text": "Hello, my name is Wolfgang and I come from Germany. Where are you heading today?" 769 | } 770 | 771 | /engines/{engine_id}/search: 772 | post: 773 | operationId: createSearch 774 | deprecated: true 775 | tags: 776 | - OpenAI 777 | summary: | 778 | The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them.
779 | 780 | To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. 781 | 782 | The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. 783 | parameters: 784 | - in: path 785 | name: engine_id 786 | required: true 787 | schema: 788 | type: string 789 | example: davinci 790 | description: The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`. 791 | requestBody: 792 | required: true 793 | content: 794 | application/json: 795 | schema: 796 | $ref: '#/components/schemas/CreateSearchRequest' 797 | responses: 798 | "200": 799 | description: OK 800 | content: 801 | application/json: 802 | schema: 803 | $ref: '#/components/schemas/CreateSearchResponse' 804 | x-oaiMeta: 805 | name: Create search 806 | group: searches 807 | path: create 808 | examples: 809 | curl: | 810 | curl https://api.openai.com/v1/engines/davinci/search \ 811 | -H "Content-Type: application/json" \ 812 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 813 | -d '{ 814 | "documents": ["White House", "hospital", "school"], 815 | "query": "the president" 816 | }' 817 | python: | 818 | import os 819 | import openai 820 | openai.api_key = os.getenv("OPENAI_API_KEY") 821 | openai.Engine("davinci").search( 822 | documents=["White House", "hospital", "school"], 823 | query="the president" 824 | ) 825 | node.js: | 826 | const { Configuration, OpenAIApi } = require("openai"); 827 | const configuration = new Configuration({ 828 | apiKey: process.env.OPENAI_API_KEY, 829 | }); 830 | const openai = new OpenAIApi(configuration); 831 | const response = await 
openai.createSearch("davinci", { 832 | documents: ["White House", "hospital", "school"], 833 | query: "the president", 834 | }); 835 | parameters: | 836 | { 837 | "documents": [ 838 | "White House", 839 | "hospital", 840 | "school" 841 | ], 842 | "query": "the president" 843 | } 844 | response: | 845 | { 846 | "data": [ 847 | { 848 | "document": 0, 849 | "object": "search_result", 850 | "score": 215.412 851 | }, 852 | { 853 | "document": 1, 854 | "object": "search_result", 855 | "score": 40.316 856 | }, 857 | { 858 | "document": 2, 859 | "object": "search_result", 860 | "score": 55.226 861 | } 862 | ], 863 | "object": "list" 864 | } 865 | 866 | /files: 867 | get: 868 | operationId: listFiles 869 | tags: 870 | - OpenAI 871 | summary: Returns a list of files that belong to the user's organization. 872 | responses: 873 | "200": 874 | description: OK 875 | content: 876 | application/json: 877 | schema: 878 | $ref: '#/components/schemas/ListFilesResponse' 879 | x-oaiMeta: 880 | name: List files 881 | group: files 882 | path: list 883 | examples: 884 | curl: | 885 | curl https://api.openai.com/v1/files \ 886 | -H "Authorization: Bearer $OPENAI_API_KEY" 887 | python: | 888 | import os 889 | import openai 890 | openai.api_key = os.getenv("OPENAI_API_KEY") 891 | openai.File.list() 892 | node.js: | 893 | const { Configuration, OpenAIApi } = require("openai"); 894 | const configuration = new Configuration({ 895 | apiKey: process.env.OPENAI_API_KEY, 896 | }); 897 | const openai = new OpenAIApi(configuration); 898 | const response = await openai.listFiles(); 899 | response: | 900 | { 901 | "data": [ 902 | { 903 | "id": "file-ccdDZrC3iZVNiQVeEA6Z66wf", 904 | "object": "file", 905 | "bytes": 175, 906 | "created_at": 1613677385, 907 | "filename": "train.jsonl", 908 | "purpose": "search" 909 | }, 910 | { 911 | "id": "file-XjGxS3KTG0uNmNOK362iJua3", 912 | "object": "file", 913 | "bytes": 140, 914 | "created_at": 1613779121, 915 | "filename": "puppy.jsonl", 916 | "purpose": "search" 
917 | } 918 | ], 919 | "object": "list" 920 | } 921 | post: 922 | operationId: createFile 923 | tags: 924 | - OpenAI 925 | summary: | 926 | Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. 927 | 928 | requestBody: 929 | required: true 930 | content: 931 | multipart/form-data: 932 | schema: 933 | $ref: '#/components/schemas/CreateFileRequest' 934 | responses: 935 | "200": 936 | description: OK 937 | content: 938 | application/json: 939 | schema: 940 | $ref: '#/components/schemas/OpenAIFile' 941 | x-oaiMeta: 942 | name: Upload file 943 | group: files 944 | path: upload 945 | examples: 946 | curl: | 947 | curl https://api.openai.com/v1/files \ 948 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 949 | -F purpose="fine-tune" \ 950 | -F file="@mydata.jsonl" 951 | python: | 952 | import os 953 | import openai 954 | openai.api_key = os.getenv("OPENAI_API_KEY") 955 | openai.File.create( 956 | file=open("mydata.jsonl", "rb"), 957 | purpose='fine-tune' 958 | ) 959 | node.js: | 960 | const fs = require("fs"); 961 | const { Configuration, OpenAIApi } = require("openai"); 962 | const configuration = new Configuration({ 963 | apiKey: process.env.OPENAI_API_KEY, 964 | }); 965 | const openai = new OpenAIApi(configuration); 966 | const response = await openai.createFile( 967 | fs.createReadStream("mydata.jsonl"), 968 | "fine-tune" 969 | ); 970 | response: | 971 | { 972 | "id": "file-XjGxS3KTG0uNmNOK362iJua3", 973 | "object": "file", 974 | "bytes": 140, 975 | "created_at": 1613779121, 976 | "filename": "mydata.jsonl", 977 | "purpose": "fine-tune" 978 | } 979 | 980 | /files/{file_id}: 981 | delete: 982 | operationId: deleteFile 983 | tags: 984 | - OpenAI 985 | summary: Delete a file. 
986 | parameters: 987 | - in: path 988 | name: file_id 989 | required: true 990 | schema: 991 | type: string 992 | description: The ID of the file to use for this request 993 | responses: 994 | "200": 995 | description: OK 996 | content: 997 | application/json: 998 | schema: 999 | $ref: '#/components/schemas/DeleteFileResponse' 1000 | x-oaiMeta: 1001 | name: Delete file 1002 | group: files 1003 | path: delete 1004 | examples: 1005 | curl: | 1006 | curl https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3 \ 1007 | -X DELETE \ 1008 | -H "Authorization: Bearer $OPENAI_API_KEY" 1009 | python: | 1010 | import os 1011 | import openai 1012 | openai.api_key = os.getenv("OPENAI_API_KEY") 1013 | openai.File.delete("file-XjGxS3KTG0uNmNOK362iJua3") 1014 | node.js: | 1015 | const { Configuration, OpenAIApi } = require("openai"); 1016 | const configuration = new Configuration({ 1017 | apiKey: process.env.OPENAI_API_KEY, 1018 | }); 1019 | const openai = new OpenAIApi(configuration); 1020 | const response = await openai.deleteFile("file-XjGxS3KTG0uNmNOK362iJua3"); 1021 | response: | 1022 | { 1023 | "id": "file-XjGxS3KTG0uNmNOK362iJua3", 1024 | "object": "file", 1025 | "deleted": true 1026 | } 1027 | get: 1028 | operationId: retrieveFile 1029 | tags: 1030 | - OpenAI 1031 | summary: Returns information about a specific file. 
1032 | parameters: 1033 | - in: path 1034 | name: file_id 1035 | required: true 1036 | schema: 1037 | type: string 1038 | description: The ID of the file to use for this request 1039 | responses: 1040 | "200": 1041 | description: OK 1042 | content: 1043 | application/json: 1044 | schema: 1045 | $ref: '#/components/schemas/OpenAIFile' 1046 | x-oaiMeta: 1047 | name: Retrieve file 1048 | group: files 1049 | path: retrieve 1050 | examples: 1051 | curl: | 1052 | curl https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3 \ 1053 | -H "Authorization: Bearer $OPENAI_API_KEY" 1054 | python: | 1055 | import os 1056 | import openai 1057 | openai.api_key = os.getenv("OPENAI_API_KEY") 1058 | openai.File.retrieve("file-XjGxS3KTG0uNmNOK362iJua3") 1059 | node.js: | 1060 | const { Configuration, OpenAIApi } = require("openai"); 1061 | const configuration = new Configuration({ 1062 | apiKey: process.env.OPENAI_API_KEY, 1063 | }); 1064 | const openai = new OpenAIApi(configuration); 1065 | const response = await openai.retrieveFile("file-XjGxS3KTG0uNmNOK362iJua3"); 1066 | response: | 1067 | { 1068 | "id": "file-XjGxS3KTG0uNmNOK362iJua3", 1069 | "object": "file", 1070 | "bytes": 140, 1071 | "created_at": 1613779657, 1072 | "filename": "mydata.jsonl", 1073 | "purpose": "fine-tune" 1074 | } 1075 | 1076 | /files/{file_id}/content: 1077 | get: 1078 | operationId: downloadFile 1079 | tags: 1080 | - OpenAI 1081 | summary: Returns the contents of the specified file 1082 | parameters: 1083 | - in: path 1084 | name: file_id 1085 | required: true 1086 | schema: 1087 | type: string 1088 | description: The ID of the file to use for this request 1089 | responses: 1090 | "200": 1091 | description: OK 1092 | content: 1093 | application/json: 1094 | schema: 1095 | type: string 1096 | x-oaiMeta: 1097 | name: Retrieve file content 1098 | group: files 1099 | path: retrieve-content 1100 | examples: 1101 | curl: | 1102 | curl https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3/content \ 
1103 | -H "Authorization: Bearer $OPENAI_API_KEY" > file.jsonl 1104 | python: | 1105 | import os 1106 | import openai 1107 | openai.api_key = os.getenv("OPENAI_API_KEY") 1108 | content = openai.File.download("file-XjGxS3KTG0uNmNOK362iJua3") 1109 | node.js: | 1110 | const { Configuration, OpenAIApi } = require("openai"); 1111 | const configuration = new Configuration({ 1112 | apiKey: process.env.OPENAI_API_KEY, 1113 | }); 1114 | const openai = new OpenAIApi(configuration); 1115 | const response = await openai.downloadFile("file-XjGxS3KTG0uNmNOK362iJua3"); 1116 | 1117 | /answers: 1118 | post: 1119 | operationId: createAnswer 1120 | deprecated: true 1121 | tags: 1122 | - OpenAI 1123 | summary: | 1124 | Answers the specified question using the provided documents and examples. 1125 | 1126 | The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). 1127 | requestBody: 1128 | required: true 1129 | content: 1130 | application/json: 1131 | schema: 1132 | $ref: '#/components/schemas/CreateAnswerRequest' 1133 | responses: 1134 | "200": 1135 | description: OK 1136 | content: 1137 | application/json: 1138 | schema: 1139 | $ref: '#/components/schemas/CreateAnswerResponse' 1140 | x-oaiMeta: 1141 | name: Create answer 1142 | group: answers 1143 | path: create 1144 | examples: 1145 | curl: | 1146 | curl https://api.openai.com/v1/answers \ 1147 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 1148 | -H "Content-Type: application/json" \ 1149 | -d '{ 1150 | "documents": ["Puppy A is happy.", "Puppy B is sad."], 1151 | "question": "which puppy is happy?", 1152 | "search_model": "ada", 1153 | "model": "curie", 1154 | "examples_context": "In 2017, U.S. 
life expectancy was 78.6 years.", 1155 | "examples": [["What is human life expectancy in the United States?","78 years."]], 1156 | "max_tokens": 5, 1157 | "stop": ["\n", "<|endoftext|>"] 1158 | }' 1159 | python: | 1160 | import os 1161 | import openai 1162 | openai.api_key = os.getenv("OPENAI_API_KEY") 1163 | openai.Answer.create( 1164 | search_model="ada", 1165 | model="curie", 1166 | question="which puppy is happy?", 1167 | documents=["Puppy A is happy.", "Puppy B is sad."], 1168 | examples_context="In 2017, U.S. life expectancy was 78.6 years.", 1169 | examples=[["What is human life expectancy in the United States?","78 years."]], 1170 | max_tokens=5, 1171 | stop=["\n", "<|endoftext|>"], 1172 | ) 1173 | node.js: | 1174 | const { Configuration, OpenAIApi } = require("openai"); 1175 | const configuration = new Configuration({ 1176 | apiKey: process.env.OPENAI_API_KEY, 1177 | }); 1178 | const openai = new OpenAIApi(configuration); 1179 | const response = await openai.createAnswer({ 1180 | search_model: "ada", 1181 | model: "curie", 1182 | question: "which puppy is happy?", 1183 | documents: ["Puppy A is happy.", "Puppy B is sad."], 1184 | examples_context: "In 2017, U.S. life expectancy was 78.6 years.", 1185 | examples: [["What is human life expectancy in the United States?","78 years."]], 1186 | max_tokens: 5, 1187 | stop: ["\n", "<|endoftext|>"], 1188 | }); 1189 | parameters: | 1190 | { 1191 | "documents": ["Puppy A is happy.", "Puppy B is sad."], 1192 | "question": "which puppy is happy?", 1193 | "search_model": "ada", 1194 | "model": "curie", 1195 | "examples_context": "In 2017, U.S. life expectancy was 78.6 years.", 1196 | "examples": [["What is human life expectancy in the United States?","78 years."]], 1197 | "max_tokens": 5, 1198 | "stop": ["\n", "<|endoftext|>"] 1199 | } 1200 | response: | 1201 | { 1202 | "answers": [ 1203 | "puppy A." 
1204 | ], 1205 | "completion": "cmpl-2euVa1kmKUuLpSX600M41125Mo9NI", 1206 | "model": "curie:2020-05-03", 1207 | "object": "answer", 1208 | "search_model": "ada", 1209 | "selected_documents": [ 1210 | { 1211 | "document": 0, 1212 | "text": "Puppy A is happy. " 1213 | }, 1214 | { 1215 | "document": 1, 1216 | "text": "Puppy B is sad. " 1217 | } 1218 | ] 1219 | } 1220 | 1221 | /classifications: 1222 | post: 1223 | operationId: createClassification 1224 | deprecated: true 1225 | tags: 1226 | - OpenAI 1227 | summary: | 1228 | Classifies the specified `query` using provided examples. 1229 | 1230 | The endpoint first [searches](/docs/api-reference/searches) over the labeled examples 1231 | to select the ones most relevant for the particular query. Then, the relevant examples 1232 | are combined with the query to construct a prompt to produce the final label via the 1233 | [completions](/docs/api-reference/completions) endpoint. 1234 | 1235 | Labeled examples can be provided via an uploaded `file`, or explicitly listed in the 1236 | request using the `examples` parameter for quick tests and small scale use cases. 
1237 | requestBody: 1238 | required: true 1239 | content: 1240 | application/json: 1241 | schema: 1242 | $ref: '#/components/schemas/CreateClassificationRequest' 1243 | responses: 1244 | "200": 1245 | description: OK 1246 | content: 1247 | application/json: 1248 | schema: 1249 | $ref: '#/components/schemas/CreateClassificationResponse' 1250 | x-oaiMeta: 1251 | name: Create classification 1252 | group: classifications 1253 | path: create 1254 | examples: 1255 | curl: | 1256 | curl https://api.openai.com/v1/classifications \ 1257 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 1258 | -H "Content-Type: application/json" \ 1259 | -d '{ 1260 | "examples": [ 1261 | ["A happy moment", "Positive"], 1262 | ["I am sad.", "Negative"], 1263 | ["I am feeling awesome", "Positive"] 1264 | ], 1265 | "query": "It is a raining day :(", 1266 | "search_model": "ada", 1267 | "model": "curie", 1268 | "labels":["Positive", "Negative", "Neutral"] 1269 | }' 1270 | python: | 1271 | import os 1272 | import openai 1273 | openai.api_key = os.getenv("OPENAI_API_KEY") 1274 | openai.Classification.create( 1275 | search_model="ada", 1276 | model="curie", 1277 | examples=[ 1278 | ["A happy moment", "Positive"], 1279 | ["I am sad.", "Negative"], 1280 | ["I am feeling awesome", "Positive"] 1281 | ], 1282 | query="It is a raining day :(", 1283 | labels=["Positive", "Negative", "Neutral"], 1284 | ) 1285 | node.js: | 1286 | const { Configuration, OpenAIApi } = require("openai"); 1287 | const configuration = new Configuration({ 1288 | apiKey: process.env.OPENAI_API_KEY, 1289 | }); 1290 | const openai = new OpenAIApi(configuration); 1291 | const response = await openai.createClassification({ 1292 | search_model: "ada", 1293 | model: "curie", 1294 | examples: [ 1295 | ["A happy moment", "Positive"], 1296 | ["I am sad.", "Negative"], 1297 | ["I am feeling awesome", "Positive"] 1298 | ], 1299 | query:"It is a raining day :(", 1300 | labels: ["Positive", "Negative", "Neutral"], 1301 | }); 1302 | parameters: | 
1303 | { 1304 | "examples": [ 1305 | ["A happy moment", "Positive"], 1306 | ["I am sad.", "Negative"], 1307 | ["I am feeling awesome", "Positive"] 1308 | ], 1309 | "labels": ["Positive", "Negative", "Neutral"], 1310 | "query": "It is a raining day :(", 1311 | "search_model": "ada", 1312 | "model": "curie" 1313 | } 1314 | response: | 1315 | { 1316 | "completion": "cmpl-2euN7lUVZ0d4RKbQqRV79IiiE6M1f", 1317 | "label": "Negative", 1318 | "model": "curie:2020-05-03", 1319 | "object": "classification", 1320 | "search_model": "ada", 1321 | "selected_examples": [ 1322 | { 1323 | "document": 1, 1324 | "label": "Negative", 1325 | "text": "I am sad." 1326 | }, 1327 | { 1328 | "document": 0, 1329 | "label": "Positive", 1330 | "text": "A happy moment" 1331 | }, 1332 | { 1333 | "document": 2, 1334 | "label": "Positive", 1335 | "text": "I am feeling awesome" 1336 | } 1337 | ] 1338 | } 1339 | 1340 | /fine-tunes: 1341 | post: 1342 | operationId: createFineTune 1343 | tags: 1344 | - OpenAI 1345 | summary: | 1346 | Creates a job that fine-tunes a specified model from a given dataset. 1347 | 1348 | Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
1349 | 1350 | [Learn more about Fine-tuning](/docs/guides/fine-tuning) 1351 | requestBody: 1352 | required: true 1353 | content: 1354 | application/json: 1355 | schema: 1356 | $ref: '#/components/schemas/CreateFineTuneRequest' 1357 | responses: 1358 | "200": 1359 | description: OK 1360 | content: 1361 | application/json: 1362 | schema: 1363 | $ref: '#/components/schemas/FineTune' 1364 | x-oaiMeta: 1365 | name: Create fine-tune 1366 | group: fine-tunes 1367 | path: create 1368 | examples: 1369 | curl: | 1370 | curl https://api.openai.com/v1/fine-tunes \ 1371 | -H "Content-Type: application/json" \ 1372 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 1373 | -d '{ 1374 | "training_file": "file-XGinujblHPwGLSztz8cPS8XY" 1375 | }' 1376 | python: | 1377 | import os 1378 | import openai 1379 | openai.api_key = os.getenv("OPENAI_API_KEY") 1380 | openai.FineTune.create(training_file="file-XGinujblHPwGLSztz8cPS8XY") 1381 | node.js: | 1382 | const { Configuration, OpenAIApi } = require("openai"); 1383 | const configuration = new Configuration({ 1384 | apiKey: process.env.OPENAI_API_KEY, 1385 | }); 1386 | const openai = new OpenAIApi(configuration); 1387 | const response = await openai.createFineTune({ 1388 | training_file: "file-XGinujblHPwGLSztz8cPS8XY", 1389 | }); 1390 | response: | 1391 | { 1392 | "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", 1393 | "object": "fine-tune", 1394 | "model": "curie", 1395 | "created_at": 1614807352, 1396 | "events": [ 1397 | { 1398 | "object": "fine-tune-event", 1399 | "created_at": 1614807352, 1400 | "level": "info", 1401 | "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." 
1402 | } 1403 | ], 1404 | "fine_tuned_model": null, 1405 | "hyperparams": { 1406 | "batch_size": 4, 1407 | "learning_rate_multiplier": 0.1, 1408 | "n_epochs": 4, 1409 | "prompt_loss_weight": 0.1 1410 | }, 1411 | "organization_id": "org-...", 1412 | "result_files": [], 1413 | "status": "pending", 1414 | "validation_files": [], 1415 | "training_files": [ 1416 | { 1417 | "id": "file-XGinujblHPwGLSztz8cPS8XY", 1418 | "object": "file", 1419 | "bytes": 1547276, 1420 | "created_at": 1610062281, 1421 | "filename": "my-data-train.jsonl", 1422 | "purpose": "fine-tune-train" 1423 | } 1424 | ], 1425 | "updated_at": 1614807352 1426 | } 1427 | get: 1428 | operationId: listFineTunes 1429 | tags: 1430 | - OpenAI 1431 | summary: | 1432 | List your organization's fine-tuning jobs 1433 | responses: 1434 | "200": 1435 | description: OK 1436 | content: 1437 | application/json: 1438 | schema: 1439 | $ref: '#/components/schemas/ListFineTunesResponse' 1440 | x-oaiMeta: 1441 | name: List fine-tunes 1442 | group: fine-tunes 1443 | path: list 1444 | examples: 1445 | curl: | 1446 | curl https://api.openai.com/v1/fine-tunes \ 1447 | -H "Authorization: Bearer $OPENAI_API_KEY" 1448 | python: | 1449 | import os 1450 | import openai 1451 | openai.api_key = os.getenv("OPENAI_API_KEY") 1452 | openai.FineTune.list() 1453 | node.js: | 1454 | const { Configuration, OpenAIApi } = require("openai"); 1455 | const configuration = new Configuration({ 1456 | apiKey: process.env.OPENAI_API_KEY, 1457 | }); 1458 | const openai = new OpenAIApi(configuration); 1459 | const response = await openai.listFineTunes(); 1460 | response: | 1461 | { 1462 | "object": "list", 1463 | "data": [ 1464 | { 1465 | "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", 1466 | "object": "fine-tune", 1467 | "model": "curie", 1468 | "created_at": 1614807352, 1469 | "fine_tuned_model": null, 1470 | "hyperparams": { ... 
}, 1471 | "organization_id": "org-...", 1472 | "result_files": [], 1473 | "status": "pending", 1474 | "validation_files": [], 1475 | "training_files": [ { ... } ], 1476 | "updated_at": 1614807352, 1477 | }, 1478 | { ... }, 1479 | { ... } 1480 | ] 1481 | } 1482 | 1483 | /fine-tunes/{fine_tune_id}: 1484 | get: 1485 | operationId: retrieveFineTune 1486 | tags: 1487 | - OpenAI 1488 | summary: | 1489 | Gets info about the fine-tune job. 1490 | 1491 | [Learn more about Fine-tuning](/docs/guides/fine-tuning) 1492 | parameters: 1493 | - in: path 1494 | name: fine_tune_id 1495 | required: true 1496 | schema: 1497 | type: string 1498 | example: 1499 | ft-AF1WoRqd3aJAHsqc9NY7iL8F 1500 | description: | 1501 | The ID of the fine-tune job 1502 | responses: 1503 | "200": 1504 | description: OK 1505 | content: 1506 | application/json: 1507 | schema: 1508 | $ref: '#/components/schemas/FineTune' 1509 | x-oaiMeta: 1510 | name: Retrieve fine-tune 1511 | group: fine-tunes 1512 | path: retrieve 1513 | examples: 1514 | curl: | 1515 | curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F \ 1516 | -H "Authorization: Bearer $OPENAI_API_KEY" 1517 | python: | 1518 | import os 1519 | import openai 1520 | openai.api_key = os.getenv("OPENAI_API_KEY") 1521 | openai.FineTune.retrieve(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") 1522 | node.js: | 1523 | const { Configuration, OpenAIApi } = require("openai"); 1524 | const configuration = new Configuration({ 1525 | apiKey: process.env.OPENAI_API_KEY, 1526 | }); 1527 | const openai = new OpenAIApi(configuration); 1528 | const response = await openai.retrieveFineTune("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); 1529 | response: | 1530 | { 1531 | "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", 1532 | "object": "fine-tune", 1533 | "model": "curie", 1534 | "created_at": 1614807352, 1535 | "events": [ 1536 | { 1537 | "object": "fine-tune-event", 1538 | "created_at": 1614807352, 1539 | "level": "info", 1540 | "message": "Job enqueued. Waiting for jobs ahead to complete. 
Queue number: 0." 1541 | }, 1542 | { 1543 | "object": "fine-tune-event", 1544 | "created_at": 1614807356, 1545 | "level": "info", 1546 | "message": "Job started." 1547 | }, 1548 | { 1549 | "object": "fine-tune-event", 1550 | "created_at": 1614807861, 1551 | "level": "info", 1552 | "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." 1553 | }, 1554 | { 1555 | "object": "fine-tune-event", 1556 | "created_at": 1614807864, 1557 | "level": "info", 1558 | "message": "Uploaded result files: file-QQm6ZpqdNwAaVC3aSz5sWwLT." 1559 | }, 1560 | { 1561 | "object": "fine-tune-event", 1562 | "created_at": 1614807864, 1563 | "level": "info", 1564 | "message": "Job succeeded." 1565 | } 1566 | ], 1567 | "fine_tuned_model": "curie:ft-acmeco-2021-03-03-21-44-20", 1568 | "hyperparams": { 1569 | "batch_size": 4, 1570 | "learning_rate_multiplier": 0.1, 1571 | "n_epochs": 4, 1572 | "prompt_loss_weight": 0.1, 1573 | }, 1574 | "organization_id": "org-...", 1575 | "result_files": [ 1576 | { 1577 | "id": "file-QQm6ZpqdNwAaVC3aSz5sWwLT", 1578 | "object": "file", 1579 | "bytes": 81509, 1580 | "created_at": 1614807863, 1581 | "filename": "compiled_results.csv", 1582 | "purpose": "fine-tune-results" 1583 | } 1584 | ], 1585 | "status": "succeeded", 1586 | "validation_files": [], 1587 | "training_files": [ 1588 | { 1589 | "id": "file-XGinujblHPwGLSztz8cPS8XY", 1590 | "object": "file", 1591 | "bytes": 1547276, 1592 | "created_at": 1610062281, 1593 | "filename": "my-data-train.jsonl", 1594 | "purpose": "fine-tune-train" 1595 | } 1596 | ], 1597 | "updated_at": 1614807865, 1598 | } 1599 | 1600 | /fine-tunes/{fine_tune_id}/cancel: 1601 | post: 1602 | operationId: cancelFineTune 1603 | tags: 1604 | - OpenAI 1605 | summary: | 1606 | Immediately cancel a fine-tune job. 
1607 | parameters: 1608 | - in: path 1609 | name: fine_tune_id 1610 | required: true 1611 | schema: 1612 | type: string 1613 | example: 1614 | ft-AF1WoRqd3aJAHsqc9NY7iL8F 1615 | description: | 1616 | The ID of the fine-tune job to cancel 1617 | responses: 1618 | "200": 1619 | description: OK 1620 | content: 1621 | application/json: 1622 | schema: 1623 | $ref: '#/components/schemas/FineTune' 1624 | x-oaiMeta: 1625 | name: Cancel fine-tune 1626 | group: fine-tunes 1627 | path: cancel 1628 | examples: 1629 | curl: | 1630 | curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/cancel \ 1631 | -H "Authorization: Bearer $OPENAI_API_KEY" 1632 | python: | 1633 | import os 1634 | import openai 1635 | openai.api_key = os.getenv("OPENAI_API_KEY") 1636 | openai.FineTune.cancel(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") 1637 | node.js: | 1638 | const { Configuration, OpenAIApi } = require("openai"); 1639 | const configuration = new Configuration({ 1640 | apiKey: process.env.OPENAI_API_KEY, 1641 | }); 1642 | const openai = new OpenAIApi(configuration); 1643 | const response = await openai.cancelFineTune("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); 1644 | response: | 1645 | { 1646 | "id": "ft-xhrpBbvVUzYGo8oUO1FY4nI7", 1647 | "object": "fine-tune", 1648 | "model": "curie", 1649 | "created_at": 1614807770, 1650 | "events": [ { ... } ], 1651 | "fine_tuned_model": null, 1652 | "hyperparams": { ... 
}, 1653 | "organization_id": "org-...", 1654 | "result_files": [], 1655 | "status": "cancelled", 1656 | "validation_files": [], 1657 | "training_files": [ 1658 | { 1659 | "id": "file-XGinujblHPwGLSztz8cPS8XY", 1660 | "object": "file", 1661 | "bytes": 1547276, 1662 | "created_at": 1610062281, 1663 | "filename": "my-data-train.jsonl", 1664 | "purpose": "fine-tune-train" 1665 | } 1666 | ], 1667 | "updated_at": 1614807789 1668 | } 1669 | 1670 | /fine-tunes/{fine_tune_id}/events: 1671 | get: 1672 | operationId: listFineTuneEvents 1673 | tags: 1674 | - OpenAI 1675 | summary: | 1676 | Get fine-grained status updates for a fine-tune job. 1677 | parameters: 1678 | - in: path 1679 | name: fine_tune_id 1680 | required: true 1681 | schema: 1682 | type: string 1683 | example: 1684 | ft-AF1WoRqd3aJAHsqc9NY7iL8F 1685 | description: | 1686 | The ID of the fine-tune job to get events for. 1687 | - in: query 1688 | name: stream 1689 | required: false 1690 | schema: 1691 | type: boolean 1692 | default: false 1693 | description: | 1694 | Whether to stream events for the fine-tune job. If set to true, 1695 | events will be sent as data-only 1696 | [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) 1697 | as they become available. The stream will terminate with a 1698 | `data: [DONE]` message when the job is finished (succeeded, cancelled, 1699 | or failed). 1700 | 1701 | If set to false, only events generated so far will be returned. 
1702 | responses: 1703 | "200": 1704 | description: OK 1705 | content: 1706 | application/json: 1707 | schema: 1708 | $ref: '#/components/schemas/ListFineTuneEventsResponse' 1709 | x-oaiMeta: 1710 | name: List fine-tune events 1711 | group: fine-tunes 1712 | path: events 1713 | examples: 1714 | curl: | 1715 | curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/events \ 1716 | -H "Authorization: Bearer $OPENAI_API_KEY" 1717 | python: | 1718 | import os 1719 | import openai 1720 | openai.api_key = os.getenv("OPENAI_API_KEY") 1721 | openai.FineTune.list_events(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") 1722 | node.js: | 1723 | const { Configuration, OpenAIApi } = require("openai"); 1724 | const configuration = new Configuration({ 1725 | apiKey: process.env.OPENAI_API_KEY, 1726 | }); 1727 | const openai = new OpenAIApi(configuration); 1728 | const response = await openai.listFineTuneEvents("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); 1729 | response: | 1730 | { 1731 | "object": "list", 1732 | "data": [ 1733 | { 1734 | "object": "fine-tune-event", 1735 | "created_at": 1614807352, 1736 | "level": "info", 1737 | "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." 1738 | }, 1739 | { 1740 | "object": "fine-tune-event", 1741 | "created_at": 1614807356, 1742 | "level": "info", 1743 | "message": "Job started." 1744 | }, 1745 | { 1746 | "object": "fine-tune-event", 1747 | "created_at": 1614807861, 1748 | "level": "info", 1749 | "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." 1750 | }, 1751 | { 1752 | "object": "fine-tune-event", 1753 | "created_at": 1614807864, 1754 | "level": "info", 1755 | "message": "Uploaded result files: file-QQm6ZpqdNwAaVC3aSz5sWwLT." 1756 | }, 1757 | { 1758 | "object": "fine-tune-event", 1759 | "created_at": 1614807864, 1760 | "level": "info", 1761 | "message": "Job succeeded." 
1762 | } 1763 | ] 1764 | } 1765 | 1766 | /models: 1767 | get: 1768 | operationId: listModels 1769 | tags: 1770 | - OpenAI 1771 | summary: Lists the currently available models, and provides basic information about each one such as the owner and availability. 1772 | responses: 1773 | "200": 1774 | description: OK 1775 | content: 1776 | application/json: 1777 | schema: 1778 | $ref: '#/components/schemas/ListModelsResponse' 1779 | x-oaiMeta: 1780 | name: List models 1781 | group: models 1782 | path: list 1783 | examples: 1784 | curl: | 1785 | curl https://api.openai.com/v1/models \ 1786 | -H "Authorization: Bearer $OPENAI_API_KEY" 1787 | python: | 1788 | import os 1789 | import openai 1790 | openai.api_key = os.getenv("OPENAI_API_KEY") 1791 | openai.Model.list() 1792 | node.js: | 1793 | const { Configuration, OpenAIApi } = require("openai"); 1794 | const configuration = new Configuration({ 1795 | apiKey: process.env.OPENAI_API_KEY, 1796 | }); 1797 | const openai = new OpenAIApi(configuration); 1798 | const response = await openai.listModels(); 1799 | response: | 1800 | { 1801 | "data": [ 1802 | { 1803 | "id": "model-id-0", 1804 | "object": "model", 1805 | "owned_by": "organization-owner", 1806 | "permission": [...] 1807 | }, 1808 | { 1809 | "id": "model-id-1", 1810 | "object": "model", 1811 | "owned_by": "organization-owner", 1812 | "permission": [...] 1813 | }, 1814 | { 1815 | "id": "model-id-2", 1816 | "object": "model", 1817 | "owned_by": "openai", 1818 | "permission": [...] 1819 | }, 1820 | ], 1821 | "object": "list" 1822 | } 1823 | 1824 | /models/{model}: 1825 | get: 1826 | operationId: retrieveModel 1827 | tags: 1828 | - OpenAI 1829 | summary: Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
1830 | parameters: 1831 | - in: path 1832 | name: model 1833 | required: true 1834 | schema: 1835 | type: string 1836 | # ideally this will be an actual ID, so this will always work from browser 1837 | example: 1838 | text-davinci-001 1839 | description: 1840 | The ID of the model to use for this request 1841 | responses: 1842 | "200": 1843 | description: OK 1844 | content: 1845 | application/json: 1846 | schema: 1847 | $ref: '#/components/schemas/Model' 1848 | x-oaiMeta: 1849 | name: Retrieve model 1850 | group: models 1851 | path: retrieve 1852 | examples: 1853 | curl: | 1854 | curl https://api.openai.com/v1/models/VAR_model_id \ 1855 | -H "Authorization: Bearer $OPENAI_API_KEY" 1856 | python: | 1857 | import os 1858 | import openai 1859 | openai.api_key = os.getenv("OPENAI_API_KEY") 1860 | openai.Model.retrieve("VAR_model_id") 1861 | node.js: | 1862 | const { Configuration, OpenAIApi } = require("openai"); 1863 | const configuration = new Configuration({ 1864 | apiKey: process.env.OPENAI_API_KEY, 1865 | }); 1866 | const openai = new OpenAIApi(configuration); 1867 | const response = await openai.retrieveModel("VAR_model_id"); 1868 | response: | 1869 | { 1870 | "id": "VAR_model_id", 1871 | "object": "model", 1872 | "owned_by": "openai", 1873 | "permission": [...] 1874 | } 1875 | delete: 1876 | operationId: deleteModel 1877 | tags: 1878 | - OpenAI 1879 | summary: Delete a fine-tuned model. You must have the Owner role in your organization. 
1880 | parameters: 1881 | - in: path 1882 | name: model 1883 | required: true 1884 | schema: 1885 | type: string 1886 | example: curie:ft-acmeco-2021-03-03-21-44-20 1887 | description: The model to delete 1888 | responses: 1889 | "200": 1890 | description: OK 1891 | content: 1892 | application/json: 1893 | schema: 1894 | $ref: '#/components/schemas/DeleteModelResponse' 1895 | x-oaiMeta: 1896 | name: Delete fine-tune model 1897 | group: fine-tunes 1898 | path: delete-model 1899 | examples: 1900 | curl: | 1901 | curl https://api.openai.com/v1/models/curie:ft-acmeco-2021-03-03-21-44-20 \ 1902 | -X DELETE \ 1903 | -H "Authorization: Bearer $OPENAI_API_KEY" 1904 | python: | 1905 | import os 1906 | import openai 1907 | openai.api_key = os.getenv("OPENAI_API_KEY") 1908 | openai.Model.delete("curie:ft-acmeco-2021-03-03-21-44-20") 1909 | node.js: | 1910 | const { Configuration, OpenAIApi } = require("openai"); 1911 | const configuration = new Configuration({ 1912 | apiKey: process.env.OPENAI_API_KEY, 1913 | }); 1914 | const openai = new OpenAIApi(configuration); 1915 | const response = await openai.deleteModel('curie:ft-acmeco-2021-03-03-21-44-20'); 1916 | response: | 1917 | { 1918 | "id": "curie:ft-acmeco-2021-03-03-21-44-20", 1919 | "object": "model", 1920 | "deleted": true 1921 | } 1922 | 1923 | /moderations: 1924 | post: 1925 | operationId: createModeration 1926 | tags: 1927 | - OpenAI 1928 | summary: Classifies if text violates OpenAI's Content Policy 1929 | requestBody: 1930 | required: true 1931 | content: 1932 | application/json: 1933 | schema: 1934 | $ref: '#/components/schemas/CreateModerationRequest' 1935 | responses: 1936 | "200": 1937 | description: OK 1938 | content: 1939 | application/json: 1940 | schema: 1941 | $ref: '#/components/schemas/CreateModerationResponse' 1942 | x-oaiMeta: 1943 | name: Create moderation 1944 | group: moderations 1945 | path: create 1946 | examples: 1947 | curl: | 1948 | curl https://api.openai.com/v1/moderations \ 1949 | -H 
"Content-Type: application/json" \ 1950 | -H "Authorization: Bearer $OPENAI_API_KEY" \ 1951 | -d '{ 1952 | "input": "I want to kill them." 1953 | }' 1954 | python: | 1955 | import os 1956 | import openai 1957 | openai.api_key = os.getenv("OPENAI_API_KEY") 1958 | openai.Moderation.create( 1959 | input="I want to kill them.", 1960 | ) 1961 | node.js: | 1962 | const { Configuration, OpenAIApi } = require("openai"); 1963 | const configuration = new Configuration({ 1964 | apiKey: process.env.OPENAI_API_KEY, 1965 | }); 1966 | const openai = new OpenAIApi(configuration); 1967 | const response = await openai.createModeration({ 1968 | input: "I want to kill them.", 1969 | }); 1970 | parameters: | 1971 | { 1972 | "input": "I want to kill them." 1973 | } 1974 | response: | 1975 | { 1976 | "id": "modr-5MWoLO", 1977 | "model": "text-moderation-001", 1978 | "results": [ 1979 | { 1980 | "categories": { 1981 | "hate": false, 1982 | "hate/threatening": true, 1983 | "self-harm": false, 1984 | "sexual": false, 1985 | "sexual/minors": false, 1986 | "violence": true, 1987 | "violence/graphic": false 1988 | }, 1989 | "category_scores": { 1990 | "hate": 0.22714105248451233, 1991 | "hate/threatening": 0.4132447838783264, 1992 | "self-harm": 0.005232391878962517, 1993 | "sexual": 0.01407341007143259, 1994 | "sexual/minors": 0.0038522258400917053, 1995 | "violence": 0.9223177433013916, 1996 | "violence/graphic": 0.036865197122097015 1997 | }, 1998 | "flagged": true 1999 | } 2000 | ] 2001 | } 2002 | 2003 | components: 2004 | schemas: 2005 | Error: 2006 | type: object 2007 | properties: 2008 | type: 2009 | type: string 2010 | nullable: false 2011 | message: 2012 | type: string 2013 | nullable: false 2014 | param: 2015 | type: string 2016 | nullable: true 2017 | code: 2018 | type: string 2019 | nullable: true 2020 | required: 2021 | - type 2022 | - message 2023 | - param 2024 | - code 2025 | 2026 | ErrorResponse: 2027 | type: object 2028 | properties: 2029 | error: 2030 | $ref: 
'#/components/schemas/Error' 2031 | required: 2032 | - error 2033 | ListEnginesResponse: 2034 | type: object 2035 | properties: 2036 | object: 2037 | type: string 2038 | data: 2039 | type: array 2040 | items: 2041 | $ref: '#/components/schemas/Engine' 2042 | required: 2043 | - object 2044 | - data 2045 | 2046 | ListModelsResponse: 2047 | type: object 2048 | properties: 2049 | object: 2050 | type: string 2051 | data: 2052 | type: array 2053 | items: 2054 | $ref: '#/components/schemas/Model' 2055 | required: 2056 | - object 2057 | - data 2058 | 2059 | DeleteModelResponse: 2060 | type: object 2061 | properties: 2062 | id: 2063 | type: string 2064 | object: 2065 | type: string 2066 | deleted: 2067 | type: boolean 2068 | required: 2069 | - id 2070 | - object 2071 | - deleted 2072 | 2073 | CreateCompletionRequest: 2074 | type: object 2075 | properties: 2076 | model: &model_configuration 2077 | description: ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. 2078 | type: string 2079 | prompt: 2080 | description: &completions_prompt_description | 2081 | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 2082 | 2083 | Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. 2084 | default: '<|endoftext|>' 2085 | nullable: true 2086 | oneOf: 2087 | - type: string 2088 | default: '' 2089 | example: "This is a test." 2090 | - type: array 2091 | items: 2092 | type: string 2093 | default: '' 2094 | example: "This is a test." 
2095 | - type: array 2096 | minItems: 1 2097 | items: 2098 | type: integer 2099 | example: "[1212, 318, 257, 1332, 13]" 2100 | - type: array 2101 | minItems: 1 2102 | items: 2103 | type: array 2104 | minItems: 1 2105 | items: 2106 | type: integer 2107 | example: "[[1212, 318, 257, 1332, 13]]" 2108 | suffix: 2109 | description: 2110 | The suffix that comes after a completion of inserted text. 2111 | default: null 2112 | nullable: true 2113 | type: string 2114 | example: "test." 2115 | max_tokens: 2116 | type: integer 2117 | minimum: 0 2118 | default: 16 2119 | example: 16 2120 | nullable: true 2121 | description: &completions_max_tokens_description | 2122 | The maximum number of [tokens](/tokenizer) to generate in the completion. 2123 | 2124 | The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. 2125 | temperature: 2126 | type: number 2127 | minimum: 0 2128 | maximum: 2 2129 | default: 1 2130 | example: 1 2131 | nullable: true 2132 | description: &completions_temperature_description | 2133 | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 2134 | 2135 | We generally recommend altering this or `top_p` but not both. 2136 | top_p: 2137 | type: number 2138 | minimum: 0 2139 | maximum: 1 2140 | default: 1 2141 | example: 1 2142 | nullable: true 2143 | description: &completions_top_p_description | 2144 | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 2145 | 2146 | We generally recommend altering this or `temperature` but not both. 
2147 | n: 2148 | type: integer 2149 | minimum: 1 2150 | maximum: 128 2151 | default: 1 2152 | example: 1 2153 | nullable: true 2154 | description: &completions_completions_description | 2155 | How many completions to generate for each prompt. 2156 | 2157 | **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 2158 | stream: 2159 | description: > 2160 | Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) 2161 | as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). 2162 | type: boolean 2163 | nullable: true 2164 | default: false 2165 | logprobs: &completions_logprobs_configuration 2166 | type: integer 2167 | minimum: 0 2168 | maximum: 5 2169 | default: null 2170 | nullable: true 2171 | description: &completions_logprobs_description | 2172 | Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. 2173 | 2174 | The maximum value for `logprobs` is 5. 2175 | echo: 2176 | type: boolean 2177 | default: false 2178 | nullable: true 2179 | description: &completions_echo_description > 2180 | Echo back the prompt in addition to the completion 2181 | stop: 2182 | description: &completions_stop_description > 2183 | Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
2184 | default: null 2185 | nullable: true 2186 | oneOf: 2187 | - type: string 2188 | default: <|endoftext|> 2189 | example: "\n" 2190 | nullable: true 2191 | - type: array 2192 | minItems: 1 2193 | maxItems: 4 2194 | items: 2195 | type: string 2196 | example: '["\n"]' 2197 | presence_penalty: 2198 | type: number 2199 | default: 0 2200 | minimum: -2 2201 | maximum: 2 2202 | nullable: true 2203 | description: &completions_presence_penalty_description | 2204 | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 2205 | 2206 | [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) 2207 | frequency_penalty: 2208 | type: number 2209 | default: 0 2210 | minimum: -2 2211 | maximum: 2 2212 | nullable: true 2213 | description: &completions_frequency_penalty_description | 2214 | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 2215 | 2216 | [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) 2217 | best_of: 2218 | type: integer 2219 | default: 1 2220 | minimum: 0 2221 | maximum: 20 2222 | nullable: true 2223 | description: &completions_best_of_description | 2224 | Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. 2225 | 2226 | When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. 2227 | 2228 | **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
2229 | logit_bias: &completions_logit_bias 2230 | type: object 2231 | x-oaiTypeLabel: map 2232 | default: null 2233 | nullable: true 2234 | description: &completions_logit_bias_description | 2235 | Modify the likelihood of specified tokens appearing in the completion. 2236 | 2237 | Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 2238 | 2239 | As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. 2240 | user: &end_user_param_configuration 2241 | type: string 2242 | example: user-1234 2243 | description: | 2244 | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
2245 | required: 2246 | - model 2247 | 2248 | CreateCompletionResponse: 2249 | type: object 2250 | properties: 2251 | id: 2252 | type: string 2253 | object: 2254 | type: string 2255 | created: 2256 | type: integer 2257 | model: 2258 | type: string 2259 | choices: 2260 | type: array 2261 | items: 2262 | type: object 2263 | properties: 2264 | text: 2265 | type: string 2266 | index: 2267 | type: integer 2268 | logprobs: 2269 | type: object 2270 | nullable: true 2271 | properties: 2272 | tokens: 2273 | type: array 2274 | items: 2275 | type: string 2276 | token_logprobs: 2277 | type: array 2278 | items: 2279 | type: number 2280 | top_logprobs: 2281 | type: array 2282 | items: 2283 | type: object 2284 | text_offset: 2285 | type: array 2286 | items: 2287 | type: integer 2288 | finish_reason: 2289 | type: string 2290 | usage: 2291 | type: object 2292 | properties: 2293 | prompt_tokens: 2294 | type: integer 2295 | completion_tokens: 2296 | type: integer 2297 | total_tokens: 2298 | type: integer 2299 | required: 2300 | - prompt_tokens 2301 | - completion_tokens 2302 | - total_tokens 2303 | required: 2304 | - id 2305 | - object 2306 | - created 2307 | - model 2308 | - choices 2309 | 2310 | ChatCompletionRequestMessage: 2311 | type: object 2312 | properties: 2313 | role: 2314 | type: string 2315 | enum: ["system", "user", "assistant", "function"] 2316 | description: The role of the messages author. One of `system`, `user`, `assistant`, or `function`. 2317 | content: 2318 | type: string 2319 | description: The contents of the message. `content` is required for all messages except assistant messages with function calls. 2320 | name: 2321 | type: string 2322 | description: The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters. 
2323 | function_call: 2324 | type: object 2325 | description: The name and arguments of a function that should be called, as generated by the model. 2326 | properties: 2327 | name: 2328 | type: string 2329 | description: The name of the function to call. 2330 | arguments: 2331 | type: string 2332 | description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. 2333 | required: 2334 | - role 2335 | 2336 | ChatCompletionFunctionParameters: 2337 | type: object 2338 | description: The parameters the function accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. 2339 | # TODO type this as json schema 2340 | additionalProperties: true 2341 | 2342 | ChatCompletionFunctions: 2343 | type: object 2344 | properties: 2345 | name: 2346 | type: string 2347 | description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 2348 | description: 2349 | type: string 2350 | description: The description of what the function does. 2351 | parameters: 2352 | $ref: '#/components/schemas/ChatCompletionFunctionParameters' 2353 | required: 2354 | - name 2355 | 2356 | ChatCompletionResponseMessage: 2357 | type: object 2358 | properties: 2359 | role: 2360 | type: string 2361 | enum: ["system", "user", "assistant", "function"] 2362 | description: The role of the author of this message. 2363 | content: 2364 | type: string 2365 | description: The contents of the message. 2366 | function_call: 2367 | type: object 2368 | description: The name and arguments of a function that should be called, as generated by the model. 
2369 | properties: 2370 | name: 2371 | type: string 2372 | description: The name of the function to call. 2373 | arguments: 2374 | type: string 2375 | description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. 2376 | required: 2377 | - role 2378 | 2379 | CreateChatCompletionRequest: 2380 | type: object 2381 | properties: 2382 | model: 2383 | description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. 2384 | type: string 2385 | messages: 2386 | description: A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). 2387 | type: array 2388 | minItems: 1 2389 | items: 2390 | $ref: '#/components/schemas/ChatCompletionRequestMessage' 2391 | functions: 2392 | description: A list of functions the model may generate JSON inputs for. 2393 | type: array 2394 | minItems: 1 2395 | items: 2396 | $ref: '#/components/schemas/ChatCompletionFunctions' 2397 | function_call: 2398 | description: Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. 2399 | oneOf: 2400 | - type: string 2401 | enum: [none, auto] 2402 | - type: object 2403 | properties: 2404 | name: 2405 | type: string 2406 | description: The name of the function to call. 
2407 | required: 2408 | - name 2409 | temperature: 2410 | type: number 2411 | minimum: 0 2412 | maximum: 2 2413 | default: 1 2414 | example: 1 2415 | nullable: true 2416 | description: *completions_temperature_description 2417 | top_p: 2418 | type: number 2419 | minimum: 0 2420 | maximum: 1 2421 | default: 1 2422 | example: 1 2423 | nullable: true 2424 | description: *completions_top_p_description 2425 | n: 2426 | type: integer 2427 | minimum: 1 2428 | maximum: 128 2429 | default: 1 2430 | example: 1 2431 | nullable: true 2432 | description: How many chat completion choices to generate for each input message. 2433 | stream: 2434 | description: > 2435 | If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) 2436 | as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). 2437 | type: boolean 2438 | nullable: true 2439 | default: false 2440 | stop: 2441 | description: | 2442 | Up to 4 sequences where the API will stop generating further tokens. 2443 | default: null 2444 | oneOf: 2445 | - type: string 2446 | nullable: true 2447 | - type: array 2448 | minItems: 1 2449 | maxItems: 4 2450 | items: 2451 | type: string 2452 | max_tokens: 2453 | description: | 2454 | The maximum number of [tokens](/tokenizer) to generate in the chat completion. 2455 | 2456 | The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. 
2457 | default: inf 2458 | type: integer 2459 | presence_penalty: 2460 | type: number 2461 | default: 0 2462 | minimum: -2 2463 | maximum: 2 2464 | nullable: true 2465 | description: *completions_presence_penalty_description 2466 | frequency_penalty: 2467 | type: number 2468 | default: 0 2469 | minimum: -2 2470 | maximum: 2 2471 | nullable: true 2472 | description: *completions_frequency_penalty_description 2473 | logit_bias: 2474 | type: object 2475 | x-oaiTypeLabel: map 2476 | default: null 2477 | nullable: true 2478 | description: | 2479 | Modify the likelihood of specified tokens appearing in the completion. 2480 | 2481 | Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 
2482 | user: *end_user_param_configuration 2483 | required: 2484 | - model 2485 | - messages 2486 | 2487 | CreateChatCompletionResponse: 2488 | type: object 2489 | properties: 2490 | id: 2491 | type: string 2492 | object: 2493 | type: string 2494 | created: 2495 | type: integer 2496 | model: 2497 | type: string 2498 | choices: 2499 | type: array 2500 | items: 2501 | type: object 2502 | properties: 2503 | index: 2504 | type: integer 2505 | message: 2506 | $ref: '#/components/schemas/ChatCompletionResponseMessage' 2507 | finish_reason: 2508 | type: string 2509 | usage: 2510 | type: object 2511 | properties: 2512 | prompt_tokens: 2513 | type: integer 2514 | completion_tokens: 2515 | type: integer 2516 | total_tokens: 2517 | type: integer 2518 | required: 2519 | - prompt_tokens 2520 | - completion_tokens 2521 | - total_tokens 2522 | required: 2523 | - id 2524 | - object 2525 | - created 2526 | - model 2527 | - choices 2528 | 2529 | CreateEditRequest: 2530 | type: object 2531 | properties: 2532 | model: 2533 | description: ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. 2534 | type: string 2535 | input: 2536 | description: 2537 | The input text to use as a starting point for the edit. 2538 | type: string 2539 | default: '' 2540 | nullable: true 2541 | example: "What day of the wek is it?" 2542 | instruction: 2543 | description: 2544 | The instruction that tells the model how to edit the prompt. 2545 | type: string 2546 | example: "Fix the spelling mistakes." 2547 | n: 2548 | type: integer 2549 | minimum: 1 2550 | maximum: 20 2551 | default: 1 2552 | example: 1 2553 | nullable: true 2554 | description: 2555 | How many edits to generate for the input and instruction. 
2556 | temperature: 2557 | type: number 2558 | minimum: 0 2559 | maximum: 2 2560 | default: 1 2561 | example: 1 2562 | nullable: true 2563 | description: *completions_temperature_description 2564 | top_p: 2565 | type: number 2566 | minimum: 0 2567 | maximum: 1 2568 | default: 1 2569 | example: 1 2570 | nullable: true 2571 | description: *completions_top_p_description 2572 | required: 2573 | - model 2574 | - instruction 2575 | 2576 | CreateEditResponse: 2577 | type: object 2578 | properties: 2579 | object: 2580 | type: string 2581 | created: 2582 | type: integer 2583 | choices: 2584 | type: array 2585 | items: 2586 | type: object 2587 | properties: 2588 | text: 2589 | type: string 2590 | index: 2591 | type: integer 2592 | logprobs: 2593 | type: object 2594 | nullable: true 2595 | properties: 2596 | tokens: 2597 | type: array 2598 | items: 2599 | type: string 2600 | token_logprobs: 2601 | type: array 2602 | items: 2603 | type: number 2604 | top_logprobs: 2605 | type: array 2606 | items: 2607 | type: object 2608 | text_offset: 2609 | type: array 2610 | items: 2611 | type: integer 2612 | finish_reason: 2613 | type: string 2614 | usage: 2615 | type: object 2616 | properties: 2617 | prompt_tokens: 2618 | type: integer 2619 | completion_tokens: 2620 | type: integer 2621 | total_tokens: 2622 | type: integer 2623 | required: 2624 | - prompt_tokens 2625 | - completion_tokens 2626 | - total_tokens 2627 | required: 2628 | - object 2629 | - created 2630 | - choices 2631 | - usage 2632 | 2633 | CreateImageRequest: 2634 | type: object 2635 | properties: 2636 | prompt: 2637 | description: A text description of the desired image(s). The maximum length is 1000 characters. 2638 | type: string 2639 | example: "A cute baby sea otter" 2640 | n: &images_n 2641 | type: integer 2642 | minimum: 1 2643 | maximum: 10 2644 | default: 1 2645 | example: 1 2646 | nullable: true 2647 | description: The number of images to generate. Must be between 1 and 10. 
2648 | size: &images_size 2649 | type: string 2650 | enum: ["256x256", "512x512", "1024x1024"] 2651 | default: "1024x1024" 2652 | example: "1024x1024" 2653 | nullable: true 2654 | description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. 2655 | response_format: &images_response_format 2656 | type: string 2657 | enum: ["url", "b64_json"] 2658 | default: "url" 2659 | example: "url" 2660 | nullable: true 2661 | description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. 2662 | user: *end_user_param_configuration 2663 | required: 2664 | - prompt 2665 | 2666 | ImagesResponse: 2667 | properties: 2668 | created: 2669 | type: integer 2670 | data: 2671 | type: array 2672 | items: 2673 | type: object 2674 | properties: 2675 | url: 2676 | type: string 2677 | b64_json: 2678 | type: string 2679 | required: 2680 | - created 2681 | - data 2682 | 2683 | CreateImageEditRequest: 2684 | type: object 2685 | properties: 2686 | image: 2687 | description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. 2688 | type: string 2689 | format: binary 2690 | mask: 2691 | description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. 2692 | type: string 2693 | format: binary 2694 | prompt: 2695 | description: A text description of the desired image(s). The maximum length is 1000 characters. 
2696 | type: string 2697 | example: "A cute baby sea otter wearing a beret" 2698 | n: *images_n 2699 | size: *images_size 2700 | response_format: *images_response_format 2701 | user: *end_user_param_configuration 2702 | required: 2703 | - prompt 2704 | - image 2705 | 2706 | CreateImageVariationRequest: 2707 | type: object 2708 | properties: 2709 | image: 2710 | description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. 2711 | type: string 2712 | format: binary 2713 | n: *images_n 2714 | size: *images_size 2715 | response_format: *images_response_format 2716 | user: *end_user_param_configuration 2717 | required: 2718 | - image 2719 | 2720 | CreateModerationRequest: 2721 | type: object 2722 | properties: 2723 | input: 2724 | description: The input text to classify 2725 | oneOf: 2726 | - type: string 2727 | default: '' 2728 | example: "I want to kill them." 2729 | - type: array 2730 | items: 2731 | type: string 2732 | default: '' 2733 | example: "I want to kill them." 2734 | model: 2735 | description: | 2736 | Two content moderation models are available: `text-moderation-stable` and `text-moderation-latest`. 2737 | 2738 | The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. 
2739 | type: string 2740 | nullable: false 2741 | default: "text-moderation-latest" 2742 | example: "text-moderation-stable" 2743 | required: 2744 | - input 2745 | 2746 | CreateModerationResponse: 2747 | type: object 2748 | properties: 2749 | id: 2750 | type: string 2751 | model: 2752 | type: string 2753 | results: 2754 | type: array 2755 | items: 2756 | type: object 2757 | properties: 2758 | flagged: 2759 | type: boolean 2760 | categories: 2761 | type: object 2762 | properties: 2763 | hate: 2764 | type: boolean 2765 | hate/threatening: 2766 | type: boolean 2767 | self-harm: 2768 | type: boolean 2769 | sexual: 2770 | type: boolean 2771 | sexual/minors: 2772 | type: boolean 2773 | violence: 2774 | type: boolean 2775 | violence/graphic: 2776 | type: boolean 2777 | required: 2778 | - hate 2779 | - hate/threatening 2780 | - self-harm 2781 | - sexual 2782 | - sexual/minors 2783 | - violence 2784 | - violence/graphic 2785 | category_scores: 2786 | type: object 2787 | properties: 2788 | hate: 2789 | type: number 2790 | hate/threatening: 2791 | type: number 2792 | self-harm: 2793 | type: number 2794 | sexual: 2795 | type: number 2796 | sexual/minors: 2797 | type: number 2798 | violence: 2799 | type: number 2800 | violence/graphic: 2801 | type: number 2802 | required: 2803 | - hate 2804 | - hate/threatening 2805 | - self-harm 2806 | - sexual 2807 | - sexual/minors 2808 | - violence 2809 | - violence/graphic 2810 | required: 2811 | - flagged 2812 | - categories 2813 | - category_scores 2814 | required: 2815 | - id 2816 | - model 2817 | - results 2818 | 2819 | CreateSearchRequest: 2820 | type: object 2821 | properties: 2822 | query: 2823 | description: Query to search against the documents. 2824 | type: string 2825 | example: "the president" 2826 | minLength: 1 2827 | documents: 2828 | description: | 2829 | Up to 200 documents to search over, provided as a list of strings. 2830 | 2831 | The maximum document length (in tokens) is 2034 minus the number of tokens in the query. 
2832 | 2833 | You should specify either `documents` or a `file`, but not both. 2834 | type: array 2835 | minItems: 1 2836 | maxItems: 200 2837 | items: 2838 | type: string 2839 | nullable: true 2840 | example: "['White House', 'hospital', 'school']" 2841 | file: 2842 | description: | 2843 | The ID of an uploaded file that contains documents to search over. 2844 | 2845 | You should specify either `documents` or a `file`, but not both. 2846 | type: string 2847 | nullable: true 2848 | max_rerank: 2849 | description: | 2850 | The maximum number of documents to be re-ranked and returned by search. 2851 | 2852 | This flag only takes effect when `file` is set. 2853 | type: integer 2854 | minimum: 1 2855 | default: 200 2856 | nullable: true 2857 | return_metadata: &return_metadata_configuration 2858 | description: | 2859 | A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a "metadata" field. 2860 | 2861 | This flag only takes effect when `file` is set. 2862 | type: boolean 2863 | default: false 2864 | nullable: true 2865 | user: *end_user_param_configuration 2866 | required: 2867 | - query 2868 | 2869 | CreateSearchResponse: 2870 | type: object 2871 | properties: 2872 | object: 2873 | type: string 2874 | model: 2875 | type: string 2876 | data: 2877 | type: array 2878 | items: 2879 | type: object 2880 | properties: 2881 | object: 2882 | type: string 2883 | document: 2884 | type: integer 2885 | score: 2886 | type: number 2887 | 2888 | ListFilesResponse: 2889 | type: object 2890 | properties: 2891 | object: 2892 | type: string 2893 | data: 2894 | type: array 2895 | items: 2896 | $ref: '#/components/schemas/OpenAIFile' 2897 | required: 2898 | - object 2899 | - data 2900 | 2901 | CreateFileRequest: 2902 | type: object 2903 | additionalProperties: false 2904 | properties: 2905 | file: 2906 | description: | 2907 | Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. 
2908 | 2909 | If the `purpose` is set to "fine-tune", each line is a JSON record with "prompt" and "completion" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data). 2910 | type: string 2911 | format: binary 2912 | purpose: 2913 | description: | 2914 | The intended purpose of the uploaded documents. 2915 | 2916 | Use "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file. 2917 | 2918 | type: string 2919 | required: 2920 | - file 2921 | - purpose 2922 | 2923 | DeleteFileResponse: 2924 | type: object 2925 | properties: 2926 | id: 2927 | type: string 2928 | object: 2929 | type: string 2930 | deleted: 2931 | type: boolean 2932 | required: 2933 | - id 2934 | - object 2935 | - deleted 2936 | 2937 | CreateAnswerRequest: 2938 | type: object 2939 | additionalProperties: false 2940 | properties: 2941 | model: 2942 | description: ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. 2943 | type: string 2944 | question: 2945 | description: Question to get answered. 2946 | type: string 2947 | minLength: 1 2948 | example: "What is the capital of Japan?" 2949 | examples: 2950 | description: List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. 2951 | type: array 2952 | minItems: 1 2953 | maxItems: 200 2954 | items: 2955 | type: array 2956 | minItems: 2 2957 | maxItems: 2 2958 | items: 2959 | type: string 2960 | minLength: 1 2961 | example: "[['What is the capital of Canada?', 'Ottawa'], ['Which province is Ottawa in?', 'Ontario']]" 2962 | examples_context: 2963 | description: A text snippet containing the contextual information used to generate the answers for the `examples` you provide. 2964 | type: string 2965 | example: "Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. 
border." 2966 | documents: 2967 | description: | 2968 | List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. 2969 | 2970 | You should specify either `documents` or a `file`, but not both. 2971 | type: array 2972 | maxItems: 200 2973 | items: 2974 | type: string 2975 | example: "['Japan is an island country in East Asia, located in the northwest Pacific Ocean.', 'Tokyo is the capital and most populous prefecture of Japan.']" 2976 | nullable: true 2977 | file: 2978 | description: | 2979 | The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. 2980 | 2981 | You should specify either `documents` or a `file`, but not both. 2982 | type: string 2983 | nullable: true 2984 | search_model: &search_model_configuration 2985 | description: ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`. 2986 | type: string 2987 | default: ada 2988 | nullable: true 2989 | max_rerank: 2990 | description: The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. 2991 | type: integer 2992 | default: 200 2993 | nullable: true 2994 | temperature: 2995 | description: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
2996 | type: number 2997 | default: 0 2998 | nullable: true 2999 | logprobs: &context_completions_logprobs_configuration 3000 | type: integer 3001 | minimum: 0 3002 | maximum: 5 3003 | default: null 3004 | nullable: true 3005 | description: | 3006 | Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. 3007 | 3008 | The maximum value for `logprobs` is 5. 3009 | 3010 | When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs. 3011 | max_tokens: 3012 | description: The maximum number of tokens allowed for the generated answer 3013 | type: integer 3014 | default: 16 3015 | nullable: true 3016 | stop: 3017 | description: *completions_stop_description 3018 | default: null 3019 | oneOf: 3020 | - type: string 3021 | default: <|endoftext|> 3022 | example: "\n" 3023 | - type: array 3024 | minItems: 1 3025 | maxItems: 4 3026 | items: 3027 | type: string 3028 | example: '["\n"]' 3029 | nullable: true 3030 | n: 3031 | description: How many answers to generate for each question. 3032 | type: integer 3033 | minimum: 1 3034 | maximum: 10 3035 | default: 1 3036 | nullable: true 3037 | logit_bias: *completions_logit_bias 3038 | return_metadata: *return_metadata_configuration 3039 | return_prompt: &return_prompt_configuration 3040 | description: If set to `true`, the returned JSON will include a "prompt" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. 3041 | type: boolean 3042 | default: false 3043 | nullable: true 3044 | expand: &expand_configuration 3045 | description: If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. 
Currently we support `completion` and `file` objects for expansion. 3046 | type: array 3047 | items: {} 3048 | nullable: true 3049 | default: [] 3050 | user: *end_user_param_configuration 3051 | required: 3052 | - model 3053 | - question 3054 | - examples 3055 | - examples_context 3056 | 3057 | CreateAnswerResponse: 3058 | type: object 3059 | properties: 3060 | object: 3061 | type: string 3062 | model: 3063 | type: string 3064 | search_model: 3065 | type: string 3066 | completion: 3067 | type: string 3068 | answers: 3069 | type: array 3070 | items: 3071 | type: string 3072 | selected_documents: 3073 | type: array 3074 | items: 3075 | type: object 3076 | properties: 3077 | document: 3078 | type: integer 3079 | text: 3080 | type: string 3081 | 3082 | CreateClassificationRequest: 3083 | type: object 3084 | additionalProperties: false 3085 | properties: 3086 | model: *model_configuration 3087 | query: 3088 | description: Query to be classified. 3089 | type: string 3090 | minLength: 1 3091 | example: "The plot is not very attractive." 3092 | examples: 3093 | description: | 3094 | A list of examples with labels, in the following format: 3095 | 3096 | `[["The movie is so interesting.", "Positive"], ["It is quite boring.", "Negative"], ...]` 3097 | 3098 | All the label strings will be normalized to be capitalized. 3099 | 3100 | You should specify either `examples` or `file`, but not both. 3101 | type: array 3102 | minItems: 2 3103 | maxItems: 200 3104 | items: 3105 | type: array 3106 | minItems: 2 3107 | maxItems: 2 3108 | items: 3109 | type: string 3110 | minLength: 1 3111 | example: "[['Do not see this film.', 'Negative'], ['Smart, provocative and blisteringly funny.', 'Positive']]" 3112 | nullable: true 3113 | file: 3114 | description: | 3115 | The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. 
3116 | 3117 | You should specify either `examples` or `file`, but not both. 3118 | type: string 3119 | nullable: true 3120 | labels: 3121 | description: The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized. 3122 | type: array 3123 | minItems: 2 3124 | maxItems: 200 3125 | default: null 3126 | items: 3127 | type: string 3128 | example: ["Positive", "Negative"] 3129 | nullable: true 3130 | search_model: *search_model_configuration 3131 | temperature: 3132 | description: 3133 | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 3134 | type: number 3135 | minimum: 0 3136 | maximum: 2 3137 | default: 0 3138 | nullable: true 3139 | example: 0 3140 | logprobs: *context_completions_logprobs_configuration 3141 | max_examples: 3142 | description: The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. 
3143 | type: integer 3144 | default: 200 3145 | nullable: true 3146 | logit_bias: *completions_logit_bias 3147 | return_prompt: *return_prompt_configuration 3148 | return_metadata: *return_metadata_configuration 3149 | expand: *expand_configuration 3150 | user: *end_user_param_configuration 3151 | required: 3152 | - model 3153 | - query 3154 | 3155 | CreateClassificationResponse: 3156 | type: object 3157 | properties: 3158 | object: 3159 | type: string 3160 | model: 3161 | type: string 3162 | search_model: 3163 | type: string 3164 | completion: 3165 | type: string 3166 | label: 3167 | type: string 3168 | selected_examples: 3169 | type: array 3170 | items: 3171 | type: object 3172 | properties: 3173 | document: 3174 | type: integer 3175 | text: 3176 | type: string 3177 | label: 3178 | type: string 3179 | 3180 | CreateFineTuneRequest: 3181 | type: object 3182 | properties: 3183 | training_file: 3184 | description: | 3185 | The ID of an uploaded file that contains training data. 3186 | 3187 | See [upload file](/docs/api-reference/files/upload) for how to upload a file. 3188 | 3189 | Your dataset must be formatted as a JSONL file, where each training 3190 | example is a JSON object with the keys "prompt" and "completion". 3191 | Additionally, you must upload your file with the purpose `fine-tune`. 3192 | 3193 | See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. 3194 | type: string 3195 | example: "file-ajSREls59WBbvgSzJSVWxMCB" 3196 | validation_file: 3197 | description: | 3198 | The ID of an uploaded file that contains validation data. 3199 | 3200 | If you provide this file, the data is used to generate validation 3201 | metrics periodically during fine-tuning. These metrics can be viewed in 3202 | the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). 3203 | Your train and validation data should be mutually exclusive. 
3204 | 3205 | Your dataset must be formatted as a JSONL file, where each validation 3206 | example is a JSON object with the keys "prompt" and "completion". 3207 | Additionally, you must upload your file with the purpose `fine-tune`. 3208 | 3209 | See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. 3210 | type: string 3211 | nullable: true 3212 | example: "file-XjSREls59WBbvgSzJSVWxMCa" 3213 | model: 3214 | description: | 3215 | The name of the base model to fine-tune. You can select one of "ada", 3216 | "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. 3217 | To learn more about these models, see the 3218 | [Models](https://platform.openai.com/docs/models) documentation. 3219 | default: "curie" 3220 | type: string 3221 | nullable: true 3222 | n_epochs: 3223 | description: | 3224 | The number of epochs to train the model for. An epoch refers to one 3225 | full cycle through the training dataset. 3226 | default: 4 3227 | type: integer 3228 | nullable: true 3229 | batch_size: 3230 | description: | 3231 | The batch size to use for training. The batch size is the number of 3232 | training examples used to train a single forward and backward pass. 3233 | 3234 | By default, the batch size will be dynamically configured to be 3235 | ~0.2% of the number of examples in the training set, capped at 256 - 3236 | in general, we've found that larger batch sizes tend to work better 3237 | for larger datasets. 3238 | default: null 3239 | type: integer 3240 | nullable: true 3241 | learning_rate_multiplier: 3242 | description: | 3243 | The learning rate multiplier to use for training. 3244 | The fine-tuning learning rate is the original learning rate used for 3245 | pretraining multiplied by this value. 3246 | 3247 | By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 3248 | depending on final `batch_size` (larger learning rates tend to 3249 | perform better with larger batch sizes). 
We recommend experimenting 3250 | with values in the range 0.02 to 0.2 to see what produces the best 3251 | results. 3252 | default: null 3253 | type: number 3254 | nullable: true 3255 | prompt_loss_weight: 3256 | description: | 3257 | The weight to use for loss on the prompt tokens. This controls how 3258 | much the model tries to learn to generate the prompt (as compared 3259 | to the completion which always has a weight of 1.0), and can add 3260 | a stabilizing effect to training when completions are short. 3261 | 3262 | If prompts are extremely long (relative to completions), it may make 3263 | sense to reduce this weight so as to avoid over-prioritizing 3264 | learning the prompt. 3265 | default: 0.01 3266 | type: number 3267 | nullable: true 3268 | compute_classification_metrics: 3269 | description: | 3270 | If set, we calculate classification-specific metrics such as accuracy 3271 | and F-1 score using the validation set at the end of every epoch. 3272 | These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). 3273 | 3274 | In order to compute classification metrics, you must provide a 3275 | `validation_file`. Additionally, you must 3276 | specify `classification_n_classes` for multiclass classification or 3277 | `classification_positive_class` for binary classification. 3278 | type: boolean 3279 | default: false 3280 | nullable: true 3281 | classification_n_classes: 3282 | description: | 3283 | The number of classes in a classification task. 3284 | 3285 | This parameter is required for multiclass classification. 3286 | type: integer 3287 | default: null 3288 | nullable: true 3289 | classification_positive_class: 3290 | description: | 3291 | The positive class in binary classification. 3292 | 3293 | This parameter is needed to generate precision, recall, and F1 3294 | metrics when doing binary classification. 
3295 | type: string 3296 | default: null 3297 | nullable: true 3298 | classification_betas: 3299 | description: | 3300 | If this is provided, we calculate F-beta scores at the specified 3301 | beta values. The F-beta score is a generalization of F-1 score. 3302 | This is only used for binary classification. 3303 | 3304 | With a beta of 1 (i.e. the F-1 score), precision and recall are 3305 | given the same weight. A larger beta score puts more weight on 3306 | recall and less on precision. A smaller beta score puts more weight 3307 | on precision and less on recall. 3308 | type: array 3309 | items: 3310 | type: number 3311 | example: [0.6, 1, 1.5, 2] 3312 | default: null 3313 | nullable: true 3314 | suffix: 3315 | description: | 3316 | A string of up to 40 characters that will be added to your fine-tuned model name. 3317 | 3318 | For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. 3319 | type: string 3320 | minLength: 1 3321 | maxLength: 40 3322 | default: null 3323 | nullable: true 3324 | required: 3325 | - training_file 3326 | 3327 | ListFineTunesResponse: 3328 | type: object 3329 | properties: 3330 | object: 3331 | type: string 3332 | data: 3333 | type: array 3334 | items: 3335 | $ref: '#/components/schemas/FineTune' 3336 | required: 3337 | - object 3338 | - data 3339 | 3340 | ListFineTuneEventsResponse: 3341 | type: object 3342 | properties: 3343 | object: 3344 | type: string 3345 | data: 3346 | type: array 3347 | items: 3348 | $ref: '#/components/schemas/FineTuneEvent' 3349 | required: 3350 | - object 3351 | - data 3352 | 3353 | CreateEmbeddingRequest: 3354 | type: object 3355 | additionalProperties: false 3356 | properties: 3357 | model: *model_configuration 3358 | input: 3359 | description: | 3360 | Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. 
Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. 3361 | example: "The quick brown fox jumped over the lazy dog" 3362 | oneOf: 3363 | - type: string 3364 | default: '' 3365 | example: "This is a test." 3366 | - type: array 3367 | items: 3368 | type: string 3369 | default: '' 3370 | example: "This is a test." 3371 | - type: array 3372 | minItems: 1 3373 | items: 3374 | type: integer 3375 | example: "[1212, 318, 257, 1332, 13]" 3376 | - type: array 3377 | minItems: 1 3378 | items: 3379 | type: array 3380 | minItems: 1 3381 | items: 3382 | type: integer 3383 | example: "[[1212, 318, 257, 1332, 13]]" 3384 | user: *end_user_param_configuration 3385 | required: 3386 | - model 3387 | - input 3388 | 3389 | CreateEmbeddingResponse: 3390 | type: object 3391 | properties: 3392 | object: 3393 | type: string 3394 | model: 3395 | type: string 3396 | data: 3397 | type: array 3398 | items: 3399 | type: object 3400 | properties: 3401 | index: 3402 | type: integer 3403 | object: 3404 | type: string 3405 | embedding: 3406 | type: array 3407 | items: 3408 | type: number 3409 | required: 3410 | - index 3411 | - object 3412 | - embedding 3413 | usage: 3414 | type: object 3415 | properties: 3416 | prompt_tokens: 3417 | type: integer 3418 | total_tokens: 3419 | type: integer 3420 | required: 3421 | - prompt_tokens 3422 | - total_tokens 3423 | required: 3424 | - object 3425 | - model 3426 | - data 3427 | - usage 3428 | 3429 | CreateTranscriptionRequest: 3430 | type: object 3431 | additionalProperties: false 3432 | properties: 3433 | file: 3434 | description: | 3435 | The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. 
3436 | type: string 3437 | x-oaiTypeLabel: file 3438 | format: binary 3439 | model: 3440 | description: | 3441 | ID of the model to use. Only `whisper-1` is currently available. 3442 | type: string 3443 | prompt: 3444 | description: | 3445 | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. 3446 | type: string 3447 | response_format: 3448 | description: | 3449 | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. 3450 | type: string 3451 | default: json 3452 | temperature: 3453 | description: | 3454 | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. 3455 | type: number 3456 | default: 0 3457 | language: 3458 | description: | 3459 | The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. 3460 | type: string 3461 | required: 3462 | - file 3463 | - model 3464 | 3465 | # Note: This does not currently support the non-default response format types. 3466 | CreateTranscriptionResponse: 3467 | type: object 3468 | properties: 3469 | text: 3470 | type: string 3471 | required: 3472 | - text 3473 | 3474 | CreateTranslationRequest: 3475 | type: object 3476 | additionalProperties: false 3477 | properties: 3478 | file: 3479 | description: | 3480 | The audio file object (not file name) to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. 3481 | type: string 3482 | x-oaiTypeLabel: file 3483 | format: binary 3484 | model: 3485 | description: | 3486 | ID of the model to use. 
Only `whisper-1` is currently available. 3487 | type: string 3488 | prompt: 3489 | description: | 3490 | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. 3491 | type: string 3492 | response_format: 3493 | description: | 3494 | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. 3495 | type: string 3496 | default: json 3497 | temperature: 3498 | description: | 3499 | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. 3500 | type: number 3501 | default: 0 3502 | required: 3503 | - file 3504 | - model 3505 | 3506 | # Note: This does not currently support the non-default response format types. 
3507 | CreateTranslationResponse: 3508 | type: object 3509 | properties: 3510 | text: 3511 | type: string 3512 | required: 3513 | - text 3514 | 3515 | Engine: 3516 | title: Engine 3517 | properties: 3518 | id: 3519 | type: string 3520 | object: 3521 | type: string 3522 | created: 3523 | type: integer 3524 | nullable: true 3525 | ready: 3526 | type: boolean 3527 | required: 3528 | - id 3529 | - object 3530 | - created 3531 | - ready 3532 | 3533 | Model: 3534 | title: Model 3535 | properties: 3536 | id: 3537 | type: string 3538 | object: 3539 | type: string 3540 | created: 3541 | type: integer 3542 | owned_by: 3543 | type: string 3544 | required: 3545 | - id 3546 | - object 3547 | - created 3548 | - owned_by 3549 | 3550 | OpenAIFile: 3551 | title: OpenAIFile 3552 | properties: 3553 | id: 3554 | type: string 3555 | object: 3556 | type: string 3557 | bytes: 3558 | type: integer 3559 | created_at: 3560 | type: integer 3561 | filename: 3562 | type: string 3563 | purpose: 3564 | type: string 3565 | status: 3566 | type: string 3567 | status_details: 3568 | type: object 3569 | nullable: true 3570 | required: 3571 | - id 3572 | - object 3573 | - bytes 3574 | - created_at 3575 | - filename 3576 | - purpose 3577 | 3578 | FineTune: 3579 | title: FineTune 3580 | properties: 3581 | id: 3582 | type: string 3583 | object: 3584 | type: string 3585 | created_at: 3586 | type: integer 3587 | updated_at: 3588 | type: integer 3589 | model: 3590 | type: string 3591 | fine_tuned_model: 3592 | type: string 3593 | nullable: true 3594 | organization_id: 3595 | type: string 3596 | status: 3597 | type: string 3598 | hyperparams: 3599 | type: object 3600 | training_files: 3601 | type: array 3602 | items: 3603 | $ref: '#/components/schemas/OpenAIFile' 3604 | validation_files: 3605 | type: array 3606 | items: 3607 | $ref: '#/components/schemas/OpenAIFile' 3608 | result_files: 3609 | type: array 3610 | items: 3611 | $ref: '#/components/schemas/OpenAIFile' 3612 | events: 3613 | type: array 3614 | 
items: 3615 | $ref: '#/components/schemas/FineTuneEvent' 3616 | required: 3617 | - id 3618 | - object 3619 | - created_at 3620 | - updated_at 3621 | - model 3622 | - fine_tuned_model 3623 | - organization_id 3624 | - status 3625 | - hyperparams 3626 | - training_files 3627 | - validation_files 3628 | - result_files 3629 | 3630 | FineTuneEvent: 3631 | title: FineTuneEvent 3632 | properties: 3633 | object: 3634 | type: string 3635 | created_at: 3636 | type: integer 3637 | level: 3638 | type: string 3639 | message: 3640 | type: string 3641 | required: 3642 | - object 3643 | - created_at 3644 | - level 3645 | - message 3646 | 3647 | x-oaiMeta: 3648 | groups: 3649 | - id: models 3650 | title: Models 3651 | description: | 3652 | List and describe the various models available in the API. You can refer to the [Models](/docs/models) documentation to understand what models are available and the differences between them. 3653 | - id: chat 3654 | title: Chat 3655 | description: | 3656 | Given a list of messages comprising a conversation, the model will return a response. 3657 | - id: completions 3658 | title: Completions 3659 | description: | 3660 | Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. 3661 | - id: edits 3662 | title: Edits 3663 | description: | 3664 | Given a prompt and an instruction, the model will return an edited version of the prompt. 3665 | - id: images 3666 | title: Images 3667 | description: | 3668 | Given a prompt and/or an input image, the model will generate a new image. 3669 | 3670 | Related guide: [Image generation](/docs/guides/images) 3671 | - id: embeddings 3672 | title: Embeddings 3673 | description: | 3674 | Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. 
3675 | 3676 | Related guide: [Embeddings](/docs/guides/embeddings) 3677 | - id: audio 3678 | title: Audio 3679 | description: | 3680 | Learn how to turn audio into text. 3681 | 3682 | Related guide: [Speech to text](/docs/guides/speech-to-text) 3683 | - id: files 3684 | title: Files 3685 | description: | 3686 | Files are used to upload documents that can be used with features like [Fine-tuning](/docs/api-reference/fine-tunes). 3687 | - id: fine-tunes 3688 | title: Fine-tunes 3689 | description: | 3690 | Manage fine-tuning jobs to tailor a model to your specific training data. 3691 | 3692 | Related guide: [Fine-tune models](/docs/guides/fine-tuning) 3693 | - id: moderations 3694 | title: Moderations 3695 | description: | 3696 | Given an input text, outputs if the model classifies it as violating OpenAI's content policy. 3697 | 3698 | Related guide: [Moderations](/docs/guides/moderation) 3699 | - id: searches 3700 | title: Searches 3701 | warning: 3702 | title: This endpoint is deprecated and will be removed on December 3rd, 2022 3703 | message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6272952-search-transition-guide). 3704 | description: | 3705 | Given a query and a set of documents or labels, the model ranks each document based on its semantic similarity to the provided query. 3706 | 3707 | Related guide: [Search](/docs/guides/search) 3708 | - id: classifications 3709 | title: Classifications 3710 | warning: 3711 | title: This endpoint is deprecated and will be removed on December 3rd, 2022 3712 | message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6272941-classifications-transition-guide). 3713 | description: | 3714 | Given a query and a set of labeled examples, the model will predict the most likely label for the query. Useful as a drop-in replacement for any ML classification or text-to-label task. 
3715 | 3716 | Related guide: [Classification](/docs/guides/classifications) 3717 | - id: answers 3718 | title: Answers 3719 | warning: 3720 | title: This endpoint is deprecated and will be removed on December 3rd, 2022 3721 | message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6233728-answers-transition-guide). 3722 | description: | 3723 | Given a question, a set of documents, and some examples, the API generates an answer to the question based on the information in the set of documents. This is useful for question-answering applications on sources of truth, like company documentation or a knowledge base. 3724 | 3725 | Related guide: [Question answering](/docs/guides/answers) 3726 | - id: engines 3727 | title: Engines 3728 | description: These endpoints describe and provide access to the various engines available in the API. 3729 | warning: 3730 | title: The Engines endpoints are deprecated. 3731 | message: Please use their replacement, [Models](/docs/api-reference/models), instead. [Learn more](https://help.openai.com/TODO). --------------------------------------------------------------------------------