├── .vscode
│   ├── settings.json
│   └── launch.json
├── .editorconfig
├── Tests
│   └── Get-AIChat.Tests.ps1
├── src
│   ├── OpenAI.Client
│   │   ├── OpenAI.Client.csproj
│   │   ├── Constructors.cs
│   │   ├── Extensions
│   │   │   ├── StringExtensions.cs
│   │   │   └── HttpClientExtensions.cs
│   │   ├── Stringifiers.cs
│   │   ├── JsonStringEnumConverter.cs
│   │   ├── Chat.cs
│   │   ├── TemplateDirectory
│   │   │   └── Class.liquid
│   │   └── OpenAI.nswag
│   └── PowerShellAssistant
│       ├── FormatSettings.settings.ps1
│       ├── Publish.build.ps1
│       ├── PowerShellAssistant.csproj
│       ├── PowerShellAssistant.psd1
│       └── PowerShellAssistant.psm1
├── PowerShellAssistant.sln
├── README.MD
├── LICENSE
└── .gitignore

/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 |     "gitlens.proxy": {}
3 | }
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | [*.cs]
2 | dotnet_naming_style.interface.required_prefix = none
--------------------------------------------------------------------------------
/Tests/Get-AIChat.Tests.ps1:
--------------------------------------------------------------------------------
1 | BeforeAll {
2 |     $ManifestPath = Resolve-Path (Join-Path $PSScriptRoot '../src/PowerShellAssistant/PowerShellAssistant.psd1')
3 |     Import-Module $ManifestPath
4 | }
5 | Describe 'Get-AIChat' {
6 |     Context 'When called with no parameters' {
7 |         It 'Should return a chat' -Pending {
8 |             $chat = Get-AIChat 'Return only the word PESTER'
9 |             $chat | Should -Be 'PESTER'
10 |         }
11 |     }
12 | }
--------------------------------------------------------------------------------
/src/OpenAI.Client/OpenAI.Client.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk">
2 | 
3 |   <PropertyGroup>
4 |     <TargetFramework>net7.0</TargetFramework>
5 |     <ImplicitUsings>enable</ImplicitUsings>
6 |     <Nullable>enable</Nullable>
7 |   </PropertyGroup>
8 | 
9 |   <PropertyGroup Condition="'$(Configuration)' == 'Release'">
10 |     <DebugSymbols>false</DebugSymbols>
11 |     <DebugType>none</DebugType>
12 |   </PropertyGroup>
13 | 
14 | </Project>
--------------------------------------------------------------------------------
/src/OpenAI.Client/Constructors.cs:
--------------------------------------------------------------------------------
1 | namespace OpenAI;
2 | public partial class ChatCompletionRequestMessage
3 | {
4 |     public ChatCompletionRequestMessage() { }
5 | 
6 |     public ChatCompletionRequestMessage(ChatCompletionResponseMessage responseMessage)
7 |     {
8 |         Content = responseMessage.Content;
9 |         Role = Enum.Parse<ChatCompletionRequestMessageRole>(responseMessage.Role.ToString());
10 |     }
11 | 
12 |     public ChatCompletionRequestMessage(string userMessage) : this(userMessage, ChatCompletionRequestMessageRole.User) { }
13 | 
14 |     public ChatCompletionRequestMessage(string userMessage, ChatCompletionRequestMessageRole role = ChatCompletionRequestMessageRole.User)
15 |     {
16 |         Role = role;
17 |         Content = userMessage;
18 |     }
19 | }
--------------------------------------------------------------------------------
/src/OpenAI.Client/Extensions/StringExtensions.cs:
--------------------------------------------------------------------------------
1 | // Taken with love from: https://github.com/betalgo/openai/blob/master/OpenAI.SDK/Extensions/StringExtensions.cs
2 | 
3 | namespace OpenAI.Extensions;
4 | 
5 | /// <summary>
6 | ///     Extension methods for string manipulation
7 | /// </summary>
8 | public static class StringExtensions
9 | {
10 |     /// <summary>
11 |     ///     Remove the search string from the beginning of the string if it exists
12 |     /// </summary>
13 |     /// <param name="text"></param>
14 |     /// <param name="search"></param>
15 |     /// <returns></returns>
16 |     public static string RemoveIfStartWith(this string text, string search)
17 |     {
18 |         var pos = text.IndexOf(search, StringComparison.Ordinal);
19 |         return pos != 0 ?
text : text[search.Length..]; 20 | } 21 | } -------------------------------------------------------------------------------- /src/PowerShellAssistant/FormatSettings.settings.ps1: -------------------------------------------------------------------------------- 1 | @{ 2 | 'OpenAI.Engine' = 'Id', 'Ready' 3 | 'OpenAI.Model' = 'Id', 'Created', 'Owned_By' 4 | 'OpenAI.CreateCompletionResponse' = 'Model', 'Created', 'Choices', 'Usage' 5 | 'OpenAI.ChatCompletionRequestMessage' = { & (Get-Module PowerShellAssistant) { Format-ChatMessage $args[0] } $PSItem } 6 | 'OpenAI.ChatCompletionResponseMessage' = { & (Get-Module PowerShellAssistant) { Format-ChatMessage $args[0] } $PSItem } 7 | 'OpenAI.CreateChatCompletionRequest' = { & (Get-Module PowerShellAssistant) { Format-CreateChatCompletionRequest $args[0] } $PSItem } 8 | 'OpenAI.CreateChatCompletionResponse' = { & (Get-Module PowerShellAssistant) { Format-CreateChatCompletionResponse $args[0] } $PSItem } 9 | 'OpenAI.Choices2' = { & (Get-Module PowerShellAssistant) { Format-Choices2 $args[0] } $PSItem } 10 | 'OpenAI.ChatConversation' = { & (Get-Module PowerShellAssistant) { Format-ChatConversation $args[0] } $PSItem } 11 | 'OpenAI.CreateChatCompletionChunkedResponse' = { & (Get-Module PowerShellAssistant) { Format-CreateChatCompletionChunkedResponse $args[0] } $PSItem } 12 | } -------------------------------------------------------------------------------- /PowerShellAssistant.sln: -------------------------------------------------------------------------------- 1 | 2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio Version 17 4 | VisualStudioVersion = 17.0.31903.59 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{2500AD41-2201-4E29-BDA2-B0090C3D5629}" 7 | EndProject 8 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PowerShellAssistant", "src\PowerShellAssistant\PowerShellAssistant.csproj", "{DD35EB37-A1E7-441B-98F9-D2C75C455739}" 9 | EndProject 10 | Global 11 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 12 | Debug|Any CPU = Debug|Any CPU 13 | Release|Any CPU = Release|Any CPU 14 | EndGlobalSection 15 | GlobalSection(SolutionProperties) = preSolution 16 | HideSolutionNode = FALSE 17 | EndGlobalSection 18 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 19 | {DD35EB37-A1E7-441B-98F9-D2C75C455739}.Debug|Any CPU.ActiveCfg = Debug|Any CPU 20 | {DD35EB37-A1E7-441B-98F9-D2C75C455739}.Debug|Any CPU.Build.0 = Debug|Any CPU 21 | {DD35EB37-A1E7-441B-98F9-D2C75C455739}.Release|Any CPU.ActiveCfg = Release|Any CPU 22 | {DD35EB37-A1E7-441B-98F9-D2C75C455739}.Release|Any CPU.Build.0 = Release|Any CPU 23 | EndGlobalSection 24 | GlobalSection(NestedProjects) = preSolution 25 | {DD35EB37-A1E7-441B-98F9-D2C75C455739} = {2500AD41-2201-4E29-BDA2-B0090C3D5629} 26 | EndGlobalSection 27 | EndGlobal 28 | -------------------------------------------------------------------------------- /src/OpenAI.Client/Stringifiers.cs: -------------------------------------------------------------------------------- 1 | namespace OpenAI; 2 | 3 | public partial class Usage 4 | { 5 | public static string ToUsageString(int total, int prompt, int? 
completion)
6 |     {
7 |         if (completion.HasValue)
8 |             return $"Total: {total} (Prompt: {prompt}, Completion: {completion.Value})";
9 |         else
10 |             return $"Total: {total} (Prompt: {prompt})";
11 |     }
12 |     public override string ToString()
13 |     {
14 |         return ToUsageString(Total_tokens, Prompt_tokens, Completion_tokens);
15 |     }
16 | }
17 | public partial class Usage2
18 | {
19 |     public override string ToString()
20 |     {
21 |         return Usage.ToUsageString(Total_tokens, Prompt_tokens, Completion_tokens);
22 |     }
23 | }
24 | public partial class Usage3
25 | {
26 |     public override string ToString()
27 |     {
28 |         return Usage.ToUsageString(Total_tokens, Prompt_tokens, Completion_tokens);
29 |     }
30 | }
31 | public partial class Usage4
32 | {
33 |     public override string ToString()
34 |     {
35 |         return Usage.ToUsageString(Total_tokens, Prompt_tokens, null);
36 |     }
37 | }
38 | 
39 | public partial class ChatCompletionRequestMessage
40 | {
41 |     public override string ToString()
42 |     {
43 |         return $"{Role}: {Content}";
44 |     }
45 | }
46 | 
47 | public partial class Choices2
48 | {
49 |     public override string ToString()
50 |     {
51 |         return Index.HasValue
52 |             ? $"Choice {Index + 1} - {Message?.Role}: {Message?.Content}"
53 |             : $"{Message?.Role}: {Message?.Content}";
54 |     }
55 | }
--------------------------------------------------------------------------------
/README.MD:
--------------------------------------------------------------------------------
1 | # DEPRECATION NOTICE: I am no longer developing this tool. I recommend PowerShellAI or GitHub Copilot Chat in Visual Studio Code instead, as they cover all the use cases I had planned for it.
2 | 
3 | # PowerShell Assistant
4 | 
5 | This module provides support for the OpenAI API and tools that leverage it, including a chat client and a PSReadLine completer.
6 | 
7 | Requires PowerShell 7.3 due to some .NET 7 feature usage.
8 | 
9 | ## Code Generation
10 | 
11 | This module's core engine is an NSwag-generated C# client built from the OpenAI OpenAPI specification. As new endpoints and models are released, they can be adopted by simply regenerating the client.
12 | 
13 | ## User Interface
14 | 
15 | The `Get-Chat` (aka `chat`) and `Get-Code` (aka `code`) commands are meant for interactive scenarios. `chat` can be used as a standalone interactive tool that automatically copies any recommended code it discovers to your clipboard. `code` provides suggestions based on existing code and context, and is intended to eventually integrate as a suggestion provider into tools such as PSReadLine.
16 | 
17 | `Get-AIChat` is the underlying engine that powers `Get-Chat` and can be used for programmatic, noninteractive scenarios.
18 | 
19 | ## Formatting
20 | 
21 | This module strives to produce all UI "output" through custom format files rather than Write-Host or raw strings. This ensures you still have access to the underlying object without requiring a `-Raw` or similar parameter.
22 | 
23 | ## Alternatives
24 | 
25 | Check out Doug Finke's excellent [PowerShellAI](https://github.com/dfinke/PowerShellAI) module for a pure-PowerShell implementation of OpenAI.
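
## Example

A minimal usage sketch (an editor's illustration, not a tested transcript: it assumes an OpenAI API key is available in the `OPENAI_API_KEY` environment variable, and the prompts and the piped continuation are illustrative):

```powershell
# Connect once per session; -APIKey may be omitted if $env:OPENAI_API_KEY is set
Connect-AI

# One-shot programmatic chat: returns an OpenAI.ChatConversation
$conversation = Get-AIChat 'Return only the word PESTER'
$conversation.Response.Choices[0].Message.Content   # -> PESTER

# Continue the same conversation; the prior request/response provide the context
$conversation | Get-AIChat -Prompt 'Now return it lowercased'

# Stream tokens as they arrive (loses syntax highlighting and usage info)
Get-AIChat 'Write a haiku about PowerShell' -Stream
```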
26 | 
--------------------------------------------------------------------------------
/src/PowerShellAssistant/Publish.build.ps1:
--------------------------------------------------------------------------------
1 | #requires -module InvokeBuild
2 | param(
3 |     $Destination = $(Resolve-Path (Join-Path $PSScriptRoot '..\..\dist')),
4 |     $FormatSettingsPath = $(Join-Path $PSScriptRoot 'FormatSettings.settings.ps1')
5 | )
6 | 
7 | Task Formats {
8 |     Import-Module EzOut -ErrorAction Stop
9 | 
10 |     $formatPath = Join-Path $Destination 'Formats'
11 |     New-Item -ItemType Directory -Force -Path $formatPath | Out-Null
12 | 
13 |     [hashtable]$tableProperties = . $FormatSettingsPath
14 | 
15 |     $formatFilePaths = foreach ($kv in $tableProperties.GetEnumerator()) {
16 |         $typeName = $kv.Name
17 |         $outPath = Join-Path $formatPath $($typeName + '.Format.ps1xml')
18 |         $setting = $kv.Value
19 | 
20 |         switch ($setting.GetType()) {
21 |             ([string]) {
22 |                 Write-FormatView -TypeName $typeName -Property $kv.Value -AutoSize
23 |                 | Out-FormatData
24 |                 | Out-File -Force $outPath
25 |             }
26 |             ([object[]]) {
27 |                 Write-FormatView -TypeName $typeName -Property $kv.Value -AutoSize
28 |                 | Out-FormatData
29 |                 | Out-File -Force $outPath
30 |             }
31 |             ([ScriptBlock]) {
32 |                 Write-FormatView -TypeName $typeName -Action $setting
33 |                 | Out-FormatData
34 |                 | Out-File -Force $outPath
35 |             }
36 |             ([hashtable]) {
37 |                 throw [NotImplementedException]'TODO: Hashtable not implemented. It will allow select-style expressions to create virtual properties'
38 |             }
39 |             default {
40 |                 throw [NotSupportedException]"Unsupported format setting value type: $($setting.GetType())"
41 |             }
42 |         }
43 |         [IO.Path]::GetRelativePath($Destination, $outPath)
44 |     }
45 |     Update-ModuleManifest -Path $Destination/PowerShellAssistant.psd1 -FormatsToProcess $formatFilePaths
46 | }
47 | 
48 | Task . Formats
--------------------------------------------------------------------------------
/src/PowerShellAssistant/PowerShellAssistant.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk">
2 | 
3 |   <PropertyGroup>
4 |     <TargetFramework>net7.0</TargetFramework>
5 |     <ImplicitUsings>enable</ImplicitUsings>
6 |     <Nullable>enable</Nullable>
7 |     <OutputPath>../../dist</OutputPath>
8 |     <AppendTargetFrameworkToOutputPath>false</AppendTargetFrameworkToOutputPath>
9 |   </PropertyGroup>
10 | 
11 |   <PropertyGroup Condition="'$(Configuration)' == 'Release'">
12 |     <DebugSymbols>false</DebugSymbols>
13 |     <DebugType>none</DebugType>
14 |   </PropertyGroup>
15 | 
16 |   <!-- The remaining ItemGroup markup did not survive extraction; the surviving values
17 |        (contentFiles / All, plus two pairs of PreserveNewest copy settings) suggest a
18 |        PackageReference asset specification and copy-to-output rules for the module files. -->
19 | 
20 | </Project>
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 |     // Use IntelliSense to learn about possible attributes.
3 |     // Hover to view descriptions of existing attributes.
4 |     // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 |     "version": "0.2.0",
6 |     "configurations": [
7 |         {
8 |             "name": ".NET Core Launch (console)",
9 |             "type": "coreclr",
10 |             "request": "launch",
11 |             "WARNING01": "*********************************************************************************",
12 |             "WARNING02": "The C# extension was unable to automatically decode projects in the current",
13 |             "WARNING03": "workspace to create a runnable launch.json file.
A template launch.json file has", 14 | "WARNING04": "been created as a placeholder.", 15 | "WARNING05": "", 16 | "WARNING06": "If OmniSharp is currently unable to load your project, you can attempt to resolve", 17 | "WARNING07": "this by restoring any missing project dependencies (example: run 'dotnet restore')", 18 | "WARNING08": "and by fixing any reported errors from building the projects in your workspace.", 19 | "WARNING09": "If this allows OmniSharp to now load your project then --", 20 | "WARNING10": " * Delete this file", 21 | "WARNING11": " * Open the Visual Studio Code command palette (View->Command Palette)", 22 | "WARNING12": " * run the command: '.NET: Generate Assets for Build and Debug'.", 23 | "WARNING13": "", 24 | "WARNING14": "If your project requires a more complex launch configuration, you may wish to delete", 25 | "WARNING15": "this configuration and pick a different template using the 'Add Configuration...'", 26 | "WARNING16": "button at the bottom of this file.", 27 | "WARNING17": "*********************************************************************************", 28 | "preLaunchTask": "build", 29 | "program": "${workspaceFolder}/bin/Debug//.dll", 30 | "args": [], 31 | "cwd": "${workspaceFolder}", 32 | "console": "internalConsole", 33 | "stopAtEntry": false 34 | }, 35 | { 36 | "name": ".NET Core Attach", 37 | "type": "coreclr", 38 | "request": "attach" 39 | } 40 | ] 41 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Justin Grote @JustinWGrote 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | --- 24 | 25 | Substantial Portion from https://github.com/betalgo/openai 26 | 27 | MIT License 28 | 29 | Copyright (c) 2022 Betalgo 30 | 31 | Permission is hereby granted, free of charge, to any person obtaining a copy 32 | of this software and associated documentation files (the "Software"), to deal 33 | in the Software without restriction, including without limitation the rights 34 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 35 | copies of the Software, and to permit persons to whom the Software is 36 | furnished to do so, subject to the following conditions: 37 | 38 | The above copyright notice and this permission notice shall be included in all 39 | copies or substantial portions of the Software. 
40 | 
41 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
43 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
44 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
45 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
46 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
47 | SOFTWARE.
--------------------------------------------------------------------------------
/src/OpenAI.Client/JsonStringEnumConverter.cs:
--------------------------------------------------------------------------------
1 | namespace OpenAI;
2 | 
3 | using System.Reflection;
4 | using System.Runtime.Serialization;
5 | using System.Text.Json;
6 | using System.Text.Json.Serialization;
7 | 
8 | /// <summary>
9 | /// This custom enum converter supports custom name serialization using either the JsonPropertyName or EnumMember attributes. Required because System.Text.Json doesn't support custom enum names.
10 | /// </summary>
11 | public class JsonStringEnumConverter : JsonConverterFactory
12 | {
13 |     public override bool CanConvert(Type typeToConvert)
14 |     {
15 |         return typeToConvert.IsEnum;
16 |     }
17 | 
18 |     public override JsonConverter? CreateConverter(Type typeToConvert, JsonSerializerOptions options)
19 |     {
20 |         var type = typeof(JsonStringEnumConverter<>).MakeGenericType(typeToConvert);
21 |         return (JsonConverter)Activator.CreateInstance(type)!;
22 |     }
23 | }
24 | 
25 | public class JsonStringEnumConverter<TEnum> : JsonConverter<TEnum> where TEnum : struct, Enum
26 | {
27 |     private readonly Dictionary<TEnum, string> _enumToString = new();
28 |     private readonly Dictionary<string, TEnum> _stringToEnum = new();
29 |     private readonly Dictionary<int, TEnum> _numberToEnum = new();
30 | 
31 |     public JsonStringEnumConverter()
32 |     {
33 |         var type = typeof(TEnum);
34 |         foreach (var value in Enum.GetValues<TEnum>())
35 |         {
36 |             var enumMember = type.GetMember(value.ToString())[0];
37 |             var attr = enumMember.GetCustomAttributes<JsonPropertyNameAttribute>().FirstOrDefault();
38 | 
39 |             var serializationAttr = enumMember.GetCustomAttributes<EnumMemberAttribute>().FirstOrDefault();
40 | 
41 |             var num = Convert.ToInt32(type.GetField("value__")?.GetValue(value));
42 |             if (attr?.Name != null)
43 |             {
44 |                 _enumToString.Add(value, attr.Name);
45 |                 _stringToEnum.Add(attr.Name, value);
46 |                 _numberToEnum.Add(num, value);
47 |             }
48 |             else if (serializationAttr?.Value != null)
49 |             {
50 |                 _enumToString.Add(value, serializationAttr.Value);
51 |                 _stringToEnum.Add(serializationAttr.Value, value);
52 |                 _numberToEnum.Add(num, value);
53 |             }
54 |             else
55 |             {
56 |                 _enumToString.Add(value, value.ToString());
57 |                 _stringToEnum.Add(value.ToString(), value);
58 |                 _numberToEnum.Add(num, value);
59 |             }
60 |         }
61 |     }
62 | 
63 |     public override TEnum Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options)
64 |     {
65 |         var type = reader.TokenType;
66 |         if (type == JsonTokenType.String)
67 |         {
68 |             var stringValue = reader.GetString();
69 | 
70 |             if (stringValue != null && _stringToEnum.TryGetValue(stringValue, out var enumValue))
71 |             {
72 |                 return enumValue;
73 |             }
74 |         }
75 |         else if (type == JsonTokenType.Number)
76 |         {
77 |             var numValue = reader.GetInt32();
78 |             _numberToEnum.TryGetValue(numValue, out var enumValue);
79 |             return enumValue;
80 |         }
81 | 
82 |         return default;
83 |     }
84 | 
85 |     public override void Write(Utf8JsonWriter writer, TEnum value, JsonSerializerOptions options)
86 |     {
87 |         writer.WriteStringValue(_enumToString[value]);
88 |     }
89 | }
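
A quick sketch of what this converter changes, from a PowerShell session (assumes the OpenAI.Client assembly is already loaded, e.g. by importing the built PowerShellAssistant module; the outputs in the comments are what the converter is designed to produce, not a captured transcript):

```powershell
using namespace System.Text.Json

$options = [JsonSerializerOptions]::new()
$options.Converters.Add([OpenAI.JsonStringEnumConverter]::new())

$role = [OpenAI.ChatMessageRole]::Assistant

# With the converter registered, the EnumMember value is emitted: "assistant"
[JsonSerializer]::Serialize($role, [OpenAI.ChatMessageRole], $options)

# Default System.Text.Json behavior writes the underlying number instead: 2
[JsonSerializer]::Serialize($role, [OpenAI.ChatMessageRole], $null)
```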
--------------------------------------------------------------------------------
/src/OpenAI.Client/Extensions/HttpClientExtensions.cs:
--------------------------------------------------------------------------------
1 | //Taken with love from: https://raw.githubusercontent.com/betalgo/openai/master/OpenAI.SDK/Extensions/HttpClientExtensions.cs
2 | using System.Net.Http.Headers;
3 | using System.Net.Http.Json;
4 | using System.Text.Json;
5 | using System.Text.Json.Serialization;
6 | 
7 | namespace OpenAI.Extensions;
8 | 
9 | public static class HttpClientExtensions
10 | {
11 |     public static async Task<TResponse> PostAndReadAsAsync<TResponse>(this HttpClient client, string uri, object requestModel, CancellationToken cancellationToken = default)
12 |     {
13 |         var response = await client.PostAsJsonAsync(uri, requestModel, new JsonSerializerOptions
14 |         {
15 |             DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingDefault
16 |         }, cancellationToken);
17 |         return await response.Content.ReadFromJsonAsync<TResponse>(cancellationToken: cancellationToken) ?? throw new InvalidOperationException();
18 |     }
19 | 
20 |     public static async Task<HttpResponseMessage> PostAsync(this HttpClient client, string uri, object requestModel, JsonSerializerOptions? options = default, CancellationToken cancellationToken = default)
21 |     {
22 |         // PostAsync does not support ResponseHeadersRead, so this is a polyfill for that functionality
23 | 
24 |         options ??= new JsonSerializerOptions
25 |         {
26 |             DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingDefault
27 |         };
28 | 
29 |         var content = JsonContent.Create(requestModel, null, options);
30 | 
31 |         using var request = new HttpRequestMessage(HttpMethod.Post, uri);
32 |         request.Headers.Accept.Add(new MediaTypeWithQualityHeaderValue("text/event-stream"));
33 |         request.Content = content;
34 |         return await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, cancellationToken);
35 |     }
36 | 
37 |     public static async Task<TResponse> PostFileAndReadAsAsync<TResponse>(this HttpClient client, string uri, HttpContent content, CancellationToken cancellationToken = default)
38 |     {
39 |         var response = await client.PostAsync(uri, content, cancellationToken);
40 |         return await response.Content.ReadFromJsonAsync<TResponse>(cancellationToken: cancellationToken) ?? throw new InvalidOperationException();
41 |     }
42 | 
43 |     public static async Task<string> PostFileAndReadAsStringAsync(this HttpClient client, string uri, HttpContent content, CancellationToken cancellationToken = default)
44 |     {
45 |         var response = await client.PostAsync(uri, content, cancellationToken);
46 |         return await response.Content.ReadAsStringAsync(cancellationToken) ?? throw new InvalidOperationException();
47 |     }
48 | 
49 |     public static async Task<TResponse> DeleteAndReadAsAsync<TResponse>(this HttpClient client, string uri, CancellationToken cancellationToken = default)
50 |     {
51 |         var response = await client.DeleteAsync(uri, cancellationToken);
52 |         return await response.Content.ReadFromJsonAsync<TResponse>(cancellationToken: cancellationToken) ??
throw new InvalidOperationException();
53 |     }
54 | 
55 | #if NETSTANDARD2_0
56 |     public static async Task<string> ReadAsStringAsync(this HttpContent content, CancellationToken cancellationToken)
57 |     {
58 |         var stream = await content.ReadAsStreamAsync().WithCancellation(cancellationToken);
59 |         using var sr = new StreamReader(stream);
60 |         return await sr.ReadToEndAsync().WithCancellation(cancellationToken);
61 |     }
62 | 
63 |     public static async Task<Stream> ReadAsStreamAsync(this HttpContent content, CancellationToken cancellationToken)
64 |     {
65 |         var stream = await content.ReadAsStreamAsync().WithCancellation(cancellationToken);
66 |         return new AsyncDisposableStream(stream);
67 |     }
68 | 
69 |     public static async Task<byte[]> ReadAsByteArrayAsync(this HttpContent content, CancellationToken cancellationToken)
70 |     {
71 |         return await content.ReadAsByteArrayAsync().WithCancellation(cancellationToken);
72 |     }
73 | 
74 |     public static async Task<Stream> GetStreamAsync(this HttpClient client, string requestUri, CancellationToken cancellationToken)
75 |     {
76 |         var response = await client.GetAsync(requestUri, cancellationToken);
77 |         return await response.Content.ReadAsStreamAsync(cancellationToken);
78 |     }
79 | 
80 |     public static async Task<T> WithCancellation<T>(this Task<T> task, CancellationToken cancellationToken)
81 |     {
82 |         var tcs = new TaskCompletionSource<bool>();
83 |         using (cancellationToken.Register(s => ((TaskCompletionSource<bool>)s!).TrySetResult(true), tcs))
84 |         {
85 |             if (task != await Task.WhenAny(task, tcs.Task))
86 |             {
87 |                 throw new OperationCanceledException(cancellationToken);
88 |             }
89 |         }
90 | 
91 |         return await task;
92 |     }
93 | #endif
94 | }
--------------------------------------------------------------------------------
/src/PowerShellAssistant/PowerShellAssistant.psd1:
--------------------------------------------------------------------------------
1 | #
2 | # Module manifest for module 'PowerShellAssistant'
3 | #
4 | # Generated by: Justin Grote @JustinWGrote
5 | #
6 | # Generated on: 3/2/2023
7 | #
8 | 
9 | @{
10 | 
11 |     # Script module or binary module file associated with this manifest.
12 |     RootModule        = './PowerShellAssistant.psm1'
13 | 
14 |     # Version number of this module.
15 |     ModuleVersion     = '0.0.0'
16 | 
17 |     # Supported PSEditions
18 |     # CompatiblePSEditions = @()
19 | 
20 |     # ID used to uniquely identify this module
21 |     GUID              = '0ed853f2-c5d7-4234-8a2e-7a1c9ebabc75'
22 | 
23 |     # Author of this module
24 |     Author            = 'Justin Grote @JustinWGrote'
25 | 
26 |     # Company or vendor of this module
27 |     CompanyName       = 'Unspecified'
28 | 
29 |     # Copyright statement for this module
30 |     Copyright         = '©2023 Justin Grote @JustinWGrote. All Rights Reserved'
31 | 
32 |     # Description of the functionality provided by this module
33 |     Description       = 'Provides OpenAI and Github Copilot enabled features such as a Shell chat interface'
34 | 
35 |     # Minimum version of the PowerShell engine required by this module
36 |     PowerShellVersion = '7.2.0'
37 | 
38 |     # Name of the PowerShell host required by this module
39 |     # PowerShellHostName = ''
40 | 
41 |     # Minimum version of the PowerShell host required by this module
42 |     # PowerShellHostVersion = ''
43 | 
44 |     # Minimum version of Microsoft .NET Framework required by this module. This prerequisite is valid for the PowerShell Desktop edition only.
45 |     # DotNetFrameworkVersion = ''
46 | 
47 |     # Minimum version of the common language runtime (CLR) required by this module. This prerequisite is valid for the PowerShell Desktop edition only.
48 | # ClrVersion = '' 49 | 50 | # Processor architecture (None, X86, Amd64) required by this module 51 | # ProcessorArchitecture = '' 52 | 53 | # Modules that must be imported into the global environment prior to importing this module 54 | # RequiredModules = @() 55 | 56 | # Assemblies that must be loaded prior to importing this module 57 | # RequiredAssemblies = @() 58 | 59 | # Script files (.ps1) that are run in the caller's environment prior to importing this module. 60 | # ScriptsToProcess = @() 61 | 62 | # Type files (.ps1xml) to be loaded when importing this module 63 | # TypesToProcess = @() 64 | 65 | # Format files (.ps1xml) to be loaded when importing this module 66 | # FormatsToProcess = @() 67 | 68 | # Modules to import as nested modules of the module specified in RootModule/ModuleToProcess 69 | # NestedModules = @() 70 | 71 | # Functions to export from this module, for best performance, do not use wildcards and do not delete the entry, use an empty array if there are no functions to export. 72 | FunctionsToExport = '*' 73 | 74 | # Cmdlets to export from this module, for best performance, do not use wildcards and do not delete the entry, use an empty array if there are no cmdlets to export. 75 | CmdletsToExport = '*' 76 | 77 | # Variables to export from this module 78 | VariablesToExport = '*' 79 | 80 | # Aliases to export from this module, for best performance, do not use wildcards and do not delete the entry, use an empty array if there are no aliases to export. 81 | AliasesToExport = '*' 82 | 83 | # DSC resources to export from this module 84 | # DscResourcesToExport = @() 85 | 86 | # List of all modules packaged with this module 87 | # ModuleList = @() 88 | 89 | # List of all files packaged with this module 90 | # FileList = @() 91 | 92 | # Private data to pass to the module specified in RootModule/ModuleToProcess. This may also contain a PSData hashtable with additional module metadata used by PowerShell. 93 | PrivateData = @{ 94 | 95 | PSData = @{ 96 | 97 | # Tags applied to this module. These help with module discovery in online galleries. 98 | Tags = @('OpenAI', 'AI', 'ChatGPT', 'GPT', 'Copilot', 'GitHub') 99 | 100 | # A URL to the license for this module. 101 | LicenseUri = 'https://github.com/JustinGrote/PowerShellAssistant/blob/main/LICENSE' 102 | 103 | # A URL to the main website for this project. 104 | # ProjectUri = 'https://github.com/JustinGrote/PowerShellAssistant' 105 | 106 | # A URL to an icon representing this module. 107 | # IconUri = '' 108 | 109 | # ReleaseNotes of this module 110 | # ReleaseNotes = '' 111 | 112 | # Prerelease string of this module 113 | Prerelease = 'Source' 114 | 115 | # Flag to indicate whether the module requires explicit user acceptance for install/update/save 116 | # RequireLicenseAcceptance = $false 117 | 118 | # External dependent modules of this module 119 | # ExternalModuleDependencies = @() 120 | 121 | } # End of PSData hashtable 122 | 123 | } # End of PrivateData hashtable 124 | 125 | # HelpInfo URI of this module 126 | # HelpInfoURI = '' 127 | 128 | # Default prefix for commands exported from this module. Override the default prefix using Import-Module -Prefix. 
129 |     # DefaultCommandPrefix = ''
130 | 
131 | }
132 | 
133 | 
--------------------------------------------------------------------------------
/src/OpenAI.Client/Chat.cs:
--------------------------------------------------------------------------------
1 | 
2 | using System.Collections.ObjectModel;
3 | using System.Runtime.CompilerServices;
4 | using System.Text.Json;
5 | using System.Text.Json.Serialization;
6 | using OpenAI.Extensions;
7 | 
8 | namespace OpenAI;
9 | 
10 | /// <summary>
11 | /// Combines a chat request and response into a single object to provide context for conversations.
12 | /// </summary>
13 | public record ChatConversation
14 | {
15 |     public CreateChatCompletionRequest Request { get; set; }
16 |     public CreateChatCompletionResponse Response { get; set; }
17 | 
18 |     public ChatConversation()
19 |     {
20 |         Request = new();
21 |         Response = new();
22 |     }
23 | }
24 | 
25 | public class CreateChatCompletionChunkedResponse
26 | {
27 |     [JsonPropertyName("id")]
28 |     [JsonIgnore(Condition = JsonIgnoreCondition.Never)]
29 |     [System.ComponentModel.DataAnnotations.Required(AllowEmptyStrings = true)]
30 |     public string Id { get; set; } = default!;
31 | 
32 |     [JsonPropertyName("object")]
33 |     [JsonIgnore(Condition = JsonIgnoreCondition.Never)]
34 |     [System.ComponentModel.DataAnnotations.Required(AllowEmptyStrings = true)]
35 |     public string Object { get; set; } = default!;
36 | 
37 |     [JsonPropertyName("created")]
38 |     [JsonIgnore(Condition = JsonIgnoreCondition.Never)]
39 |     public int Created { get; set; } = default!;
40 | 
41 |     [JsonPropertyName("model")]
42 |     [JsonIgnore(Condition = JsonIgnoreCondition.Never)]
43 |     [System.ComponentModel.DataAnnotations.Required(AllowEmptyStrings = true)]
44 |     public string Model { get; set; } = default!;
45 | 
46 |     [JsonPropertyName("choices")]
47 |     [JsonIgnore(Condition = JsonIgnoreCondition.Never)]
48 |     [System.ComponentModel.DataAnnotations.Required]
49 |     public ICollection<DeltaChoice> Choices { get; set; }
50 | }
51 | 
52 | public class DeltaChoice
53 | {
54 |     [JsonPropertyName("index")]
55 |     [JsonIgnore(Condition = JsonIgnoreCondition.Never)]
56 |     [System.ComponentModel.DataAnnotations.Required(AllowEmptyStrings = true)]
57 |     public int? Index { get; set; }
58 | 
59 |     public ChatCompletionResponseMessage? Message { get; set; }
60 | 
61 |     [JsonPropertyName("finish_reason")]
62 | 
63 |     [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
64 |     public string? Finish_reason { get; set; }
65 | 
66 |     [JsonPropertyName("delta")]
67 |     [JsonIgnore(Condition = JsonIgnoreCondition.Never)]
68 |     public DeltaContent? Delta { get; set; }
69 | }
70 | 
71 | public class DeltaContent
72 | {
73 |     [JsonPropertyName("role")]
74 |     [JsonConverter(typeof(JsonStringEnumConverter))]
75 |     [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
76 |     public ChatCompletionResponseMessageRole? Role { get; set; }
77 | 
78 |     [JsonPropertyName("content")]
79 |     [JsonIgnore(Condition = JsonIgnoreCondition.Never)]
80 |     [System.ComponentModel.DataAnnotations.Required(AllowEmptyStrings = true)]
81 |     public string? Content { get; set; }
82 | }
83 | 
84 | /// <summary>
85 | /// Unifying interface for the various chat messages. Actual interface cannot be used due to enums
86 | /// </summary>
87 | public record ChatMessage
88 | {
89 |     public ChatMessageRole Role;
90 |     public string Content = string.Empty;
91 | 
92 |     // TODO: There must be a more generic way to implement this than explicit constructors
93 |     public ChatMessage(ChatCompletionResponseMessage message)
94 |     {
95 |         Role = Enum.Parse<ChatMessageRole>(message.Role.ToString());
96 |         Content = message.Content;
97 |     }
98 | 
99 |     public ChatMessage(ChatCompletionRequestMessage message)
100 |     {
101 |         Role = Enum.Parse<ChatMessageRole>(message.Role.ToString());
102 |         Content = message.Content;
103 |     }
104 | }
105 | 
106 | public enum ChatMessageRole
107 | {
108 |     [System.Runtime.Serialization.EnumMember(Value = "system")]
109 |     System = 0,
110 | 
111 |     [System.Runtime.Serialization.EnumMember(Value = "user")]
112 |     User = 1,
113 | 
114 |     [System.Runtime.Serialization.EnumMember(Value = "assistant")]
115 |     Assistant = 2,
116 | }
117 | 
118 | public partial class Client
119 | {
120 |     public IEnumerable<CreateChatCompletionChunkedResponse> CreateChatCompletionAsStream(CreateChatCompletionRequest request, CancellationToken cancellationToken = default)
121 |     {
122 |         return CreateChatCompletionAsStreamAsync(request, cancellationToken).ToBlockingEnumerable(cancellationToken);
123 |     }
124 | 
125 |     public async IAsyncEnumerable<CreateChatCompletionChunkedResponse> CreateChatCompletionAsStreamAsync(CreateChatCompletionRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default)
126 |     {
127 |         // Enable streaming if it is not already enabled
128 |         request.Stream = true;
129 | 
130 |         var urlBuilder = new System.Text.StringBuilder();
131 |         urlBuilder.Append(BaseUrl != null ? BaseUrl.TrimEnd('/') : "").Append("/chat/completions");
132 | 
133 |         using var response = await _httpClient.PostAsync(urlBuilder.ToString(), request, _settings.Value, cancellationToken);
134 | 
135 |         await using var stream = await response.Content.ReadAsStreamAsync(cancellationToken);
136 |         using var reader = new StreamReader(stream);
137 | 
138 |         // Continuously read the stream until the end of it
139 |         while (!reader.EndOfStream)
140 |         {
141 |             cancellationToken.ThrowIfCancellationRequested();
142 | 
143 |             var line = await reader.ReadLineAsync(cancellationToken);
144 |             // Skip empty lines
145 |             if (string.IsNullOrEmpty(line))
146 |             {
147 |                 continue;
148 |             }
149 | 
150 |             line = line.RemoveIfStartWith("data: ");
151 | 
152 |             // Exit the loop if the stream is done
153 |             if (line.StartsWith("[DONE]"))
154 |             {
155 |                 break;
156 |             }
157 | 
158 |             CreateChatCompletionChunkedResponse? block;
159 |             try
160 |             {
161 |                 // When the response is good, each line is a serializable CreateChatCompletionChunkedResponse
162 |                 block = JsonSerializer.Deserialize<CreateChatCompletionChunkedResponse>(line);
163 |             }
164 |             catch
165 |             {
166 |                 // When the API returns an error, it does not come back as a block, it returns a single character of text ("{").
167 |                 // In this instance, read through the rest of the response, which should be a complete object to parse.
168 |                 line += await reader.ReadToEndAsync(cancellationToken);
169 |                 block = JsonSerializer.Deserialize<CreateChatCompletionChunkedResponse>(line);
170 |                 throw;
171 |             }
172 | 
173 |             if (block is not null)
174 |             {
175 |                 yield return block;
176 |             }
177 |         }
178 |     }
179 | }
--------------------------------------------------------------------------------
/src/OpenAI.Client/TemplateDirectory/Class.liquid:
--------------------------------------------------------------------------------
1 | {%- if HasDescription -%}
2 | /// <summary>
3 | /// {{ Description | csharpdocs }}
4 | /// </summary>
5 | {%- endif -%}
6 | {%- if HasDiscriminator -%}
7 | {%- if UseSystemTextJson -%}
8 | [JsonInheritanceConverter(typeof({{ ClassName }}), "{{ Discriminator }}")]
9 | {%- else -%}
10 | [Newtonsoft.Json.JsonConverter(typeof(JsonInheritanceConverter), "{{ Discriminator }}")]
11 | {%- endif -%}
12 | {%- for derivedClass in DerivedClasses -%}
13 | {%- if derivedClass.IsAbstract != true -%}
14 | [JsonInheritanceAttribute("{{ derivedClass.Discriminator }}", typeof({{ derivedClass.ClassName }}))]
15 | {%- endif -%}
16 | {%- endfor -%}
17 | {%- endif -%}
18 | [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "{{ ToolchainVersion }}")]
19 | {%- if InheritsExceptionSchema -%}
20 | {%- if UseSystemTextJson -%}
21 | // TODO(system.text.json): What to do here?
22 | {%- else -%}
23 | [Newtonsoft.Json.JsonObjectAttribute]
24 | {%- endif -%}
25 | {%- endif -%}
26 | {%- if IsDeprecated -%}
27 | [System.Obsolete{% if HasDeprecatedMessage %}({{ DeprecatedMessage | literal }}){% endif %}]
28 | {% endif -%}
29 | {%- template Class.Annotations -%}
30 | {{ TypeAccessModifier }} {% if IsAbstract %}abstract {% endif %}partial {% if GenerateNativeRecords %}record{% else %}class{% endif %} {{ClassName}} {%- template Class.Inheritance %}
31 | {
32 | {%- if IsTuple -%}
33 |     public {{ ClassName }}({%- for tupleType in TupleTypes %}{{ tupleType }} item{{ forloop.index }}{%- if forloop.last == false %}, {% endif %}{% endfor %}) : base({%- for tupleType in TupleTypes %}item{{ forloop.index }}{%- if forloop.last == false %}, {% endif %}{% endfor %})
34 |     {
35 |     }
36 | 
37 | {%- endif -%}
38 | {%- if RenderInpc or RenderPrism -%}
39 | {%- for property in Properties -%}
40 |     private {{ property.Type }} {{ property.FieldName }}{%- if property.HasDefaultValue %} = {{ property.DefaultValue }}{% elsif GenerateNullableReferenceTypes %} = default!{%- endif %};
41 | {%- endfor -%}
42 | 
43 | {%- endif -%}
44 | {%- template Class.Constructor -%}
45 | {%- if RenderRecord -%}
46 | {% template Class.Constructor.Record -%}
47 | {%- endif -%}
48 | {%- for property in Properties -%}
49 | {%- if property.HasDescription -%}
50 |     /// <summary>
51 |     /// {{ property.Description | csharpdocs }}
52 |     /// </summary>
53 | {%- endif -%}
54 | {%- if UseSystemTextJson %}
55 |     [System.Text.Json.Serialization.JsonPropertyName("{{ property.Name }}")]
56 | {%- if property.HasJsonIgnoreCondition %}
57 |     [System.Text.Json.Serialization.JsonIgnore(Condition = {{ property.JsonIgnoreCondition }})]
58 | {%- endif -%}
59 | {%- if property.IsStringEnumArray %}
60 |     // TODO(system.text.json): Add string enum item converter
61 | {%- endif -%}
62 | {%- else -%}
63 |     [Newtonsoft.Json.JsonProperty("{{ property.Name }}", Required = {{ property.JsonPropertyRequiredCode }}{% if property.IsStringEnumArray %}, ItemConverterType = typeof(Newtonsoft.Json.Converters.StringEnumConverter){% endif %})]
64 | {%- endif -%}
65 | {%- if property.RenderRequiredAttribute -%}
66 |     [System.ComponentModel.DataAnnotations.Required{% if property.AllowEmptyStrings %}(AllowEmptyStrings = 
true){% endif %}] 67 | {%- endif -%} 68 | {%- if property.RenderRangeAttribute -%} 69 | [System.ComponentModel.DataAnnotations.Range({{ property.RangeMinimumValue }}, {{ property.RangeMaximumValue }})] 70 | {%- endif -%} 71 | {%- if property.RenderStringLengthAttribute -%} 72 | [System.ComponentModel.DataAnnotations.StringLength({{ property.StringLengthMaximumValue }}{% if property.StringLengthMinimumValue > 0 %}, MinimumLength = {{ property.StringLengthMinimumValue }}{% endif %})] 73 | {%- endif -%} 74 | {%- if property.RenderMinLengthAttribute -%} 75 | [System.ComponentModel.DataAnnotations.MinLength({{ property.MinLengthAttribute }})] 76 | {%- endif -%} 77 | {%- if property.RenderMaxLengthAttribute -%} 78 | [System.ComponentModel.DataAnnotations.MaxLength({{ property.MaxLengthAttribute }})] 79 | {%- endif -%} 80 | {%- if property.RenderRegularExpressionAttribute -%} 81 | [System.ComponentModel.DataAnnotations.RegularExpression(@"{{ property.RegularExpressionValue }}")] 82 | {%- endif -%} 83 | {%- if property.IsDate and UseDateFormatConverter -%} 84 | {%- if UseSystemTextJson -%} 85 | [System.Text.Json.Serialization.JsonConverter(typeof(DateFormatConverter))] 86 | {%- else -%} 87 | [Newtonsoft.Json.JsonConverter(typeof(DateFormatConverter))] 88 | {%- endif -%} 89 | {%- endif -%} 90 | {%- if property.IsDeprecated -%} 91 | [System.Obsolete{% if property.HasDeprecatedMessage %}({{ property.DeprecatedMessage | literal }}){% endif %}] 92 | {%- endif -%} 93 | {%- template Class.Property.Annotations -%} 94 | public {{ property.Type }} {{ property.PropertyName }}{% if RenderInpc == false and RenderPrism == false %} { get; {% if property.HasSetter and RenderRecord == false %}set; {% elsif RenderRecord and GenerateNativeRecords %}init; {% endif %}}{% if property.HasDefaultValue and RenderRecord == false %} = {{ property.DefaultValue }};{% elsif GenerateNullableReferenceTypes and RenderRecord == false %} = default!;{% endif %} 95 | {% else %} 96 | { 97 | get { return {{ property.FieldName }}; } 98 | 99 | {%- if property.HasSetter -%} 100 | {%- if RenderInpc -%} 101 | {{PropertySetterAccessModifier}}set 102 | { 103 | if ({{ property.FieldName }} != value) 104 | { 105 | {{ property.FieldName }} = value; 106 | RaisePropertyChanged(); 107 | } 108 | } 109 | {%- else -%} 110 | {{PropertySetterAccessModifier}}set { SetProperty(ref {{ property.FieldName }}, value); } 111 | {%- endif -%} 112 | {%- endif -%} 113 | } 114 | {%- endif %} 115 | {%- endfor -%} 116 | 117 | {%- if GenerateAdditionalPropertiesProperty -%} 118 | 119 | private System.Collections.Generic.IDictionary{% if GenerateNullableReferenceTypes %}?{% endif %} _additionalProperties; 120 | 121 | {%- if UseSystemTextJson -%} 122 | [System.Text.Json.Serialization.JsonExtensionData] 123 | {%- else -%} 124 | [Newtonsoft.Json.JsonExtensionData] 125 | {%- endif -%} 126 | public System.Collections.Generic.IDictionary AdditionalProperties 127 | { 128 | get { return _additionalProperties ?? 
(_additionalProperties = new System.Collections.Generic.Dictionary()); } 129 | {{PropertySetterAccessModifier}}set { _additionalProperties = value; } 130 | } 131 | 132 | {%- endif -%} 133 | {%- if GenerateJsonMethods -%} 134 | {% template Class.ToJson %} 135 | {% template Class.FromJson %} 136 | 137 | {%- endif -%} 138 | {%- if RenderInpc -%} 139 | {% template Class.Inpc %} 140 | {%- endif -%} 141 | {% template Class.Body %} 142 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## Ignore Visual Studio temporary files, build results, and 2 | ## files generated by popular Visual Studio add-ons. 3 | ## 4 | ## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore 5 | 6 | # User-specific files 7 | *.rsuser 8 | *.suo 9 | *.user 10 | *.userosscache 11 | *.sln.docstates 12 | 13 | # User-specific files (MonoDevelop/Xamarin Studio) 14 | *.userprefs 15 | 16 | # Mono auto generated files 17 | mono_crash.* 18 | 19 | # Build results 20 | [Dd]ebug/ 21 | [Dd]ebugPublic/ 22 | [Rr]elease/ 23 | [Rr]eleases/ 24 | x64/ 25 | x86/ 26 | [Ww][Ii][Nn]32/ 27 | [Aa][Rr][Mm]/ 28 | [Aa][Rr][Mm]64/ 29 | bld/ 30 | [Bb]in/ 31 | [Oo]bj/ 32 | [Ll]og/ 33 | [Ll]ogs/ 34 | 35 | # Visual Studio 2015/2017 cache/options directory 36 | .vs/ 37 | # Uncomment if you have tasks that create the project's static files in wwwroot 38 | #wwwroot/ 39 | 40 | # Visual Studio 2017 auto generated files 41 | Generated\ Files/ 42 | 43 | # MSTest test Results 44 | [Tt]est[Rr]esult*/ 45 | [Bb]uild[Ll]og.* 46 | 47 | # NUnit 48 | *.VisualState.xml 49 | TestResult.xml 50 | nunit-*.xml 51 | 52 | # Build Results of an ATL Project 53 | [Dd]ebugPS/ 54 | [Rr]eleasePS/ 55 | dlldata.c 56 | 57 | # Benchmark Results 58 | BenchmarkDotNet.Artifacts/ 59 | 60 | # .NET 61 | project.lock.json 62 | project.fragment.lock.json 63 | artifacts/ 64 | 65 | # Tye 66 | .tye/ 67 | 68 | # ASP.NET Scaffolding 69 | ScaffoldingReadMe.txt 70 | 71 | # StyleCop 72 | StyleCopReport.xml 73 | 74 | # Files built by Visual Studio 75 | *_i.c 76 | *_p.c 77 | *_h.h 78 | *.ilk 79 | *.meta 80 | *.obj 81 | *.iobj 82 | *.pch 83 | *.pdb 84 | *.ipdb 85 | *.pgc 86 | *.pgd 87 | *.rsp 88 | *.sbr 89 | *.tlb 90 | *.tli 91 | *.tlh 92 | *.tmp 93 | *.tmp_proj 94 | *_wpftmp.csproj 95 | *.log 96 | *.tlog 97 | *.vspscc 98 | *.vssscc 99 | .builds 100 | *.pidb 101 | *.svclog 102 | *.scc 103 | 104 | # Chutzpah Test files 105 | _Chutzpah* 106 | 107 | # Visual C++ cache files 108 | ipch/ 109 | *.aps 110 | *.ncb 111 | *.opendb 112 | *.opensdf 113 | *.sdf 114 | *.cachefile 115 | *.VC.db 116 | *.VC.VC.opendb 117 | 118 | # Visual Studio profiler 119 | *.psess 120 | *.vsp 121 | *.vspx 122 | *.sap 123 | 124 | # Visual Studio Trace Files 125 | *.e2e 126 | 127 | # TFS 2012 Local Workspace 128 | $tf/ 129 | 130 | # Guidance Automation Toolkit 131 | *.gpState 132 | 133 | # ReSharper is a .NET coding add-in 134 | _ReSharper*/ 135 | *.[Rr]e[Ss]harper 136 | *.DotSettings.user 137 | 138 | # TeamCity is a build add-in 139 | _TeamCity* 140 | 141 | # DotCover is a Code Coverage Tool 142 | *.dotCover 143 | 144 | # AxoCover is a Code Coverage Tool 145 | .axoCover/* 146 | !.axoCover/settings.json 147 | 148 | # Coverlet is a free, cross platform Code Coverage Tool 149 | coverage*.json 150 | coverage*.xml 151 | coverage*.info 152 | 153 | # Visual Studio code coverage results 154 | *.coverage 155 | *.coveragexml 156 | 157 | # NCrunch 158 | _NCrunch_* 159 | 
.*crunch*.local.xml 160 | nCrunchTemp_* 161 | 162 | # MightyMoose 163 | *.mm.* 164 | AutoTest.Net/ 165 | 166 | # Web workbench (sass) 167 | .sass-cache/ 168 | 169 | # Installshield output folder 170 | [Ee]xpress/ 171 | 172 | # DocProject is a documentation generator add-in 173 | DocProject/buildhelp/ 174 | DocProject/Help/*.HxT 175 | DocProject/Help/*.HxC 176 | DocProject/Help/*.hhc 177 | DocProject/Help/*.hhk 178 | DocProject/Help/*.hhp 179 | DocProject/Help/Html2 180 | DocProject/Help/html 181 | 182 | # Click-Once directory 183 | publish/ 184 | 185 | # Publish Web Output 186 | *.[Pp]ublish.xml 187 | *.azurePubxml 188 | # Note: Comment the next line if you want to checkin your web deploy settings, 189 | # but database connection strings (with potential passwords) will be unencrypted 190 | *.pubxml 191 | *.publishproj 192 | 193 | # Microsoft Azure Web App publish settings. Comment the next line if you want to 194 | # checkin your Azure Web App publish settings, but sensitive information contained 195 | # in these scripts will be unencrypted 196 | PublishScripts/ 197 | 198 | # NuGet Packages 199 | *.nupkg 200 | # NuGet Symbol Packages 201 | *.snupkg 202 | # The packages folder can be ignored because of Package Restore 203 | **/[Pp]ackages/* 204 | # except build/, which is used as an MSBuild target. 205 | !**/[Pp]ackages/build/ 206 | # Uncomment if necessary however generally it will be regenerated when needed 207 | #!**/[Pp]ackages/repositories.config 208 | # NuGet v3's project.json files produces more ignorable files 209 | *.nuget.props 210 | *.nuget.targets 211 | 212 | # Microsoft Azure Build Output 213 | csx/ 214 | *.build.csdef 215 | 216 | # Microsoft Azure Emulator 217 | ecf/ 218 | rcf/ 219 | 220 | # Windows Store app package directories and files 221 | AppPackages/ 222 | BundleArtifacts/ 223 | Package.StoreAssociation.xml 224 | _pkginfo.txt 225 | *.appx 226 | *.appxbundle 227 | *.appxupload 228 | 229 | # Visual Studio cache files 230 | # files ending in .cache can be ignored 231 | *.[Cc]ache 232 | # but keep track of directories ending in .cache 233 | !?*.[Cc]ache/ 234 | 235 | # Others 236 | ClientBin/ 237 | ~$* 238 | *~ 239 | *.dbmdl 240 | *.dbproj.schemaview 241 | *.jfm 242 | *.pfx 243 | *.publishsettings 244 | orleans.codegen.cs 245 | 246 | # Including strong name files can present a security risk 247 | # (https://github.com/github/gitignore/pull/2483#issue-259490424) 248 | #*.snk 249 | 250 | # Since there are multiple workflows, uncomment next line to ignore bower_components 251 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) 252 | #bower_components/ 253 | 254 | # RIA/Silverlight projects 255 | Generated_Code/ 256 | 257 | # Backup & report files from converting an old project file 258 | # to a newer Visual Studio version. 
Backup files are not needed, 259 | # because we have git ;-) 260 | _UpgradeReport_Files/ 261 | Backup*/ 262 | UpgradeLog*.XML 263 | UpgradeLog*.htm 264 | ServiceFabricBackup/ 265 | *.rptproj.bak 266 | 267 | # SQL Server files 268 | *.mdf 269 | *.ldf 270 | *.ndf 271 | 272 | # Business Intelligence projects 273 | *.rdl.data 274 | *.bim.layout 275 | *.bim_*.settings 276 | *.rptproj.rsuser 277 | *- [Bb]ackup.rdl 278 | *- [Bb]ackup ([0-9]).rdl 279 | *- [Bb]ackup ([0-9][0-9]).rdl 280 | 281 | # Microsoft Fakes 282 | FakesAssemblies/ 283 | 284 | # GhostDoc plugin setting file 285 | *.GhostDoc.xml 286 | 287 | # Node.js Tools for Visual Studio 288 | .ntvs_analysis.dat 289 | node_modules/ 290 | 291 | # Visual Studio 6 build log 292 | *.plg 293 | 294 | # Visual Studio 6 workspace options file 295 | *.opt 296 | 297 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 298 | *.vbw 299 | 300 | # Visual Studio 6 auto-generated project file (contains which files were open etc.) 301 | *.vbp 302 | 303 | # Visual Studio 6 workspace and project file (working project files containing files to include in project) 304 | *.dsw 305 | *.dsp 306 | 307 | # Visual Studio 6 technical files 308 | *.ncb 309 | *.aps 310 | 311 | # Visual Studio LightSwitch build output 312 | **/*.HTMLClient/GeneratedArtifacts 313 | **/*.DesktopClient/GeneratedArtifacts 314 | **/*.DesktopClient/ModelManifest.xml 315 | **/*.Server/GeneratedArtifacts 316 | **/*.Server/ModelManifest.xml 317 | _Pvt_Extensions 318 | 319 | # Paket dependency manager 320 | .paket/paket.exe 321 | paket-files/ 322 | 323 | # FAKE - F# Make 324 | .fake/ 325 | 326 | # CodeRush personal settings 327 | .cr/personal 328 | 329 | # Python Tools for Visual Studio (PTVS) 330 | __pycache__/ 331 | *.pyc 332 | 333 | # Cake - Uncomment if you are using it 334 | # tools/** 335 | # !tools/packages.config 336 | 337 | # Tabs Studio 338 | *.tss 339 | 340 | # Telerik's JustMock configuration file 341 | *.jmconfig 342 | 343 | # BizTalk build output 344 | *.btp.cs 345 | *.btm.cs 346 | *.odx.cs 347 | *.xsd.cs 348 | 349 | # OpenCover UI analysis results 350 | OpenCover/ 351 | 352 | # Azure Stream Analytics local run output 353 | ASALocalRun/ 354 | 355 | # MSBuild Binary and Structured Log 356 | *.binlog 357 | 358 | # NVidia Nsight GPU debugger configuration file 359 | *.nvuser 360 | 361 | # MFractors (Xamarin productivity tool) working folder 362 | .mfractor/ 363 | 364 | # Local History for Visual Studio 365 | .localhistory/ 366 | 367 | # Visual Studio History (VSHistory) files 368 | .vshistory/ 369 | 370 | # BeatPulse healthcheck temp database 371 | healthchecksdb 372 | 373 | # Backup folder for Package Reference Convert tool in Visual Studio 2017 374 | MigrationBackup/ 375 | 376 | # Ionide (cross platform F# VS Code tools) working folder 377 | .ionide/ 378 | 379 | # Fody - auto-generated XML schema 380 | FodyWeavers.xsd 381 | 382 | # VS Code files for those working on multiple tools 383 | .vscode/* 384 | !.vscode/settings.json 385 | !.vscode/tasks.json 386 | !.vscode/launch.json 387 | !.vscode/extensions.json 388 | *.code-workspace 389 | 390 | # Local History for Visual Studio Code 391 | .history/ 392 | 393 | # Windows Installer files from build outputs 394 | *.cab 395 | *.msi 396 | *.msix 397 | *.msm 398 | *.msp 399 | 400 | # JetBrains Rider 401 | *.sln.iml 402 | 403 | ## 404 | ## Visual studio for Mac 405 | ## 406 | 407 | 408 | # globs 409 | Makefile.in 410 | *.userprefs 411 | *.usertasks 412 | config.make 413 | config.status 414 | aclocal.m4 415 | 
install-sh 416 | autom4te.cache/ 417 | *.tar.gz 418 | tarballs/ 419 | test-results/ 420 | 421 | # Mac bundle stuff 422 | *.dmg 423 | *.app 424 | 425 | # content below from: https://github.com/github/gitignore/blob/master/Global/macOS.gitignore 426 | # General 427 | .DS_Store 428 | .AppleDouble 429 | .LSOverride 430 | 431 | # Icon must end with two \r 432 | Icon 433 | 434 | 435 | # Thumbnails 436 | ._* 437 | 438 | # Files that might appear in the root of a volume 439 | .DocumentRevisions-V100 440 | .fseventsd 441 | .Spotlight-V100 442 | .TemporaryItems 443 | .Trashes 444 | .VolumeIcon.icns 445 | .com.apple.timemachine.donotpresent 446 | 447 | # Directories potentially created on remote AFP share 448 | .AppleDB 449 | .AppleDesktop 450 | Network Trash Folder 451 | Temporary Items 452 | .apdisk 453 | 454 | # content below from: https://github.com/github/gitignore/blob/master/Global/Windows.gitignore 455 | # Windows thumbnail cache files 456 | Thumbs.db 457 | ehthumbs.db 458 | ehthumbs_vista.db 459 | 460 | # Dump file 461 | *.stackdump 462 | 463 | # Folder config file 464 | [Dd]esktop.ini 465 | 466 | # Recycle Bin used on file shares 467 | $RECYCLE.BIN/ 468 | 469 | # Windows Installer files 470 | *.cab 471 | *.msi 472 | *.msix 473 | *.msm 474 | *.msp 475 | 476 | # Windows shortcuts 477 | *.lnk 478 | 479 | **/dist -------------------------------------------------------------------------------- /src/PowerShellAssistant/PowerShellAssistant.psm1: -------------------------------------------------------------------------------- 1 | using namespace OpenAI 2 | using namespace System.Net.Http 3 | using namespace System.Net.Http.Headers 4 | using namespace System.Collections.Generic 5 | using namespace System.Management.Automation 6 | using namespace System.Reflection 7 | 8 | $ErrorActionPreference = 'Stop' 9 | #TODO: This should be better 10 | $debugBinPath = Join-Path $PSScriptRoot '/bin/Debug/net7.0' 11 | if (Test-Path $debugBinPath) { 12 | Write-Warning "Debug build detected. Using assemblies at $debugBinPath" 13 | Add-Type -Path $debugBinPath/*.dll 14 | } else { 15 | Add-Type -Path $PSScriptRoot/*.dll 16 | } 17 | 18 | #These are the cheapest models for testing, opt into more powerful models 19 | $SCRIPT:aiDefaultModel = 'ada' 20 | $SCRIPT:aiDefaultChatModel = 'gpt-3.5-turbo' 21 | $SCRIPT:aiDefaultCodeModel = 'code-davinci-002' 22 | 23 | 24 | #region Public 25 | function Connect-AI { 26 | [CmdletBinding()] 27 | param( 28 | # Provide your API Key as the password, and optionally your organization ID as the username 29 | [string]$APIKey, 30 | 31 | # By default, this uses the OpenAI API. Specify this if you want to use GitHub Copilot (UNSUPPORTED) 32 | [switch]$GitHubCopilot, 33 | 34 | # Don't set this client as the default client. You can pass the client to the various commands instead. Implies -PassThru 35 | [switch]$NoDefault, 36 | 37 | # Return the client for use in other commands 38 | [switch]$PassThru, 39 | 40 | #Replace the existing default client if it exists 41 | [switch]$Force 42 | ) 43 | if ($SCRIPT:aiClient -and (-not $NoDefault -and -not $Force)) { 44 | Write-Warning 'Already connected to an AI engine. You can use -NoDefault to not set this client as the default client, or -Force to replace the existing default client.' 
45 |         return
46 |     }
47 | 
48 |     if (-not $APIKey -and $env:OPENAI_API_KEY) {
49 |         Write-Verbose 'Using API key from environment variable OPENAI_API_KEY'
50 |         $APIKey = $env:OPENAI_API_KEY
51 |     }
52 | 
53 |     $client = New-AIClient -APIKey $APIKey -GithubCopilot:$GitHubCopilot
54 | 
55 |     if ($NoDefault) {
56 |         $PassThru = $true
57 |     } else {
58 |         $SCRIPT:aiClient = $client
59 |     }
60 | 
61 |     if ($PassThru) {
62 |         return $client
63 |     }
64 | }
65 | 
66 | filter Get-AIModel {
67 |     [OutputType([OpenAI.Model])]
68 |     [CmdletBinding()]
69 |     param(
70 |         # The ID of the model to get. If not specified, returns all models.
71 |         [Parameter(ValueFromPipeline)][string]$Id,
72 |         [ValidateNotNullOrEmpty()][OpenAI.Client]$Client = $SCRIPT:aiClient
73 |     )
74 |     if (-not $Client) {
75 |         Assert-Connected
76 |         $Client = $SCRIPT:aiClient
77 |     }
78 | 
79 |     if ($Id) {
80 |         return $Client.RetrieveModel($Id)
81 |     }
82 | 
83 |     $Client.ListModels().Data
84 | }
85 | 
86 | function Get-AIEngine {
87 |     [OutputType([OpenAI.Engine])]
88 |     [CmdletBinding()]
89 |     param(
90 |         [ValidateNotNullOrEmpty()][OpenAI.Client]$Client = $SCRIPT:aiClient
91 |     )
92 |     Write-Warning 'Engines are deprecated. Use Get-AIModel instead.'
93 |     if (-not $Client) {
94 |         Assert-Connected
95 |         $Client = $SCRIPT:aiClient
96 |     }
97 | 
98 |     $Client.ListEngines()
99 |     | ConvertFrom-ListResponse
100 | }
101 | 
102 | function Get-AICompletion {
103 |     [CmdletBinding()]
104 |     [OutputType([OpenAI.CreateCompletionResponse])]
105 |     param(
106 |         [Parameter(Mandatory)]$Prompt,
107 |         #The name of the model to use.
108 |         [ValidateSet([AvailableModels])][String]$Model = $SCRIPT:aiDefaultModel,
109 |         [ValidateNotNullOrEmpty()][OpenAI.Client]$Client = $SCRIPT:aiClient,
110 |         [ValidateNotNullOrEmpty()][uint]$MaxTokens = 1000,
111 |         [ValidateNotNullOrEmpty()][uint]$Temperature = 0
112 |     )
113 |     if (-not $Client) {
114 |         Assert-Connected
115 |         $Client = $SCRIPT:aiClient
116 |     }
117 | 
118 |     $request = [CreateCompletionRequest]@{
119 |         Prompt      = $Prompt
120 |         Stream      = $false
121 |         Model       = $Model
122 |         Max_tokens  = $MaxTokens
123 |         Temperature = $Temperature
124 |     }
125 |     $Client.CreateCompletion($request)
126 | }
127 | 
128 | function Get-AICode {
129 |     <#
130 |     .SYNOPSIS
131 |     Utilizes the Codex models to fetch a code completion given a prompt.
132 |     .LINK
133 |     https://platform.openai.com/docs/guides/code/introduction
134 |     #>
135 |     [OutputType([OpenAI.CreateCompletionResponse])]
136 |     [CmdletBinding()]
137 |     param(
138 |         [string[]]$Prompt,
139 |         #The name of the model to use.
140 | $Language = 'PowerShell 7', 141 | [ValidateSet([AvailableModels])][String]$Model = $SCRIPT:aiDefaultCodeModel, 142 | [ValidateNotNullOrEmpty()][OpenAI.Client]$Client = $SCRIPT:aiClient, 143 | [ValidateNotNullOrEmpty()][uint]$MaxTokens = 1000, 144 | [ValidateNotNullOrEmpty()][uint]$Temperature = 0 145 | ) 146 | if (-not $Client) { 147 | Assert-Connected 148 | $Client = $SCRIPT:aiClient 149 | } 150 | 151 | #Add a language specifier to the start of the prompt. A [string[]] is a fixed-size array, so prepend with + rather than .Insert() 152 | $Prompt = @("#$Language") + $Prompt 153 | 154 | Get-AICompletion -Prompt $Prompt -Model $Model -MaxTokens $MaxTokens -Temperature $Temperature 155 | } 156 | 157 | function Get-AIChat { 158 | [OutputType([OpenAI.ChatConversation])] 159 | [CmdletBinding(DefaultParameterSetName = 'Prompt')] 160 | param( 161 | #Include one or more prompts to start the conversation 162 | [Parameter(Mandatory, Position = 0, ValueFromPipeline, ParameterSetName = 'Prompt')] 163 | [Parameter(ParameterSetName = 'ChatSession')] 164 | [OpenAI.ChatCompletionRequestMessage[]]$Prompt, 165 | 166 | #Supply a previous chat session to add new responses to it 167 | [Parameter(Mandatory, ValueFromPipeline, ParameterSetName = 'ChatSession')] 168 | [Parameter(ParameterSetName = 'Prompt')] 169 | [OpenAI.ChatConversation]$ChatSession, 170 | 171 | #The name of the model to use. 172 | [ValidateSet([AvailableModels])] 173 | [String]$Model = $SCRIPT:aiDefaultChatModel, 174 | 175 | [ValidateNotNullOrEmpty()] 176 | [OpenAI.Client]$Client = $SCRIPT:aiClient, 177 | 178 | [ValidateNotNullOrEmpty()] 179 | [uint]$MaxTokens = 1000, 180 | 181 | [ValidateNotNullOrEmpty()] 182 | [uint]$Temperature = 0, 183 | 184 | #Stream the response. You will lose syntax highlighting and usage info. 185 | [switch]$Stream 186 | ) 187 | if (-not $Client) { 188 | Assert-Connected 189 | $Client = $SCRIPT:aiClient 190 | } 191 | 192 | $ChatSession ??= [ChatConversation]@{ 193 | Request = @{ 194 | Messages = [List[ChatCompletionRequestMessage]]@() 195 | Stream = $false 196 | Model = $Model 197 | Max_tokens = $MaxTokens 198 | Temperature = $Temperature 199 | } 200 | } 201 | 202 | #Append any response to the initial request. This is the continuation of a chat. 203 | $responseChoices = $ChatSession.Response.Choices 204 | $requestMessages = $ChatSession.Request.Messages 205 | if ($responseChoices.Count -gt 0) { 206 | if ($responseChoices.count -gt 1) { 207 | Write-Error 'The previous chat response contained more than one choice. Continuing a conversation with multiple choices is not supported.' -Category 'NotImplemented' 208 | return 209 | } 210 | $requestMessages.Add($responseChoices[0].Message) 211 | } 212 | 213 | foreach ($PromptItem in $Prompt) { 214 | $requestMessages.Add( 215 | $PromptItem 216 | ) 217 | } 218 | 219 | if ($Stream) { 220 | $Client.CreateChatCompletionAsStream($ChatSession.Request) 221 | | ForEach-Object { 222 | $PSItem 223 | } 224 | Write-Host 225 | return 226 | } 227 | 228 | $chatResponse = $Client.CreateChatCompletion($ChatSession.Request) 229 | $chatSession.Response = $chatResponse 230 | 231 | $price = Get-UsagePrice -Model $chatResponse.Model -Total $chatResponse.Usage.Total_tokens 232 | 233 | Write-Verbose "Chat usage - $($chatResponse.Usage) $($price ?
"$price " : $null)for Id $($chatResponse.Id)" 234 | return $chatSession 235 | 236 | #Stream the response 237 | } 238 | #endregion Public 239 | 240 | #Region Private 241 | function New-AIClient { 242 | [OutputType([OpenAI.Client])] 243 | param( 244 | [string]$ApiKey, 245 | [Switch]$GithubCopilot 246 | ) 247 | 248 | if (-not $APIKey) { 249 | Write-Error 'You must supply an OpenAI API key via the -APIKey parameter or by setting the OPENAI_API_KEY variable' 250 | return 251 | } 252 | 253 | if ($SCRIPT:client -and -not $Force) { 254 | Write-Warning 'Assistant is already connected. Please use -Force to reset the client.' 255 | return 256 | } 257 | $httpClient = [HttpClient]::new() 258 | $httpClient.DefaultRequestHeaders.Authorization = [AuthenticationHeaderValue]::new('Bearer', $APIKey) 259 | 260 | $aiClient = [Client]::new($httpClient) 261 | 262 | if ($GitHubCopilot) { 263 | $aiClient.BaseUrl = 'https://copilot-proxy.githubusercontent.com' 264 | } 265 | 266 | return $aiClient 267 | } 268 | 269 | function Assert-Connected { 270 | if (-not $SCRIPT:aiClient) { 271 | Connect-AI 272 | } 273 | } 274 | 275 | #If the returned result was a list, return the actual data 276 | filter ConvertFrom-ListResponse { 277 | if ($PSItem.Object -ne 'list') { return } 278 | return $PSItem.Data 279 | } 280 | 281 | #endregion Private 282 | 283 | 284 | # function Connect-Copilot { 285 | # [CmdletBinding()] 286 | # param( 287 | # # Provide your Copilot API Key as the password, and optionally your organization ID as the username 288 | # [string]$Token, 289 | 290 | # #Reset if a client already exists 291 | # [Switch]$Force 292 | # ) 293 | # $ErrorActionPreference = 'Stop' 294 | 295 | # if ($SCRIPT:GHClient -and -not $Force) { 296 | # Write-Warning 'Copilot is already connected. Please use -Force to reset the client.' 297 | # return 298 | # } 299 | 300 | # if ($SCRIPT:GHCopilotToken -and -not $Force) { 301 | # Write-Warning 'GitHub Copilot is already connected. Please use -Force to reset the client.' 
302 | # return 303 | # } 304 | 305 | # $SCRIPT:GHCopilotToken = if (-not $Token) { 306 | # #Try to autodiscover it from GitHub Copilot CLI 307 | # if (-not (Test-Path $HOME/.copilot-cli-access-token)) { 308 | # Write-Error "To use PowerShell Assistant with GitHub Copilot, you must install GitHub Copilot CLI and run 'github-copilot-cli auth' at least once to generate a Copilot Personal Access Token (PAT)" 309 | # return 310 | # } 311 | # Get-Content $HOME/.copilot-cli-access-token 312 | # } else { 313 | # $Token 314 | # } 315 | 316 | # $config = [OpenAIOptions]@{ 317 | # ApiKey = Update-GitHubCopilotToken $SCRIPT:GHCopilotToken 318 | # BaseDomain = 'https://copilot-proxy.githubusercontent.com' 319 | # DefaultEngineId = 'copilot-labs-codex' 320 | # } 321 | 322 | # $SCRIPT:GHClient = [OpenAIService]::new($config) 323 | # } 324 | 325 | # function Get-CopilotSuggestion { 326 | # [CmdletBinding()] 327 | # param( 328 | # [Parameter(Mandatory)][string]$prompt, 329 | # [ValidateNotNullOrEmpty()]$client = $SCRIPT:GHClient 330 | # ) 331 | 332 | # if (-not $SCRIPT:GHClient) { Connect-Copilot } 333 | # $request = [CompletionCreateRequest]@{ 334 | # N = 1 335 | # StopAsList = [string[]]@('---', '\n') 336 | # MaxTokens = 256 337 | # Temperature = 0 338 | # TopP = 1 339 | # Prompt = $prompt 340 | # Stream = $true 341 | # } 342 | # $resultStream = $client.Completions.CreateCompletionAsStream($request).GetAwaiter.GetResult() 343 | # foreach ($resultItem in $resultStream) { 344 | # Write-Host -NoNewline 'NEW TOKEN' 345 | # #This gives us intellisense in vscode 346 | # [CompletionCreateResponse]$result = $resultItem 347 | # if ($result.Error) { 348 | # Write-Error $result.Error 349 | # return 350 | # } 351 | # $token = $result.Choices[0].Text 352 | # Write-Host -NoNewline -fore DarkGray $token 353 | # } 354 | # Write-Host 'DONE' 355 | # } 356 | 357 | 358 | # function Assert-Connected { 359 | # if (-not $SCRIPT:client) { Connect-Assistant } 360 | # } 361 | 362 | function Update-GitHubCopilotToken { 363 | <# 364 | .SYNOPSIS 365 | Fetches the latest token for GitHub Copilot 366 | #> 367 | param( 368 | [ValidateNotNullOrEmpty()] 369 | $GitHubToken = $SCRIPT:GHCopilotToken 370 | ) 371 | $ErrorActionPreference = 'Stop' 372 | $response = Invoke-RestMethod 'https://api.github.com/copilot_internal/v2/token' -Headers @{ 373 | Authorization = "token $($GitHubToken.trim())" 374 | } 375 | return $response.token 376 | } 377 | 378 | function Get-Chat { 379 | <# 380 | .SYNOPSIS 381 | Provides an interactive assistant for PowerShell. Mostly a frontend to Get-AIChat 382 | #> 383 | [CmdletBinding()] 384 | param( 385 | #Provide a chat prompt to initiate the conversation 386 | [string[]]$chatPrompt, 387 | 388 | #If you just want the result and don't want to be prompted for further replies, specify this 389 | [Switch]$NoReply, 390 | 391 | #By default, the latest code recommendation is copied to your clipboard, specify this to disable the behavior 392 | [switch]$NoClipboard, 393 | 394 | [ValidateNotNullOrEmpty()] 395 | #Specify a prompt that guides Chat how to behave. By default, it is told to prefer PowerShell as a language. 396 | [string]$SystemPrompt = 'PowerShell syntax and be brief', 397 | 398 | #Maximum tokens to generate. 
Defaults to 500 to minimize accidental API billing 399 | [ValidateNotNullOrEmpty()] 400 | [uint]$MaxTokens = 500, 401 | 402 | [ValidateSet([AvailableModels])] 403 | [string]$Model 404 | ) 405 | 406 | begin { 407 | $ErrorActionPreference = 'Stop' 408 | Assert-Connected 409 | [List[ChatCompletionRequestMessage]]$chatHistory = @( 410 | [ChatCompletionRequestMessage]@{ 411 | Role = [ChatCompletionRequestMessageRole]::System 412 | Content = $SystemPrompt 413 | } 414 | ) 415 | } 416 | 417 | process { 418 | do { 419 | $chatPrompt ??= Read-Host -Prompt 'You' 420 | foreach ($promptItem in $chatPrompt) { 421 | $chatHistory.Add( 422 | ([ChatCompletionRequestMessage]$promptItem) 423 | ) 424 | } 425 | 426 | $chatParams = @{ 427 | Prompt = $chatHistory 428 | MaxTokens = $MaxTokens 429 | Stream = $true 430 | } 431 | if ($Model) { $chatParams.Model = $Model } 432 | 433 | [List[CreateChatCompletionChunkedResponse]]$streamedResponse = @() 434 | [Text.StringBuilder]$chatStream = '' 435 | 436 | Get-AIChat @chatParams 437 | | ForEach-Object { 438 | [CreateChatCompletionChunkedResponse]$response = $PSItem 439 | $streamedResponse.Add($response) 440 | 441 | [DeltaChoice]$firstChoice = $response.Choices[0] 442 | [string]$firstChoiceContent = $firstChoice.Delta.Content 443 | [void]$chatStream.Append($firstChoiceContent) 444 | 445 | $markdownCodeFenceRegex = '```(?<lang>\w+)?\s*(?<code>[\s\S]*?)```' 446 | 447 | #Start recording if a code block occurs, and if it does, reformat it and copy it to clipboard 448 | #TODO: This could maybe be faster by watching the stream for the starting and trailing backticks 449 | if ($chatStream -match $markdownCodeFenceRegex) { 450 | $codeblock = $matches[0] 451 | $code = $matches.code 452 | $lang = $matches.lang 453 | $codeBlockLineCount = ($codeBlock -split '\r?\n').Count 454 | 455 | #Use ANSI codes to move the cursor up to the start of the code block to overwrite it 456 | Write-Host -NoNewline "`e[${codeBlockLineCount}F" 457 | Write-Host -NoNewline "`e[0J" 458 | 459 | $formattedCodeBlock = [Environment]::NewLine + 460 | $PSStyle.Reverse + 461 | $code + 462 | $PSStyle.Reset + 463 | [Environment]::NewLine 464 | 465 | Write-Host -ForegroundColor DarkGray -NoNewline $formattedCodeBlock 466 | 467 | #Update the stringbuilder 468 | #TODO: Add the start index which generally should not be necessary but probably smart 469 | [void]$chatStream.Replace($codeBlock, $formattedCodeBlock) 470 | } else { 471 | Write-Host -ForegroundColor DarkGray -NoNewline $firstChoice.Delta.Content 472 | } 473 | 474 | if ($firstChoice.Finish_reason -eq 'length') { 475 | Write-Host ($PSStyle.Formatting.Warning + '[END]' + $PSStyle.Reset) 476 | Write-Warning "Response truncated due to length. Consider setting -MaxTokens greater than $MaxTokens" 477 | } 478 | } 479 | 480 | $message = [ChatCompletionRequestMessage]::new( 481 | [string]::Concat($streamedResponse.Choices.Delta.Content), 482 | [ChatCompletionRequestMessageRole]::Assistant 483 | ) 484 | 485 | $chatHistory.Add($message) 486 | 487 | if (-not $NoClipboard) { 488 | $message.Content 489 | | Convert-ChatCodeToClipboard 490 | | Out-Null 491 | } 492 | 493 | #TODO: Move this into the formatter 494 | # switch ($aiResponse.FinishReason) { 495 | # 'stop' {} #This is the normal response 496 | # 'length' { 497 | # Write-Warning "$MaxTokens tokens reached. Consider increasing the value of -MaxTokens for longer responses." 498 | # } 499 | # $null { 500 | # Write-Debug 'Null FinishReason received. This seems to occur on occasion and may or may not be a bug.'
501 | # } 502 | # default { 503 | # Write-Warning "Chat response finished abruptly due to: $($aiResponse.FinishReason)" 504 | # } 505 | # } 506 | 507 | $chatPrompt = $null 508 | if (-not $NoReply) { 509 | Write-Host -Fore Cyan '' 510 | } 511 | } while ( 512 | -not $NoReply 513 | ) 514 | } 515 | } 516 | 517 | filter Convert-ChatCodeToClipboard { 518 | <# 519 | .SYNOPSIS 520 | Given a string, take the last occurrence of text surrounded by a fenced code block, and copy it to the clipboard. 521 | It will also pass through the string for further filtering 522 | #> 523 | $fencedCodeBlockRegex = '(?s)```(?:\w+)?\s*(.+?)```' 524 | $matchResult = $PSItem -match $fencedCodeBlockRegex 525 | if (-not $matchResult) { 526 | Write-Debug 'No code block detected, skipping this step' 527 | return $PSItem 528 | } 529 | $savedMatches = $matches 530 | $cbMatch = $savedMatches.($savedMatches.Keys | Sort-Object | Select-Object -Last 1) 531 | 532 | Write-Verbose "Copying last suggested code block to clipboard:`n$cbMatch" 533 | Set-Clipboard -Value $cbMatch 534 | 535 | return $PSItem 536 | } 537 | 538 | class AvailableModels : IValidateSetValuesGenerator { 539 | [String[]] GetValidValues() { 540 | trap { Write-Host ''; Write-Host -NoNewline -ForegroundColor Red "Validation Error: $PSItem" } 541 | $models = Get-AIModel 542 | return $models.Id 543 | } 544 | } 545 | 546 | filter Format-ChatCode { 547 | <# 548 | .SYNOPSIS 549 | Given a string, for any occurrence of text surrounded by backticks, replace the backticks with ANSI escape codes 550 | #> 551 | $codeBlockRegex = '(?s)```(?:\w+)?\s*(.+?)```' 552 | $codeSnippetRegex = '(?s)`(.+?)`' 553 | $italicSelectedText = ($PSStyle.Italic + '$1' + $PSStyle.ItalicOff) 554 | $PSItem -replace $codeBlockRegex, $italicSelectedText -replace $codeSnippetRegex, $italicSelectedText 555 | } 556 | 557 | filter Format-ChatMessage { 558 | param( 559 | [Parameter(ValueFromPipeline)]$message, 560 | #Notes that the content should be streamed rather than returned line by line 561 | [switch]$Stream 562 | ) 563 | 564 | $role = $message.Role 565 | $content = $message.Content 566 | 567 | $roleColor = switch ($role) { 568 | 'System' { 'DarkYellow' } 569 | 'Assistant' { 'Green' } 570 | 'User' { 'DarkCyan' } 571 | default { 'DarkGray' } 572 | } 573 | 574 | if ($Stream) { 575 | if ($role) { 576 | return "$($PSStyle.Foreground.$roleColor)$role`:$($PSStyle.Reset) " 577 | } elseif ($content) { 578 | return "$($PSStyle.Foreground.BrightBlack)$content$($PSStyle.Reset)" 579 | } else { 580 | #Blank entry; we might want to throw here just in case, though it is technically allowed.
581 | return 582 | } 583 | } 584 | 585 | $formattedMessage = $content.Trim() | Format-ChatCode 586 | return "$($PSStyle.Foreground.$roleColor)$role`:$($PSStyle.Reset) $($PSStyle.ForeGround.BrightBlack)$formattedMessage" 587 | } 588 | 589 | function Format-CreateChatCompletionChunkedResponse { 590 | param( 591 | [Parameter(ValueFromPipeline)][CreateChatCompletionChunkedResponse]$response 592 | ) 593 | Format-ChatMessage -Stream -message $response.Choices[0].Delta 594 | } 595 | 596 | function Format-Choices2 { 597 | [AssemblyMetadata('Format-Custom', 'Choices2')] 598 | param( 599 | [Choices2]$choice 600 | ) 601 | $PSStyle.Foreground.BrightCyan + 602 | "Choice $([int]$choice.Index + 1): " + 603 | (Format-ChatMessage $choice.Message) 604 | } 605 | 606 | 607 | filter Format-CreateChatCompletionRequest { 608 | [AssemblyMetadata('Format-Custom', 'OpenAI.CreateChatCompletionRequest')] 609 | param( 610 | [Parameter(ValueFromPipeline)][CreateChatCompletionRequest]$request 611 | ) 612 | $request.messages | Format-ChatMessage 613 | } 614 | filter Format-CreateChatCompletionResponse { 615 | [AssemblyMetadata('Format-Custom', 'OpenAI.CreateChatCompletionResponse')] 616 | param( 617 | [Parameter(ValueFromPipeline)][CreateChatCompletionResponse]$response 618 | ) 619 | if ($response.Choices.Count -eq 1) { 620 | Format-ChatMessage $response.Choices[0].Message 621 | } else { 622 | $Response.Choices 623 | } 624 | } 625 | 626 | function Format-ChatConversation { 627 | param( 628 | [ChatConversation]$conversation 629 | ) 630 | $messages = @() 631 | 632 | $messages += $conversation.Request | Format-CreateChatCompletionRequest 633 | $messages += $conversation.Response | Format-CreateChatCompletionResponse 634 | return $messages -join ($PSStyle.Reset + [Environment]::NewLine) 635 | } 636 | 637 | function Get-UsagePrice { 638 | param( 639 | [string]$Model, 640 | [int]$Total 641 | ) 642 | 643 | #Taken from: https://openai.com/pricing (ordered so that free code models match before 'davinci') 644 | $pricePerToken = [ordered]@{ 645 | 'code' = 0 646 | 'gpt-3.5-turbo' = .002 / 1000 647 | 'ada' = .0004 / 1000 648 | 'babbage' = .0005 / 1000 649 | 'curie' = .002 / 1000 650 | 'davinci' = .02 / 1000 651 | } 652 | 653 | foreach ($priceItem in $pricePerToken.GetEnumerator()) { 654 | if ($Model.Contains($priceItem.key)) { 655 | #Will return the first match 656 | $totalPrice = $total * $priceItem.Value 657 | 658 | #Formats as currency ($3.2629) and strips trailing zeroes 659 | return $totalPrice.ToString('C15').TrimEnd('0') 660 | } 661 | } 662 | 663 | #Return an empty string if no matching price entry was found.
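#Worked example (a sketch, assuming the table above and an en-US culture): 1000 total tokens on 'gpt-3.5-turbo' is 1000 * (.002 / 1000) = 0.002, which ToString('C15') renders as $0.002000000000000 and TrimEnd('0') reduces to $0.002.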
664 | return [string]::Empty 665 | 666 | } -------------------------------------------------------------------------------- /src/OpenAI.Client/OpenAI.nswag: -------------------------------------------------------------------------------- 1 | { 2 | "runtime": "Net60", 3 | "defaultVariables": null, 4 | "documentGenerator": { 5 | "fromDocument": { 6 | "json": "openapi: 3.0.0\ninfo:\n title: OpenAI API\n description: APIs for sampling from and fine-tuning language models\n version: '1.2.0'\nservers:\n - url: https://api.openai.com/v1\ntags:\n- name: OpenAI\n description: The OpenAI REST API\npaths:\n /engines:\n get:\n operationId: listEngines\n deprecated: true\n tags:\n - OpenAI\n summary: Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ListEnginesResponse'\n x-oaiMeta:\n name: List engines\n group: engines\n path: list\n examples:\n curl: |\n curl https://api.openai.com/v1/engines \\\n -H 'Authorization: Bearer YOUR_API_KEY'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Engine.list()\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.listEngines();\n response: |\n {\n \"data\": [\n {\n \"id\": \"engine-id-0\",\n \"object\": \"engine\",\n \"owner\": \"organization-owner\",\n \"ready\": true\n },\n {\n \"id\": \"engine-id-2\",\n \"object\": \"engine\",\n \"owner\": \"organization-owner\",\n \"ready\": true\n },\n {\n \"id\": \"engine-id-3\",\n \"object\": \"engine\",\n \"owner\": \"openai\",\n \"ready\": false\n },\n ],\n \"object\": \"list\"\n }\n\n /engines/{engine_id}:\n get:\n operationId: retrieveEngine\n deprecated: true\n tags:\n - OpenAI\n summary: Retrieves a model instance, providing basic information about it such as the owner and availability.\n parameters:\n - in: path\n name: engine_id\n required: true\n schema:\n type: string\n # ideally this will be an actual ID, so this will always work from browser\n example:\n davinci\n description: &engine_id_description >\n The ID of the engine to use for this request\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/Engine'\n x-oaiMeta:\n name: Retrieve engine\n group: engines\n path: retrieve\n examples:\n curl: |\n curl https://api.openai.com/v1/engines/VAR_model_id \\\n -H 'Authorization: Bearer YOUR_API_KEY'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Engine.retrieve(\"VAR_model_id\")\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.retrieveEngine(\"VAR_model_id\");\n response: |\n {\n \"id\": \"VAR_model_id\",\n \"object\": \"engine\",\n \"owner\": \"openai\",\n \"ready\": true\n }\n\n /completions:\n post:\n operationId: createCompletion\n tags:\n - OpenAI\n summary: Creates a completion for the provided prompt and parameters\n requestBody:\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateCompletionRequest'\n responses:\n \"200\":\n description: OK\n 
content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateCompletionResponse'\n x-oaiMeta:\n name: Create completion\n group: completions\n path: create\n examples:\n curl: |\n curl https://api.openai.com/v1/completions \\\n -H 'Content-Type: application/json' \\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0\n }'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Completion.create(\n model=\"VAR_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0\n )\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.createCompletion({\n model: \"VAR_model_id\",\n prompt: \"Say this is a test\",\n max_tokens: 7,\n temperature: 0,\n });\n parameters: |\n {\n \"model\": \"VAR_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0,\n \"top_p\": 1,\n \"n\": 1,\n \"stream\": false,\n \"logprobs\": null,\n \"stop\": \"\\n\"\n }\n response: |\n {\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"VAR_model_id\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n }\n /chat/completions:\n post:\n operationId: createChatCompletion\n tags:\n - OpenAI\n summary: Creates a completion for the chat message\n requestBody:\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateChatCompletionRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateChatCompletionResponse'\n\n x-oaiMeta:\n name: Create chat completion\n group: chat\n path: create\n beta: true\n examples:\n curl: |\n curl https://api.openai.com/v1/chat/completions \\\n -H 'Content-Type: application/json' \\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\n -d '{\n \"model\": \"gpt-3.5-turbo\",\n \"messages\": [{\"role\": \"user\", \"content\": \"Hello!\"}]\n }'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n completion = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ]\n )\n\n print(completion.choices[0].message)\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n\n const completion = await openai.createChatCompletion({\n model: \"gpt-3.5-turbo\",\n messages: [{role: \"user\", content: \"Hello world\"}],\n });\n console.log(completion.data.choices[0].message);\n parameters: |\n {\n \"model\": \"gpt-3.5-turbo\",\n \"messages\": [{\"role\": \"user\", \"content\": \"Hello!\"}]\n }\n response: |\n {\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 
9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n }\n\n /edits:\n post:\n operationId: createEdit\n tags:\n - OpenAI\n summary: Creates a new edit for the provided input, instruction, and parameters.\n requestBody:\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateEditRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateEditResponse'\n x-oaiMeta:\n name: Create edit\n group: edits\n path: create\n examples:\n curl: |\n curl https://api.openai.com/v1/edits \\\n -H 'Content-Type: application/json' \\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"input\": \"What day of the wek is it?\",\n \"instruction\": \"Fix the spelling mistakes\"\n }'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Edit.create(\n model=\"VAR_model_id\",\n input=\"What day of the wek is it?\",\n instruction=\"Fix the spelling mistakes\"\n )\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.createEdit({\n model: \"VAR_model_id\",\n input: \"What day of the wek is it?\",\n instruction: \"Fix the spelling mistakes\",\n });\n parameters: |\n {\n \"model\": \"VAR_model_id\",\n \"input\": \"What day of the wek is it?\",\n \"instruction\": \"Fix the spelling mistakes\",\n }\n response: |\n {\n \"object\": \"edit\",\n \"created\": 1589478378,\n \"choices\": [\n {\n \"text\": \"What day of the week is it?\",\n \"index\": 0,\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 25,\n \"completion_tokens\": 32,\n \"total_tokens\": 57\n }\n }\n\n /images/generations:\n post:\n operationId: createImage\n tags:\n - OpenAI\n summary: Creates an image given a prompt.\n requestBody:\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateImageRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ImagesResponse'\n x-oaiMeta:\n name: Create image\n group: images\n path: create\n beta: true\n examples:\n curl: |\n curl https://api.openai.com/v1/images/generations \\\n -H 'Content-Type: application/json' \\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\n -d '{\n \"prompt\": \"A cute baby sea otter\",\n \"n\": 2,\n \"size\": \"1024x1024\"\n }'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Image.create(\n prompt=\"A cute baby sea otter\",\n n=2,\n size=\"1024x1024\"\n )\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.createImage({\n prompt: \"A cute baby sea otter\",\n n: 2,\n size: \"1024x1024\",\n });\n parameters: |\n {\n \"prompt\": \"A cute baby sea otter\",\n \"n\": 2,\n \"size\": \"1024x1024\"\n }\n response: |\n {\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n }\n\n /images/edits:\n post:\n operationId: createImageEdit\n tags:\n - OpenAI\n summary: Creates an edited or extended image given an original image and a prompt.\n requestBody:\n required: true\n content:\n multipart/form-data:\n schema:\n $ref: 
'#/components/schemas/CreateImageEditRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ImagesResponse'\n x-oaiMeta:\n name: Create image edit\n group: images\n path: create-edit\n beta: true\n examples:\n curl: |\n curl https://api.openai.com/v1/images/edits \\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\n -F image='@otter.png' \\\n -F mask='@mask.png' \\\n -F prompt=\"A cute baby sea otter wearing a beret\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Image.create_edit(\n image=open(\"otter.png\", \"rb\"),\n mask=open(\"mask.png\", \"rb\"),\n prompt=\"A cute baby sea otter wearing a beret\",\n n=2,\n size=\"1024x1024\"\n )\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.createImageEdit(\n fs.createReadStream(\"otter.png\"),\n fs.createReadStream(\"mask.png\"),\n \"A cute baby sea otter wearing a beret\",\n 2,\n \"1024x1024\"\n );\n response: |\n {\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n }\n\n /images/variations:\n post:\n operationId: createImageVariation\n tags:\n - OpenAI\n summary: Creates a variation of a given image.\n requestBody:\n required: true\n content:\n multipart/form-data:\n schema:\n $ref: '#/components/schemas/CreateImageVariationRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ImagesResponse'\n x-oaiMeta:\n name: Create image variation\n group: images\n path: create-variation\n beta: true\n examples:\n curl: |\n curl https://api.openai.com/v1/images/variations \\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\n -F image='@otter.png' \\\n -F n=2 \\\n -F size=\"1024x1024\"\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Image.create_variation(\n image=open(\"otter.png\", \"rb\"),\n n=2,\n size=\"1024x1024\"\n )\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.createImageVariation(\n fs.createReadStream(\"otter.png\"),\n 2,\n \"1024x1024\"\n );\n response: |\n {\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n }\n\n /embeddings:\n post:\n operationId: createEmbedding\n tags:\n - OpenAI\n summary: Creates an embedding vector representing the input text.\n requestBody:\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateEmbeddingRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateEmbeddingResponse'\n x-oaiMeta:\n name: Create embeddings\n group: embeddings\n path: create\n examples:\n curl: |\n curl https://api.openai.com/v1/embeddings \\\n -X POST \\\n -H \"Authorization: Bearer YOUR_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\"input\": \"The food was delicious and the waiter...\",\n \"model\": \"text-embedding-ada-002\"}'\n\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Embedding.create(\n 
model=\"text-embedding-ada-002\",\n input=\"The food was delicious and the waiter...\"\n )\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.createEmbedding({\n model: \"text-embedding-ada-002\",\n input: \"The food was delicious and the waiter...\",\n });\n parameters: |\n {\n \"model\": \"text-embedding-ada-002\",\n \"input\": \"The food was delicious and the waiter...\"\n }\n response: |\n {\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... (1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n }\n ],\n \"model\": \"text-embedding-ada-002\",\n \"usage\": {\n \"prompt_tokens\": 8,\n \"total_tokens\": 8\n }\n }\n\n /audio/transcriptions:\n post:\n operationId: createTranscription\n tags:\n - OpenAI\n summary: Transcribes audio into the input language.\n requestBody:\n required: true\n content:\n multipart/form-data:\n schema:\n $ref: '#/components/schemas/CreateTranscriptionRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateTranscriptionResponse'\n x-oaiMeta:\n name: Create transcription\n group: audio\n path: create\n beta: true\n examples:\n curl: |\n curl https://api.openai.com/v1/audio/transcriptions \\\n -X POST \\\n -H 'Authorization: Bearer TOKEN' \\\n -H 'Content-Type: multipart/form-data' \\\n -F file=@/path/to/file/audio.mp3 \\\n -F model=whisper-1\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n audio_file = open(\"audio.mp3\", \"rb\")\n transcript = openai.Audio.transcribe(\"whisper-1\", audio_file)\n node: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const resp = await openai.createTranscription(\n fs.createReadStream(\"audio.mp3\"),\n \"whisper-1\"\n );\n parameters: |\n {\n \"file\": \"audio.mp3\",\n \"model\": \"whisper-1\"\n }\n response: |\n {\n \"text\": \"Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. 
This is a place where you can get to do that.\"\n }\n\n /audio/translations:\n post:\n operationId: createTranslation\n tags:\n - OpenAI\n summary: Translates audio into into English.\n requestBody:\n required: true\n content:\n multipart/form-data:\n schema:\n $ref: '#/components/schemas/CreateTranslationRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateTranslationResponse'\n x-oaiMeta:\n name: Create translation\n group: audio\n path: create\n beta: true\n examples:\n curl: |\n curl https://api.openai.com/v1/audio/translations \\\n -X POST \\\n -H 'Authorization: Bearer TOKEN' \\\n -H 'Content-Type: multipart/form-data' \\\n -F file=@/path/to/file/german.m4a \\\n -F model=whisper-1\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n audio_file = open(\"german.m4a\", \"rb\")\n transcript = openai.Audio.translate(\"whisper-1\", audio_file)\n node: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const resp = await openai.createTranslation(\n fs.createReadStream(\"audio.mp3\"),\n \"whisper-1\"\n );\n parameters: |\n {\n \"file\": \"german.m4a\",\n \"model\": \"whisper-1\"\n }\n response: |\n {\n \"text\": \"Hello, my name is Wolfgang and I come from Germany. Where are you heading today?\"\n }\n\n /engines/{engine_id}/search:\n post:\n operationId: createSearch\n deprecated: true\n tags:\n - OpenAI\n summary: |\n The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them.\n\n To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores.\n\n The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.\n parameters:\n - in: path\n name: engine_id\n required: true\n schema:\n type: string\n example: davinci\n description: The ID of the engine to use for this request. 
You can select one of `ada`, `babbage`, `curie`, or `davinci`.\n requestBody:\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateSearchRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateSearchResponse'\n x-oaiMeta:\n name: Create search\n group: searches\n path: create\n examples:\n curl: |\n curl https://api.openai.com/v1/engines/davinci/search \\\n -H \"Content-Type: application/json\" \\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\n -d '{\n \"documents\": [\"White House\", \"hospital\", \"school\"],\n \"query\": \"the president\"\n }'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Engine(\"davinci\").search(\n documents=[\"White House\", \"hospital\", \"school\"],\n query=\"the president\"\n )\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.createSearch(\"davinci\", {\n documents: [\"White House\", \"hospital\", \"school\"],\n query: \"the president\",\n });\n parameters: |\n {\n \"documents\": [\n \"White House\",\n \"hospital\",\n \"school\"\n ],\n \"query\": \"the president\"\n }\n response: |\n {\n \"data\": [\n {\n \"document\": 0,\n \"object\": \"search_result\",\n \"score\": 215.412\n },\n {\n \"document\": 1,\n \"object\": \"search_result\",\n \"score\": 40.316\n },\n {\n \"document\": 2,\n \"object\": \"search_result\",\n \"score\": 55.226\n }\n ],\n \"object\": \"list\"\n }\n\n /files:\n get:\n operationId: listFiles\n tags:\n - OpenAI\n summary: Returns a list of files that belong to the user's organization.\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ListFilesResponse'\n x-oaiMeta:\n name: List files\n group: files\n path: list\n examples:\n curl: |\n curl https://api.openai.com/v1/files \\\n -H 'Authorization: Bearer YOUR_API_KEY'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.File.list()\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.listFiles();\n response: |\n {\n \"data\": [\n {\n \"id\": \"file-ccdDZrC3iZVNiQVeEA6Z66wf\",\n \"object\": \"file\",\n \"bytes\": 175,\n \"created_at\": 1613677385,\n \"filename\": \"train.jsonl\",\n \"purpose\": \"search\"\n },\n {\n \"id\": \"file-XjGxS3KTG0uNmNOK362iJua3\",\n \"object\": \"file\",\n \"bytes\": 140,\n \"created_at\": 1613779121,\n \"filename\": \"puppy.jsonl\",\n \"purpose\": \"search\"\n }\n ],\n \"object\": \"list\"\n }\n post:\n operationId: createFile\n tags:\n - OpenAI\n summary: |\n Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. 
Please contact us if you need to increase the storage limit.\n\n requestBody:\n required: true\n content:\n multipart/form-data:\n schema:\n $ref: '#/components/schemas/CreateFileRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/OpenAIFile'\n x-oaiMeta:\n name: Upload file\n group: files\n path: upload\n examples:\n curl: |\n curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer YOUR_API_KEY\" \\\n -F purpose=\"fine-tune\" \\\n -F file='@mydata.jsonl'\n\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.File.create(\n file=open(\"mydata.jsonl\", \"rb\"),\n purpose='fine-tune'\n )\n node.js: |\n const fs = require(\"fs\");\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.createFile(\n fs.createReadStream(\"mydata.jsonl\"),\n \"fine-tune\"\n );\n response: |\n {\n \"id\": \"file-XjGxS3KTG0uNmNOK362iJua3\",\n \"object\": \"file\",\n \"bytes\": 140,\n \"created_at\": 1613779121,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\"\n }\n\n /files/{file_id}:\n delete:\n operationId: deleteFile\n tags:\n - OpenAI\n summary: Delete a file.\n parameters:\n - in: path\n name: file_id\n required: true\n schema:\n type: string\n description: The ID of the file to use for this request\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/DeleteFileResponse'\n x-oaiMeta:\n name: Delete file\n group: files\n path: delete\n examples:\n curl: |\n curl https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3 \\\n -X DELETE \\\n -H 'Authorization: Bearer YOUR_API_KEY'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.File.delete(\"file-XjGxS3KTG0uNmNOK362iJua3\")\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.deleteFile(\"file-XjGxS3KTG0uNmNOK362iJua3\");\n response: |\n {\n \"id\": \"file-XjGxS3KTG0uNmNOK362iJua3\",\n \"object\": \"file\",\n \"deleted\": true\n }\n get:\n operationId: retrieveFile\n tags:\n - OpenAI\n summary: Returns information about a specific file.\n parameters:\n - in: path\n name: file_id\n required: true\n schema:\n type: string\n description: The ID of the file to use for this request\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/OpenAIFile'\n x-oaiMeta:\n name: Retrieve file\n group: files\n path: retrieve\n examples:\n curl: |\n curl https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3 \\\n -H 'Authorization: Bearer YOUR_API_KEY'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.File.retrieve(\"file-XjGxS3KTG0uNmNOK362iJua3\")\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.retrieveFile(\"file-XjGxS3KTG0uNmNOK362iJua3\");\n response: |\n {\n \"id\": \"file-XjGxS3KTG0uNmNOK362iJua3\",\n \"object\": \"file\",\n \"bytes\": 140,\n \"created_at\": 1613779657,\n 
\"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\"\n }\n\n /files/{file_id}/content:\n get:\n operationId: downloadFile\n tags:\n - OpenAI\n summary: Returns the contents of the specified file\n parameters:\n - in: path\n name: file_id\n required: true\n schema:\n type: string\n description: The ID of the file to use for this request\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n type: string\n x-oaiMeta:\n name: Retrieve file content\n group: files\n path: retrieve-content\n examples:\n curl: |\n curl https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3/content \\\n -H 'Authorization: Bearer YOUR_API_KEY' > file.jsonl\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n content = openai.File.download(\"file-XjGxS3KTG0uNmNOK362iJua3\")\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.downloadFile(\"file-XjGxS3KTG0uNmNOK362iJua3\");\n\n /answers:\n post:\n operationId: createAnswer\n deprecated: true\n tags:\n - OpenAI\n summary: |\n Answers the specified question using the provided documents and examples.\n\n The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).\n requestBody:\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateAnswerRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateAnswerResponse'\n x-oaiMeta:\n name: Create answer\n group: answers\n path: create\n examples:\n curl: |\n curl https://api.openai.com/v1/answers \\\n -X POST \\\n -H \"Authorization: Bearer YOUR_API_KEY\" \\\n -H 'Content-Type: application/json' \\\n -d '{\n \"documents\": [\"Puppy A is happy.\", \"Puppy B is sad.\"],\n \"question\": \"which puppy is happy?\",\n \"search_model\": \"ada\",\n \"model\": \"curie\",\n \"examples_context\": \"In 2017, U.S. life expectancy was 78.6 years.\",\n \"examples\": [[\"What is human life expectancy in the United States?\",\"78 years.\"]],\n \"max_tokens\": 5,\n \"stop\": [\"\\n\", \"<|endoftext|>\"]\n }'\n\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Answer.create(\n search_model=\"ada\",\n model=\"curie\",\n question=\"which puppy is happy?\",\n documents=[\"Puppy A is happy.\", \"Puppy B is sad.\"],\n examples_context=\"In 2017, U.S. life expectancy was 78.6 years.\",\n examples=[[\"What is human life expectancy in the United States?\",\"78 years.\"]],\n max_tokens=5,\n stop=[\"\\n\", \"<|endoftext|>\"],\n )\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.createAnswer({\n search_model: \"ada\",\n model: \"curie\",\n question: \"which puppy is happy?\",\n documents: [\"Puppy A is happy.\", \"Puppy B is sad.\"],\n examples_context: \"In 2017, U.S. 
life expectancy was 78.6 years.\",\n examples: [[\"What is human life expectancy in the United States?\",\"78 years.\"]],\n max_tokens: 5,\n stop: [\"\\n\", \"<|endoftext|>\"],\n });\n parameters: |\n {\n \"documents\": [\"Puppy A is happy.\", \"Puppy B is sad.\"],\n \"question\": \"which puppy is happy?\",\n \"search_model\": \"ada\",\n \"model\": \"curie\",\n \"examples_context\": \"In 2017, U.S. life expectancy was 78.6 years.\",\n \"examples\": [[\"What is human life expectancy in the United States?\",\"78 years.\"]],\n \"max_tokens\": 5,\n \"stop\": [\"\\n\", \"<|endoftext|>\"]\n }\n response: |\n {\n \"answers\": [\n \"puppy A.\"\n ],\n \"completion\": \"cmpl-2euVa1kmKUuLpSX600M41125Mo9NI\",\n \"model\": \"curie:2020-05-03\",\n \"object\": \"answer\",\n \"search_model\": \"ada\",\n \"selected_documents\": [\n {\n \"document\": 0,\n \"text\": \"Puppy A is happy. \"\n },\n {\n \"document\": 1,\n \"text\": \"Puppy B is sad. \"\n }\n ]\n }\n\n /classifications:\n post:\n operationId: createClassification\n deprecated: true\n tags:\n - OpenAI\n summary: |\n Classifies the specified `query` using provided examples.\n\n The endpoint first [searches](/docs/api-reference/searches) over the labeled examples\n to select the ones most relevant for the particular query. Then, the relevant examples\n are combined with the query to construct a prompt to produce the final label via the\n [completions](/docs/api-reference/completions) endpoint.\n\n Labeled examples can be provided via an uploaded `file`, or explicitly listed in the\n request using the `examples` parameter for quick tests and small scale use cases.\n requestBody:\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateClassificationRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateClassificationResponse'\n x-oaiMeta:\n name: Create classification\n group: classifications\n path: create\n examples:\n curl: |\n curl https://api.openai.com/v1/classifications \\\n -X POST \\\n -H \"Authorization: Bearer YOUR_API_KEY\" \\\n -H 'Content-Type: application/json' \\\n -d '{\n \"examples\": [\n [\"A happy moment\", \"Positive\"],\n [\"I am sad.\", \"Negative\"],\n [\"I am feeling awesome\", \"Positive\"]],\n \"query\": \"It is a raining day :(\",\n \"search_model\": \"ada\",\n \"model\": \"curie\",\n \"labels\":[\"Positive\", \"Negative\", \"Neutral\"]\n }'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Classification.create(\n search_model=\"ada\",\n model=\"curie\",\n examples=[\n [\"A happy moment\", \"Positive\"],\n [\"I am sad.\", \"Negative\"],\n [\"I am feeling awesome\", \"Positive\"]\n ],\n query=\"It is a raining day :(\",\n labels=[\"Positive\", \"Negative\", \"Neutral\"],\n )\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.createClassification({\n search_model: \"ada\",\n model: \"curie\",\n examples: [\n [\"A happy moment\", \"Positive\"],\n [\"I am sad.\", \"Negative\"],\n [\"I am feeling awesome\", \"Positive\"]\n ],\n query:\"It is a raining day :(\",\n labels: [\"Positive\", \"Negative\", \"Neutral\"],\n });\n parameters: |\n {\n \"examples\": [\n [\"A happy moment\", \"Positive\"],\n [\"I am sad.\", \"Negative\"],\n [\"I am feeling awesome\", \"Positive\"]\n ],\n \"labels\": 
[\"Positive\", \"Negative\", \"Neutral\"],\n \"query\": \"It is a raining day :(\",\n \"search_model\": \"ada\",\n \"model\": \"curie\"\n }\n response: |\n {\n \"completion\": \"cmpl-2euN7lUVZ0d4RKbQqRV79IiiE6M1f\",\n \"label\": \"Negative\",\n \"model\": \"curie:2020-05-03\",\n \"object\": \"classification\",\n \"search_model\": \"ada\",\n \"selected_examples\": [\n {\n \"document\": 1,\n \"label\": \"Negative\",\n \"text\": \"I am sad.\"\n },\n {\n \"document\": 0,\n \"label\": \"Positive\",\n \"text\": \"A happy moment\"\n },\n {\n \"document\": 2,\n \"label\": \"Positive\",\n \"text\": \"I am feeling awesome\"\n }\n ]\n }\n\n /fine-tunes:\n post:\n operationId: createFineTune\n tags:\n - OpenAI\n summary: |\n Creates a job that fine-tunes a specified model from a given dataset.\n\n Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete.\n\n [Learn more about Fine-tuning](/docs/guides/fine-tuning)\n requestBody:\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateFineTuneRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/FineTune'\n x-oaiMeta:\n name: Create fine-tune\n group: fine-tunes\n path: create\n examples:\n curl: |\n curl https://api.openai.com/v1/fine-tunes \\\n -X POST \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer YOUR_API_KEY\" \\\n -d '{\n \"training_file\": \"file-XGinujblHPwGLSztz8cPS8XY\"\n }'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.FineTune.create(training_file=\"file-XGinujblHPwGLSztz8cPS8XY\")\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.createFineTune({\n training_file: \"file-XGinujblHPwGLSztz8cPS8XY\",\n });\n response: |\n {\n \"id\": \"ft-AF1WoRqd3aJAHsqc9NY7iL8F\",\n \"object\": \"fine-tune\",\n \"model\": \"curie\",\n \"created_at\": 1614807352,\n \"events\": [\n {\n \"object\": \"fine-tune-event\",\n \"created_at\": 1614807352,\n \"level\": \"info\",\n \"message\": \"Job enqueued. Waiting for jobs ahead to complete. 
Queue number: 0.\"\n }\n ],\n \"fine_tuned_model\": null,\n \"hyperparams\": {\n \"batch_size\": 4,\n \"learning_rate_multiplier\": 0.1,\n \"n_epochs\": 4,\n \"prompt_loss_weight\": 0.1,\n },\n \"organization_id\": \"org-...\",\n \"result_files\": [],\n \"status\": \"pending\",\n \"validation_files\": [],\n \"training_files\": [\n {\n \"id\": \"file-XGinujblHPwGLSztz8cPS8XY\",\n \"object\": \"file\",\n \"bytes\": 1547276,\n \"created_at\": 1610062281,\n \"filename\": \"my-data-train.jsonl\",\n \"purpose\": \"fine-tune-train\"\n }\n ],\n \"updated_at\": 1614807352,\n }\n get:\n operationId: listFineTunes\n tags:\n - OpenAI\n summary: |\n List your organization's fine-tuning jobs\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ListFineTunesResponse'\n x-oaiMeta:\n name: List fine-tunes\n group: fine-tunes\n path: list\n examples:\n curl: |\n curl https://api.openai.com/v1/fine-tunes \\\n -H 'Authorization: Bearer YOUR_API_KEY'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.FineTune.list()\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.listFineTunes();\n response: |\n {\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"ft-AF1WoRqd3aJAHsqc9NY7iL8F\",\n \"object\": \"fine-tune\",\n \"model\": \"curie\",\n \"created_at\": 1614807352,\n \"fine_tuned_model\": null,\n \"hyperparams\": { ... },\n \"organization_id\": \"org-...\",\n \"result_files\": [],\n \"status\": \"pending\",\n \"validation_files\": [],\n \"training_files\": [ { ... } ],\n \"updated_at\": 1614807352,\n },\n { ... },\n { ... }\n ]\n }\n\n /fine-tunes/{fine_tune_id}:\n get:\n operationId: retrieveFineTune\n tags:\n - OpenAI\n summary: |\n Gets info about the fine-tune job.\n\n [Learn more about Fine-tuning](/docs/guides/fine-tuning)\n parameters:\n - in: path\n name: fine_tune_id\n required: true\n schema:\n type: string\n example:\n ft-AF1WoRqd3aJAHsqc9NY7iL8F\n description: |\n The ID of the fine-tune job\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/FineTune'\n x-oaiMeta:\n name: Retrieve fine-tune\n group: fine-tunes\n path: retrieve\n examples:\n curl: |\n curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F \\\n -H \"Authorization: Bearer YOUR_API_KEY\"\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.FineTune.retrieve(id=\"ft-AF1WoRqd3aJAHsqc9NY7iL8F\")\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.retrieveFineTune(\"ft-AF1WoRqd3aJAHsqc9NY7iL8F\");\n response: |\n {\n \"id\": \"ft-AF1WoRqd3aJAHsqc9NY7iL8F\",\n \"object\": \"fine-tune\",\n \"model\": \"curie\",\n \"created_at\": 1614807352,\n \"events\": [\n {\n \"object\": \"fine-tune-event\",\n \"created_at\": 1614807352,\n \"level\": \"info\",\n \"message\": \"Job enqueued. Waiting for jobs ahead to complete. 
Queue number: 0.\"\n },\n {\n \"object\": \"fine-tune-event\",\n \"created_at\": 1614807356,\n \"level\": \"info\",\n \"message\": \"Job started.\"\n },\n {\n \"object\": \"fine-tune-event\",\n \"created_at\": 1614807861,\n \"level\": \"info\",\n \"message\": \"Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20.\"\n },\n {\n \"object\": \"fine-tune-event\",\n \"created_at\": 1614807864,\n \"level\": \"info\",\n \"message\": \"Uploaded result files: file-QQm6ZpqdNwAaVC3aSz5sWwLT.\"\n },\n {\n \"object\": \"fine-tune-event\",\n \"created_at\": 1614807864,\n \"level\": \"info\",\n \"message\": \"Job succeeded.\"\n }\n ],\n \"fine_tuned_model\": \"curie:ft-acmeco-2021-03-03-21-44-20\",\n \"hyperparams\": {\n \"batch_size\": 4,\n \"learning_rate_multiplier\": 0.1,\n \"n_epochs\": 4,\n \"prompt_loss_weight\": 0.1,\n },\n \"organization_id\": \"org-...\",\n \"result_files\": [\n {\n \"id\": \"file-QQm6ZpqdNwAaVC3aSz5sWwLT\",\n \"object\": \"file\",\n \"bytes\": 81509,\n \"created_at\": 1614807863,\n \"filename\": \"compiled_results.csv\",\n \"purpose\": \"fine-tune-results\"\n }\n ],\n \"status\": \"succeeded\",\n \"validation_files\": [],\n \"training_files\": [\n {\n \"id\": \"file-XGinujblHPwGLSztz8cPS8XY\",\n \"object\": \"file\",\n \"bytes\": 1547276,\n \"created_at\": 1610062281,\n \"filename\": \"my-data-train.jsonl\",\n \"purpose\": \"fine-tune-train\"\n }\n ],\n \"updated_at\": 1614807865,\n }\n\n /fine-tunes/{fine_tune_id}/cancel:\n post:\n operationId: cancelFineTune\n tags:\n - OpenAI\n summary: |\n Immediately cancel a fine-tune job.\n parameters:\n - in: path\n name: fine_tune_id\n required: true\n schema:\n type: string\n example:\n ft-AF1WoRqd3aJAHsqc9NY7iL8F\n description: |\n The ID of the fine-tune job to cancel\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/FineTune'\n x-oaiMeta:\n name: Cancel fine-tune\n group: fine-tunes\n path: cancel\n examples:\n curl: |\n curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/cancel \\\n -X POST \\\n -H \"Authorization: Bearer YOUR_API_KEY\"\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.FineTune.cancel(id=\"ft-AF1WoRqd3aJAHsqc9NY7iL8F\")\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.cancelFineTune(\"ft-AF1WoRqd3aJAHsqc9NY7iL8F\");\n response: |\n {\n \"id\": \"ft-xhrpBbvVUzYGo8oUO1FY4nI7\",\n \"object\": \"fine-tune\",\n \"model\": \"curie\",\n \"created_at\": 1614807770,\n \"events\": [ { ... } ],\n \"fine_tuned_model\": null,\n \"hyperparams\": { ... 
},\n \"organization_id\": \"org-...\",\n \"result_files\": [],\n \"status\": \"cancelled\",\n \"validation_files\": [],\n \"training_files\": [\n {\n \"id\": \"file-XGinujblHPwGLSztz8cPS8XY\",\n \"object\": \"file\",\n \"bytes\": 1547276,\n \"created_at\": 1610062281,\n \"filename\": \"my-data-train.jsonl\",\n \"purpose\": \"fine-tune-train\"\n }\n ],\n \"updated_at\": 1614807789,\n }\n\n /fine-tunes/{fine_tune_id}/events:\n get:\n operationId: listFineTuneEvents\n tags:\n - OpenAI\n summary: |\n Get fine-grained status updates for a fine-tune job.\n parameters:\n - in: path\n name: fine_tune_id\n required: true\n schema:\n type: string\n example:\n ft-AF1WoRqd3aJAHsqc9NY7iL8F\n description: |\n The ID of the fine-tune job to get events for.\n - in: query\n name: stream\n required: false\n schema:\n type: boolean\n default: false\n description: |\n Whether to stream events for the fine-tune job. If set to true,\n events will be sent as data-only\n [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)\n as they become available. The stream will terminate with a\n `data: [DONE]` message when the job is finished (succeeded, cancelled,\n or failed).\n\n If set to false, only events generated so far will be returned.\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ListFineTuneEventsResponse'\n x-oaiMeta:\n name: List fine-tune events\n group: fine-tunes\n path: events\n examples:\n curl: |\n curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/events \\\n -H \"Authorization: Bearer YOUR_API_KEY\"\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.FineTune.list_events(id=\"ft-AF1WoRqd3aJAHsqc9NY7iL8F\")\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.listFineTuneEvents(\"ft-AF1WoRqd3aJAHsqc9NY7iL8F\");\n response: |\n {\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"fine-tune-event\",\n \"created_at\": 1614807352,\n \"level\": \"info\",\n \"message\": \"Job enqueued. Waiting for jobs ahead to complete. 
Queue number: 0.\"\n },\n {\n \"object\": \"fine-tune-event\",\n \"created_at\": 1614807356,\n \"level\": \"info\",\n \"message\": \"Job started.\"\n },\n {\n \"object\": \"fine-tune-event\",\n \"created_at\": 1614807861,\n \"level\": \"info\",\n \"message\": \"Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20.\"\n },\n {\n \"object\": \"fine-tune-event\",\n \"created_at\": 1614807864,\n \"level\": \"info\",\n \"message\": \"Uploaded result files: file-QQm6ZpqdNwAaVC3aSz5sWwLT.\"\n },\n {\n \"object\": \"fine-tune-event\",\n \"created_at\": 1614807864,\n \"level\": \"info\",\n \"message\": \"Job succeeded.\"\n }\n ]\n }\n\n /models:\n get:\n operationId: listModels\n tags:\n - OpenAI\n summary: Lists the currently available models, and provides basic information about each one such as the owner and availability.\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/ListModelsResponse'\n x-oaiMeta:\n name: List models\n group: models\n path: list\n examples:\n curl: |\n curl https://api.openai.com/v1/models \\\n -H 'Authorization: Bearer YOUR_API_KEY'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Model.list()\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.listModels();\n response: |\n {\n \"data\": [\n {\n \"id\": \"model-id-0\",\n \"object\": \"model\",\n \"owned_by\": \"organization-owner\",\n \"permission\": [...]\n },\n {\n \"id\": \"model-id-1\",\n \"object\": \"model\",\n \"owned_by\": \"organization-owner\",\n \"permission\": [...]\n },\n {\n \"id\": \"model-id-2\",\n \"object\": \"model\",\n \"owned_by\": \"openai\",\n \"permission\": [...]\n },\n ],\n \"object\": \"list\"\n }\n\n /models/{model}:\n get:\n operationId: retrieveModel\n tags:\n - OpenAI\n summary: Retrieves a model instance, providing basic information about the model such as the owner and permissioning.\n parameters:\n - in: path\n name: model\n required: true\n schema:\n type: string\n # ideally this will be an actual ID, so this will always work from browser\n example:\n text-davinci-001\n description:\n The ID of the model to use for this request\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/Model'\n x-oaiMeta:\n name: Retrieve model\n group: models\n path: retrieve\n examples:\n curl: |\n curl https://api.openai.com/v1/models/VAR_model_id \\\n -H 'Authorization: Bearer YOUR_API_KEY'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Model.retrieve(\"VAR_model_id\")\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.retrieveModel(\"VAR_model_id\");\n response: |\n {\n \"id\": \"VAR_model_id\",\n \"object\": \"model\",\n \"owned_by\": \"openai\",\n \"permission\": [...]\n }\n delete:\n operationId: deleteModel\n tags:\n - OpenAI\n summary: Delete a fine-tuned model. 
You must have the Owner role in your organization.\n parameters:\n - in: path\n name: model\n required: true\n schema:\n type: string\n example: curie:ft-acmeco-2021-03-03-21-44-20\n description: The model to delete\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/DeleteModelResponse'\n x-oaiMeta:\n name: Delete fine-tune model\n group: fine-tunes\n path: delete-model\n examples:\n curl: |\n curl https://api.openai.com/v1/models/curie:ft-acmeco-2021-03-03-21-44-20 \\\n -X DELETE \\\n -H \"Authorization: Bearer YOUR_API_KEY\"\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Model.delete(\"curie:ft-acmeco-2021-03-03-21-44-20\")\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.deleteModel('curie:ft-acmeco-2021-03-03-21-44-20');\n response: |\n {\n \"id\": \"curie:ft-acmeco-2021-03-03-21-44-20\",\n \"object\": \"model\",\n \"deleted\": true\n }\n\n /moderations:\n post:\n operationId: createModeration\n tags:\n - OpenAI\n summary: Classifies if text violates OpenAI's Content Policy\n requestBody:\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateModerationRequest'\n responses:\n \"200\":\n description: OK\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/CreateModerationResponse'\n x-oaiMeta:\n name: Create moderation\n group: moderations\n path: create\n examples:\n curl: |\n curl https://api.openai.com/v1/moderations \\\n -H 'Content-Type: application/json' \\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\n -d '{\n \"input\": \"I want to kill them.\"\n }'\n python: |\n import os\n import openai\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n openai.Moderation.create(\n input=\"I want to kill them.\",\n )\n node.js: |\n const { Configuration, OpenAIApi } = require(\"openai\");\n const configuration = new Configuration({\n apiKey: process.env.OPENAI_API_KEY,\n });\n const openai = new OpenAIApi(configuration);\n const response = await openai.createModeration({\n input: \"I want to kill them.\",\n });\n parameters: |\n {\n \"input\": \"I want to kill them.\"\n }\n response: |\n {\n \"id\": \"modr-5MWoLO\",\n \"model\": \"text-moderation-001\",\n \"results\": [\n {\n \"categories\": {\n \"hate\": false,\n \"hate/threatening\": true,\n \"self-harm\": false,\n \"sexual\": false,\n \"sexual/minors\": false,\n \"violence\": true,\n \"violence/graphic\": false\n },\n \"category_scores\": {\n \"hate\": 0.22714105248451233,\n \"hate/threatening\": 0.4132447838783264,\n \"self-harm\": 0.005232391878962517,\n \"sexual\": 0.01407341007143259,\n \"sexual/minors\": 0.0038522258400917053,\n \"violence\": 0.9223177433013916,\n \"violence/graphic\": 0.036865197122097015\n },\n \"flagged\": true\n }\n ]\n }\n\ncomponents:\n schemas:\n ListEnginesResponse:\n type: object\n properties:\n object:\n type: string\n data:\n type: array\n items:\n $ref: '#/components/schemas/Engine'\n required:\n - object\n - data\n\n ListModelsResponse:\n type: object\n properties:\n object:\n type: string\n data:\n type: array\n items:\n $ref: '#/components/schemas/Model'\n required:\n - object\n - data\n\n DeleteModelResponse:\n type: object\n properties:\n id:\n type: string\n object:\n type: string\n deleted:\n type: boolean\n required:\n - id\n - object\n - 
deleted\n\n CreateCompletionRequest:\n type: object\n properties:\n model: &model_configuration\n description: ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n type: string\n prompt:\n description: &completions_prompt_description |\n The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\n Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n default: '<|endoftext|>'\n nullable: true\n oneOf:\n - type: string\n default: ''\n example: \"This is a test.\"\n - type: array\n items:\n type: string\n default: ''\n example: \"This is a test.\"\n - type: array\n minItems: 1\n items:\n type: integer\n example: \"[1212, 318, 257, 1332, 13]\"\n - type: array\n minItems: 1\n items:\n type: array\n minItems: 1\n items:\n type: integer\n example: \"[[1212, 318, 257, 1332, 13]]\"\n suffix:\n description:\n The suffix that comes after a completion of inserted text.\n default: null\n nullable: true\n type: string\n example: \"test.\"\n max_tokens:\n type: integer\n minimum: 0\n default: 16\n example: 16\n nullable: true\n description: &completions_max_tokens_description |\n The maximum number of [tokens](/tokenizer) to generate in the completion.\n\n The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).\n temperature:\n type: number\n minimum: 0\n maximum: 2\n default: 1\n example: 1\n nullable: true\n description: &completions_temperature_description |\n What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\n We generally recommend altering this or `top_p` but not both.\n top_p:\n type: number\n minimum: 0\n maximum: 1\n default: 1\n example: 1\n nullable: true\n description: &completions_top_p_description |\n An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\n We generally recommend altering this or `temperature` but not both.\n n:\n type: integer\n minimum: 1\n maximum: 128\n default: 1\n example: 1\n nullable: true\n description: &completions_completions_description |\n How many completions to generate for each prompt.\n\n **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n stream:\n description: >\n Whether to stream back partial progress. 
If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)\n as they become available, with the stream terminated by a `data: [DONE]` message.\n type: boolean\n nullable: true\n default: false\n logprobs: &completions_logprobs_configuration\n type: integer\n minimum: 0\n maximum: 5\n default: null\n nullable: true\n description: &completions_logprobs_description |\n Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n\n The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case.\n echo:\n type: boolean\n default: false\n nullable: true\n description: &completions_echo_description >\n Echo back the prompt in addition to the completion\n stop:\n description: &completions_stop_description >\n Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.\n default: null\n nullable: true\n oneOf:\n - type: string\n default: <|endoftext|>\n example: \"\\n\"\n nullable: true\n - type: array\n minItems: 1\n maxItems: 4\n items:\n type: string\n example: '[\"\\n\"]'\n presence_penalty:\n type: number\n default: 0\n minimum: -2\n maximum: 2\n nullable: true\n description: &completions_presence_penalty_description |\n Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)\n frequency_penalty:\n type: number\n default: 0\n minimum: -2\n maximum: 2\n nullable: true\n description: &completions_frequency_penalty_description |\n Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)\n best_of:\n type: integer\n default: 1\n minimum: 0\n maximum: 20\n nullable: true\n description: &completions_best_of_description |\n Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed.\n\n When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.\n\n **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n logit_bias: &completions_logit_bias\n type: object\n x-oaiTypeLabel: map\n default: null\n nullable: true\n description: &completions_logit_bias_description |\n Modify the likelihood of specified tokens appearing in the completion.\n\n Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. 
Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n\n As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.\n user: &end_user_param_configuration\n type: string\n example: user-1234\n description: |\n A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n required:\n - model\n \n CreateCompletionResponse:\n type: object\n properties:\n id:\n type: string\n object:\n type: string\n created:\n type: integer\n model:\n type: string\n choices:\n type: array\n items:\n type: object\n properties:\n text:\n type: string\n index:\n type: integer\n logprobs:\n type: object\n nullable: true\n properties:\n tokens:\n type: array\n items:\n type: string\n token_logprobs:\n type: array\n items:\n type: number\n top_logprobs:\n type: array\n items:\n type: object\n text_offset:\n type: array\n items:\n type: integer\n finish_reason:\n type: string\n usage:\n type: object\n properties:\n prompt_tokens:\n type: integer\n completion_tokens:\n type: integer\n total_tokens:\n type: integer\n required: \n - prompt_tokens\n - completion_tokens\n - total_tokens\n required: \n - id\n - object\n - created\n - model\n - choices\n\n ChatCompletionRequestMessage:\n type: object\n properties:\n role:\n type: string\n enum: [\"system\", \"user\", \"assistant\"]\n description: The role of the author of this message.\n content:\n type: string\n description: The contents of the message\n name:\n type: string\n description: The name of the user in a multi-user chat\n required: \n - role\n - content\n\n ChatCompletionResponseMessage:\n type: object\n properties:\n role:\n type: string\n enum: [\"system\", \"user\", \"assistant\"]\n description: The role of the author of this message.\n content:\n type: string\n description: The contents of the message\n required: \n - role\n - content\n\n CreateChatCompletionRequest:\n type: object\n properties:\n model:\n description: ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.\n type: string\n messages:\n description: The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).\n type: array\n minItems: 1\n items:\n $ref: '#/components/schemas/ChatCompletionRequestMessage'\n temperature:\n type: number\n minimum: 0\n maximum: 2\n default: 1\n example: 1\n nullable: true\n description: *completions_temperature_description\n top_p:\n type: number\n minimum: 0\n maximum: 1\n default: 1\n example: 1\n nullable: true\n description: *completions_top_p_description\n n:\n type: integer\n minimum: 1\n maximum: 128\n default: 1\n example: 1\n nullable: true\n description: How many chat completion choices to generate for each input message.\n stream:\n description: >\n If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)\n as they become available, with the stream terminated by a `data: [DONE]` message.\n type: boolean\n nullable: true\n default: false\n stop:\n description: |\n Up to 4 sequences where the API will stop generating further tokens.\n default: null\n oneOf:\n - type: string\n nullable: true\n - type: array\n minItems: 1\n maxItems: 4\n items:\n type: string\n max_tokens:\n description: |\n The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).\n default: inf\n type: integer\n presence_penalty:\n type: number\n default: 0\n minimum: -2\n maximum: 2\n nullable: true\n description: *completions_presence_penalty_description\n frequency_penalty:\n type: number\n default: 0\n minimum: -2\n maximum: 2\n nullable: true\n description: *completions_frequency_penalty_description\n logit_bias:\n type: object\n x-oaiTypeLabel: map\n default: null\n nullable: true\n description: |\n Modify the likelihood of specified tokens appearing in the completion.\n\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n user: *end_user_param_configuration\n required:\n - model\n - messages\n\n CreateChatCompletionResponse:\n type: object\n properties:\n id:\n type: string\n object:\n type: string\n created:\n type: integer\n model:\n type: string\n choices:\n type: array\n items:\n type: object\n properties:\n index:\n type: integer\n message:\n $ref: '#/components/schemas/ChatCompletionResponseMessage'\n finish_reason:\n type: string\n usage:\n type: object\n properties:\n prompt_tokens:\n type: integer\n completion_tokens:\n type: integer\n total_tokens:\n type: integer\n required: \n - prompt_tokens\n - completion_tokens\n - total_tokens\n required: \n - id\n - object\n - created\n - model\n - choices\n\n CreateEditRequest:\n type: object\n properties:\n model:\n description: ID of the model to use. 
You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint.\n type: string\n input:\n description:\n The input text to use as a starting point for the edit.\n type: string\n default: ''\n nullable: true\n example: \"What day of the wek is it?\"\n instruction:\n description:\n The instruction that tells the model how to edit the prompt.\n type: string\n example: \"Fix the spelling mistakes.\"\n n:\n type: integer\n minimum: 1\n maximum: 20\n default: 1\n example: 1\n nullable: true\n description:\n How many edits to generate for the input and instruction.\n temperature:\n type: number\n minimum: 0\n maximum: 2\n default: 1\n example: 1\n nullable: true\n description: *completions_temperature_description\n top_p:\n type: number\n minimum: 0\n maximum: 1\n default: 1\n example: 1\n nullable: true\n description: *completions_top_p_description\n required:\n - model\n - instruction\n\n CreateEditResponse:\n type: object\n properties:\n object:\n type: string\n created:\n type: integer\n choices:\n type: array\n items:\n type: object\n properties:\n text:\n type: string\n index:\n type: integer\n logprobs:\n type: object\n nullable: true\n properties:\n tokens:\n type: array\n items:\n type: string\n token_logprobs:\n type: array\n items:\n type: number\n top_logprobs:\n type: array\n items:\n type: object\n text_offset:\n type: array\n items:\n type: integer\n finish_reason:\n type: string\n usage:\n type: object\n properties:\n prompt_tokens:\n type: integer\n completion_tokens:\n type: integer\n total_tokens:\n type: integer\n required: \n - prompt_tokens\n - completion_tokens\n - total_tokens\n required: \n - object\n - created\n - choices\n - usage\n\n CreateImageRequest:\n type: object\n properties:\n prompt:\n description: A text description of the desired image(s). The maximum length is 1000 characters.\n type: string\n example: \"A cute baby sea otter\"\n n: &images_n\n type: integer\n minimum: 1\n maximum: 10\n default: 1\n example: 1\n nullable: true\n description: The number of images to generate. Must be between 1 and 10.\n size: &images_size\n type: string\n enum: [\"256x256\", \"512x512\", \"1024x1024\"]\n default: \"1024x1024\"\n example: \"1024x1024\"\n nullable: true\n description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.\n response_format: &images_response_format\n type: string\n enum: [\"url\", \"b64_json\"]\n default: \"url\"\n example: \"url\"\n nullable: true\n description: The format in which the generated images are returned. Must be one of `url` or `b64_json`.\n user: *end_user_param_configuration\n required:\n - prompt\n\n ImagesResponse:\n properties:\n created:\n type: integer\n data:\n type: array\n items:\n type: object\n properties:\n url:\n type: string\n b64_json:\n type: string\n required:\n - created\n - data\n\n CreateImageEditRequest:\n type: object\n properties:\n image:\n description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.\n type: string\n format: binary\n mask:\n description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.\n type: string\n format: binary\n prompt:\n description: A text description of the desired image(s). 
The maximum length is 1000 characters.\n type: string\n example: \"A cute baby sea otter wearing a beret\"\n n: *images_n\n size: *images_size\n response_format: *images_response_format\n user: *end_user_param_configuration\n required:\n - prompt\n - image\n\n CreateImageVariationRequest:\n type: object\n properties:\n image:\n description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.\n type: string\n format: binary\n n: *images_n\n size: *images_size\n response_format: *images_response_format\n user: *end_user_param_configuration\n required:\n - image\n\n CreateModerationRequest:\n type: object\n properties:\n input:\n description: The input text to classify\n oneOf:\n - type: string\n default: ''\n example: \"I want to kill them.\"\n - type: array\n items:\n type: string\n default: ''\n example: \"I want to kill them.\"\n model:\n description: |\n Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.\n\n The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.\n type: string\n nullable: false\n default: \"text-moderation-latest\"\n example: \"text-moderation-stable\"\n required:\n - input\n\n CreateModerationResponse:\n type: object\n properties:\n id:\n type: string\n model:\n type: string\n results:\n type: array\n items:\n type: object\n properties:\n flagged:\n type: boolean\n categories:\n type: object\n properties:\n hate:\n type: boolean\n hate/threatening:\n type: boolean\n self-harm:\n type: boolean\n sexual:\n type: boolean\n sexual/minors:\n type: boolean\n violence:\n type: boolean\n violence/graphic:\n type: boolean\n required: \n - hate\n - hate/threatening\n - self-harm\n - sexual\n - sexual/minors\n - violence\n - violence/graphic\n category_scores:\n type: object\n properties:\n hate:\n type: number\n hate/threatening:\n type: number\n self-harm:\n type: number\n sexual:\n type: number\n sexual/minors:\n type: number\n violence:\n type: number\n violence/graphic:\n type: number\n required: \n - hate\n - hate/threatening\n - self-harm\n - sexual\n - sexual/minors\n - violence\n - violence/graphic\n required: \n - flagged\n - categories\n - category_scores\n required: \n - id\n - model\n - results\n\n CreateSearchRequest:\n type: object\n properties:\n query:\n description: Query to search against the documents.\n type: string\n example: \"the president\"\n minLength: 1\n documents:\n description: |\n Up to 200 documents to search over, provided as a list of strings.\n\n The maximum document length (in tokens) is 2034 minus the number of tokens in the query.\n\n You should specify either `documents` or a `file`, but not both.\n type: array\n minItems: 1\n maxItems: 200\n items:\n type: string\n nullable: true\n example: \"['White House', 'hospital', 'school']\"\n file:\n description: |\n The ID of an uploaded file that contains documents to search over.\n\n You should specify either `documents` or a `file`, but not both.\n type: string\n nullable: true\n max_rerank:\n description: |\n The maximum number of documents to be re-ranked and returned by search.\n\n This flag only takes effect when `file` is set.\n type: integer\n minimum: 1\n default: 200\n nullable: true\n return_metadata: 
&return_metadata_configuration\n description: |\n A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field.\n\n This flag only takes effect when `file` is set.\n type: boolean\n default: false\n nullable: true\n user: *end_user_param_configuration\n required:\n - query\n\n CreateSearchResponse:\n type: object\n properties:\n object:\n type: string\n model:\n type: string\n data:\n type: array\n items:\n type: object\n properties:\n object:\n type: string\n document:\n type: integer\n score:\n type: number\n\n ListFilesResponse:\n type: object\n properties:\n object:\n type: string\n data:\n type: array\n items:\n $ref: '#/components/schemas/OpenAIFile'\n required: \n - object\n - data\n\n CreateFileRequest:\n type: object\n additionalProperties: false\n properties:\n file:\n description: |\n Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded.\n\n If the `purpose` is set to \"fine-tune\", each line is a JSON record with \"prompt\" and \"completion\" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).\n type: string\n format: binary\n purpose:\n description: |\n The intended purpose of the uploaded documents.\n\n Use \"fine-tune\" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.\n\n type: string\n required:\n - file\n - purpose\n\n DeleteFileResponse:\n type: object\n properties:\n id:\n type: string\n object:\n type: string\n deleted:\n type: boolean\n required: \n - id\n - object\n - deleted\n\n CreateAnswerRequest:\n type: object\n additionalProperties: false\n properties:\n model:\n description: ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.\n type: string\n question:\n description: Question to get answered.\n type: string\n minLength: 1\n example: \"What is the capital of Japan?\"\n examples:\n description: List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples.\n type: array\n minItems: 1\n maxItems: 200\n items:\n type: array\n minItems: 2\n maxItems: 2\n items:\n type: string\n minLength: 1\n example: \"[['What is the capital of Canada?', 'Ottawa'], ['Which province is Ottawa in?', 'Ontario']]\"\n examples_context:\n description: A text snippet containing the contextual information used to generate the answers for the `examples` you provide.\n type: string\n example: \"Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. border.\"\n documents:\n description: |\n List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples.\n\n You should specify either `documents` or a `file`, but not both.\n type: array\n maxItems: 200\n items:\n type: string\n example: \"['Japan is an island country in East Asia, located in the northwest Pacific Ocean.', 'Tokyo is the capital and most populous prefecture of Japan.']\"\n nullable: true\n file:\n description: |\n The ID of an uploaded file that contains documents to search over. 
See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose.\n\n You should specify either `documents` or a `file`, but not both.\n type: string\n nullable: true\n search_model: &search_model_configuration\n description: ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.\n type: string\n default: ada\n nullable: true\n max_rerank:\n description: The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost.\n type: integer\n default: 200\n nullable: true\n temperature:\n description: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n type: number\n default: 0\n nullable: true\n logprobs: &context_completions_logprobs_configuration\n type: integer\n minimum: 0\n maximum: 5\n default: null\n nullable: true\n description: |\n Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n\n The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case.\n\n When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.\n max_tokens:\n description: The maximum number of tokens allowed for the generated answer\n type: integer\n default: 16\n nullable: true\n stop:\n description: *completions_stop_description\n default: null\n oneOf:\n - type: string\n default: <|endoftext|>\n example: \"\\n\"\n - type: array\n minItems: 1\n maxItems: 4\n items:\n type: string\n example: '[\"\\n\"]'\n nullable: true\n n:\n description: How many answers to generate for each question.\n type: integer\n minimum: 1\n maximum: 10\n default: 1\n nullable: true\n logit_bias: *completions_logit_bias\n return_metadata: *return_metadata_configuration\n return_prompt: &return_prompt_configuration\n description: If set to `true`, the returned JSON will include a \"prompt\" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes.\n type: boolean\n default: false\n nullable: true\n expand: &expand_configuration\n description: If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. 
Currently we support `completion` and `file` objects for expansion.\n type: array\n items: {}\n nullable: true\n default: []\n user: *end_user_param_configuration\n required:\n - model\n - question\n - examples\n - examples_context\n\n CreateAnswerResponse:\n type: object\n properties:\n object:\n type: string\n model:\n type: string\n search_model:\n type: string\n completion:\n type: string\n answers:\n type: array\n items:\n type: string\n selected_documents:\n type: array\n items:\n type: object\n properties:\n document:\n type: integer\n text:\n type: string\n\n CreateClassificationRequest:\n type: object\n additionalProperties: false\n properties:\n model: *model_configuration\n query:\n description: Query to be classified.\n type: string\n minLength: 1\n example: \"The plot is not very attractive.\"\n examples:\n description: |\n A list of examples with labels, in the following format:\n\n `[[\"The movie is so interesting.\", \"Positive\"], [\"It is quite boring.\", \"Negative\"], ...]`\n\n All the label strings will be normalized to be capitalized.\n\n You should specify either `examples` or `file`, but not both.\n type: array\n minItems: 2\n maxItems: 200\n items:\n type: array\n minItems: 2\n maxItems: 2\n items:\n type: string\n minLength: 1\n example: \"[['Do not see this film.', 'Negative'], ['Smart, provocative and blisteringly funny.', 'Positive']]\"\n nullable: true\n file:\n description: |\n The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose.\n\n You should specify either `examples` or `file`, but not both.\n type: string\n nullable: true\n labels:\n description: The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized.\n type: array\n minItems: 2\n maxItems: 200\n default: null\n items:\n type: string\n example: [\"Positive\", \"Negative\"]\n nullable: true\n search_model: *search_model_configuration\n temperature:\n description:\n What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n type: number\n minimum: 0\n maximum: 2\n default: 0\n nullable: true\n example: 0\n logprobs: *context_completions_logprobs_configuration\n max_examples:\n description: The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. 
Setting it to a higher value leads to improved accuracy but with increased latency and cost.\n type: integer\n default: 200\n nullable: true\n logit_bias: *completions_logit_bias\n return_prompt: *return_prompt_configuration\n return_metadata: *return_metadata_configuration\n expand: *expand_configuration\n user: *end_user_param_configuration\n required:\n - model\n - query\n\n CreateClassificationResponse:\n type: object\n properties:\n object:\n type: string\n model:\n type: string\n search_model:\n type: string\n completion:\n type: string\n label:\n type: string\n selected_examples:\n type: array\n items:\n type: object\n properties:\n document:\n type: integer\n text:\n type: string\n label:\n type: string\n\n CreateFineTuneRequest:\n type: object\n properties:\n training_file:\n description: |\n The ID of an uploaded file that contains training data.\n\n See [upload file](/docs/api-reference/files/upload) for how to upload a file.\n\n Your dataset must be formatted as a JSONL file, where each training\n example is a JSON object with the keys \"prompt\" and \"completion\".\n Additionally, you must upload your file with the purpose `fine-tune`.\n\n See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.\n type: string\n example: \"file-ajSREls59WBbvgSzJSVWxMCB\"\n validation_file:\n description: |\n The ID of an uploaded file that contains validation data.\n\n If you provide this file, the data is used to generate validation\n metrics periodically during fine-tuning. These metrics can be viewed in\n the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model).\n Your train and validation data should be mutually exclusive.\n\n Your dataset must be formatted as a JSONL file, where each validation\n example is a JSON object with the keys \"prompt\" and \"completion\".\n Additionally, you must upload your file with the purpose `fine-tune`.\n\n See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.\n type: string\n nullable: true\n example: \"file-XjSREls59WBbvgSzJSVWxMCa\"\n model:\n description: |\n The name of the base model to fine-tune. You can select one of \"ada\",\n \"babbage\", \"curie\", \"davinci\", or a fine-tuned model created after 2022-04-21.\n To learn more about these models, see the\n [Models](https://platform.openai.com/docs/models) documentation.\n default: \"curie\"\n type: string\n nullable: true\n n_epochs:\n description: |\n The number of epochs to train the model for. An epoch refers to one\n full cycle through the training dataset.\n default: 4\n type: integer\n nullable: true\n batch_size:\n description: |\n The batch size to use for training. The batch size is the number of\n training examples used to train a single forward and backward pass.\n\n By default, the batch size will be dynamically configured to be\n ~0.2% of the number of examples in the training set, capped at 256 -\n in general, we've found that larger batch sizes tend to work better\n for larger datasets.\n default: null\n type: integer\n nullable: true\n learning_rate_multiplier:\n description: |\n The learning rate multiplier to use for training.\n The fine-tuning learning rate is the original learning rate used for\n pretraining multiplied by this value.\n\n By default, the learning rate multiplier is the 0.05, 0.1, or 0.2\n depending on final `batch_size` (larger learning rates tend to\n perform better with larger batch sizes). 
We recommend experimenting\n with values in the range 0.02 to 0.2 to see what produces the best\n results.\n default: null\n type: number\n nullable: true\n prompt_loss_weight:\n description: |\n The weight to use for loss on the prompt tokens. This controls how\n much the model tries to learn to generate the prompt (as compared\n to the completion which always has a weight of 1.0), and can add\n a stabilizing effect to training when completions are short.\n\n If prompts are extremely long (relative to completions), it may make\n sense to reduce this weight so as to avoid over-prioritizing\n learning the prompt.\n default: 0.01\n type: number\n nullable: true\n compute_classification_metrics:\n description: |\n If set, we calculate classification-specific metrics such as accuracy\n and F-1 score using the validation set at the end of every epoch.\n These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model).\n\n In order to compute classification metrics, you must provide a\n `validation_file`. Additionally, you must\n specify `classification_n_classes` for multiclass classification or\n `classification_positive_class` for binary classification.\n type: boolean\n default: false\n nullable: true\n classification_n_classes:\n description: |\n The number of classes in a classification task.\n\n This parameter is required for multiclass classification.\n type: integer\n default: null\n nullable: true\n classification_positive_class:\n description: |\n The positive class in binary classification.\n\n This parameter is needed to generate precision, recall, and F1\n metrics when doing binary classification.\n type: string\n default: null\n nullable: true\n classification_betas:\n description: |\n If this is provided, we calculate F-beta scores at the specified\n beta values. The F-beta score is a generalization of F-1 score.\n This is only used for binary classification.\n\n With a beta of 1 (i.e. the F-1 score), precision and recall are\n given the same weight. A larger beta score puts more weight on\n recall and less on precision. A smaller beta score puts more weight\n on precision and less on recall.\n type: array\n items:\n type: number\n example: [0.6, 1, 1.5, 2]\n default: null\n nullable: true\n suffix:\n description: |\n A string of up to 40 characters that will be added to your fine-tuned model name.\n\n For example, a `suffix` of \"custom-model-name\" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.\n type: string\n minLength: 1\n maxLength: 40\n default: null\n nullable: true\n required:\n - training_file\n\n ListFineTunesResponse:\n type: object\n properties:\n object:\n type: string\n data:\n type: array\n items:\n $ref: '#/components/schemas/FineTune'\n required: \n - object\n - data\n\n ListFineTuneEventsResponse:\n type: object\n properties:\n object:\n type: string\n data:\n type: array\n items:\n $ref: '#/components/schemas/FineTuneEvent'\n required: \n - object\n - data\n\n CreateEmbeddingRequest:\n type: object\n additionalProperties: false\n properties:\n model: *model_configuration\n input:\n description: |\n Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. 
Each input must not exceed 8192 tokens in length.\n example: \"The quick brown fox jumped over the lazy dog\"\n oneOf:\n - type: string\n default: ''\n example: \"This is a test.\"\n - type: array\n items:\n type: string\n default: ''\n example: \"This is a test.\"\n - type: array\n minItems: 1\n items:\n type: integer\n example: \"[1212, 318, 257, 1332, 13]\"\n - type: array\n minItems: 1\n items:\n type: array\n minItems: 1\n items:\n type: integer\n example: \"[[1212, 318, 257, 1332, 13]]\"\n user: *end_user_param_configuration\n required:\n - model\n - input\n\n CreateEmbeddingResponse:\n type: object\n properties:\n object:\n type: string\n model:\n type: string\n data:\n type: array\n items:\n type: object\n properties:\n index:\n type: integer\n object:\n type: string\n embedding:\n type: array\n items:\n type: number\n required: \n - index\n - object\n - embedding\n usage:\n type: object\n properties:\n prompt_tokens:\n type: integer\n total_tokens:\n type: integer\n required: \n - prompt_tokens\n - total_tokens\n required: \n - object\n - model\n - data\n - usage\n\n CreateTranscriptionRequest:\n type: object\n additionalProperties: false\n properties:\n file: \n description: |\n The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.\n type: string\n format: binary\n model: \n description: |\n ID of the model to use. Only `whisper-1` is currently available.\n type: string\n prompt:\n description: |\n An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.\n type: string\n response_format:\n description: |\n The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.\n type: string\n default: json\n temperature:\n description: |\n The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.\n type: number\n default: 0\n language:\n description: |\n The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.\n type: string\n required:\n - file\n - model\n\n # Note: This does not currently support the non-default response format types. \n CreateTranscriptionResponse:\n type: object\n properties:\n text:\n type: string\n required: \n - text\n\n CreateTranslationRequest:\n type: object\n additionalProperties: false\n properties:\n file: \n description: |\n The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.\n type: string\n format: binary\n model: \n description: |\n ID of the model to use. Only `whisper-1` is currently available.\n type: string\n prompt:\n description: |\n An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.\n type: string\n response_format:\n description: |\n The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.\n type: string\n default: json\n temperature:\n description: |\n The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.\n type: number\n default: 0\n required:\n - file\n - model\n\n # Note: This does not currently support the non-default response format types. \n CreateTranslationResponse:\n type: object\n properties:\n text:\n type: string\n required: \n - text\n\n Engine:\n title: Engine\n properties:\n id:\n type: string\n object:\n type: string\n created:\n type: integer\n nullable: true\n ready:\n type: boolean\n required: \n - id\n - object\n - created\n - ready\n\n Model:\n title: Model\n properties:\n id:\n type: string\n object:\n type: string\n created:\n type: integer\n owned_by:\n type: string\n required: \n - id\n - object\n - created\n - owned_by\n\n OpenAIFile:\n title: OpenAIFile\n properties:\n id:\n type: string\n object:\n type: string\n bytes:\n type: integer\n created_at:\n type: integer\n filename:\n type: string\n purpose:\n type: string\n status:\n type: string\n status_details:\n type: object\n nullable: true\n required: \n - id\n - object\n - bytes\n - created_at\n - filename\n - purpose\n\n FineTune:\n title: FineTune\n properties:\n id:\n type: string\n object:\n type: string\n created_at:\n type: integer\n updated_at:\n type: integer\n model:\n type: string\n fine_tuned_model:\n type: string\n nullable: true\n organization_id:\n type: string\n status:\n type: string\n hyperparams:\n type: object\n training_files:\n type: array\n items:\n $ref: '#/components/schemas/OpenAIFile'\n validation_files:\n type: array\n items:\n $ref: '#/components/schemas/OpenAIFile'\n result_files:\n type: array\n items:\n $ref: '#/components/schemas/OpenAIFile'\n events:\n type: array\n items:\n $ref: '#/components/schemas/FineTuneEvent'\n required: \n - id\n - object\n - created_at\n - updated_at\n - model\n - fine_tuned_model\n - organization_id\n - status\n - hyperparams\n - training_files\n - validation_files\n - result_files\n\n FineTuneEvent:\n title: FineTuneEvent\n properties:\n object:\n type: string\n created_at:\n type: integer\n level:\n type: string\n message:\n type: string\n required: \n - object\n - created_at\n - level\n - message\n\nx-oaiMeta:\n groups:\n - id: models\n title: Models\n description: |\n List and describe the various models available in the API. 
You can refer to the [Models](/docs/models) documentation to understand what models are available and the differences between them.\n - id: completions\n title: Completions\n description: |\n Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.\n - id: chat\n title: Chat\n description: |\n Given a chat conversation, the model will return a chat completion response.\n - id: edits\n title: Edits\n description: |\n Given a prompt and an instruction, the model will return an edited version of the prompt.\n - id: images\n title: Images\n description: |\n Given a prompt and/or an input image, the model will generate a new image.\n\n Related guide: [Image generation](/docs/guides/images)\n - id: embeddings\n title: Embeddings\n description: |\n Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.\n\n Related guide: [Embeddings](/docs/guides/embeddings)\n - id: audio\n title: Audio\n description: |\n Learn how to turn audio into text.\n\n Related guide: [Speech to text](/docs/guides/speech-to-text)\n - id: files\n title: Files\n description: |\n Files are used to upload documents that can be used with features like [Fine-tuning](/docs/api-reference/fine-tunes).\n - id: fine-tunes\n title: Fine-tunes\n description: |\n Manage fine-tuning jobs to tailor a model to your specific training data.\n\n Related guide: [Fine-tune models](/docs/guides/fine-tuning)\n - id: moderations\n title: Moderations\n description: |\n Given a input text, outputs if the model classifies it as violating OpenAI's content policy.\n\n Related guide: [Moderations](/docs/guides/moderation)\n - id: searches\n title: Searches\n warning:\n title: This endpoint is deprecated and will be removed on December 3rd, 2022\n message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6272952-search-transition-guide).\n description: |\n Given a query and a set of documents or labels, the model ranks each document based on its semantic similarity to the provided query.\n\n Related guide: [Search](/docs/guides/search)\n - id: classifications\n title: Classifications\n warning:\n title: This endpoint is deprecated and will be removed on December 3rd, 2022\n message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6272941-classifications-transition-guide).\n description: |\n Given a query and a set of labeled examples, the model will predict the most likely label for the query. Useful as a drop-in replacement for any ML classification or text-to-label task.\n\n Related guide: [Classification](/docs/guides/classifications)\n - id: answers\n title: Answers\n warning:\n title: This endpoint is deprecated and will be removed on December 3rd, 2022\n message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6233728-answers-transition-guide).\n description: |\n Given a question, a set of documents, and some examples, the API generates an answer to the question based on the information in the set of documents. 
This is useful for question-answering applications on sources of truth, like company documentation or a knowledge base.\n\n Related guide: [Question answering](/docs/guides/answers)\n - id: engines\n title: Engines\n description: These endpoints describe and provide access to the various engines available in the API.\n warning:\n title: The Engines endpoints are deprecated.\n message: Please use their replacement, [Models](/docs/api-reference/models), instead. [Learn more](https://help.openai.com/TODO).\n", 7 | "url": "https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml", 8 | "output": null, 9 | "newLineBehavior": "Auto" 10 | } 11 | }, 12 | "codeGenerators": { 13 | "openApiToCSharpClient": { 14 | "clientBaseClass": null, 15 | "configurationClass": null, 16 | "generateClientClasses": true, 17 | "generateClientInterfaces": false, 18 | "clientBaseInterface": null, 19 | "injectHttpClient": true, 20 | "disposeHttpClient": true, 21 | "protectedMethods": [], 22 | "generateExceptionClasses": true, 23 | "exceptionClass": "OpenAiApiException", 24 | "wrapDtoExceptions": true, 25 | "useHttpClientCreationMethod": false, 26 | "httpClientType": "System.Net.Http.HttpClient", 27 | "useHttpRequestMessageCreationMethod": false, 28 | "useBaseUrl": true, 29 | "generateBaseUrlProperty": true, 30 | "generateSyncMethods": true, 31 | "generatePrepareRequestAndProcessResponseAsAsyncMethods": false, 32 | "exposeJsonSerializerSettings": false, 33 | "clientClassAccessModifier": "public", 34 | "typeAccessModifier": "public", 35 | "generateContractsOutput": false, 36 | "contractsNamespace": null, 37 | "contractsOutputFilePath": null, 38 | "parameterDateTimeFormat": "s", 39 | "parameterDateFormat": "yyyy-MM-dd", 40 | "generateUpdateJsonSerializerSettingsMethod": true, 41 | "useRequestAndResponseSerializationSettings": false, 42 | "serializeTypeInformation": false, 43 | "queryNullValue": "", 44 | "className": "{controller}Client", 45 | "operationGenerationMode": "MultipleClientsFromOperationId", 46 | "additionalNamespaceUsages": [], 47 | "additionalContractNamespaceUsages": [], 48 | "generateOptionalParameters": true, 49 | "generateJsonMethods": false, 50 | "enforceFlagEnums": false, 51 | "parameterArrayType": "System.Collections.Generic.IEnumerable", 52 | "parameterDictionaryType": "System.Collections.Generic.IDictionary", 53 | "responseArrayType": "System.Collections.Generic.ICollection", 54 | "responseDictionaryType": "System.Collections.Generic.IDictionary", 55 | "wrapResponses": false, 56 | "wrapResponseMethods": [], 57 | "generateResponseClasses": true, 58 | "responseClass": "SwaggerResponse", 59 | "namespace": "OpenAI", 60 | "requiredPropertiesMustBeDefined": false, 61 | "dateType": "System.DateTimeOffset", 62 | "jsonConverters": null, 63 | "anyType": "object", 64 | "dateTimeType": "System.DateTimeOffset", 65 | "timeType": "System.TimeSpan", 66 | "timeSpanType": "System.TimeSpan", 67 | "arrayType": "System.Collections.Generic.ICollection", 68 | "arrayInstanceType": "System.Collections.ObjectModel.Collection", 69 | "dictionaryType": "System.Collections.Generic.IDictionary", 70 | "dictionaryInstanceType": "System.Collections.Generic.Dictionary", 71 | "arrayBaseType": "System.Collections.ObjectModel.Collection", 72 | "dictionaryBaseType": "System.Collections.Generic.Dictionary", 73 | "classStyle": "Record", 74 | "jsonLibrary": "SystemTextJson", 75 | "generateDefaultValues": true, 76 | "generateDataAnnotations": false, 77 | "excludedTypeNames": [], 78 | "excludedParameterNames": [], 79 | 
"handleReferences": false, 80 | "generateImmutableArrayProperties": false, 81 | "generateImmutableDictionaryProperties": false, 82 | "jsonSerializerSettingsTransformationMethod": null, 83 | "inlineNamedArrays": false, 84 | "inlineNamedDictionaries": false, 85 | "inlineNamedTuples": true, 86 | "inlineNamedAny": false, 87 | "generateDtoTypes": true, 88 | "generateOptionalPropertiesAsNullable": false, 89 | "generateNullableReferenceTypes": false, 90 | "templateDirectory": null, 91 | "typeNameGeneratorType": null, 92 | "propertyNameGeneratorType": null, 93 | "enumNameGeneratorType": null, 94 | "serviceHost": null, 95 | "serviceSchemes": null, 96 | "output": "OpenAI.cs", 97 | "newLineBehavior": "Auto" 98 | } 99 | } 100 | } --------------------------------------------------------------------------------