├── OpenAI_API
├── nuget_logo.png
├── Usage.cs
├── Audio
│ ├── AudioResult.cs
│ ├── AudioRequest.cs
│ ├── TextToSpeechRequest.cs
│ ├── ITextToSpeechEndpoint.cs
│ ├── TextToSpeechEndpoint.cs
│ └── ITranscriptionEndpoint.cs
├── Images
│ ├── IImageGenerationEndpoint.cs
│ ├── ImageResult.cs
│ ├── ImageResponseFormat.cs
│ ├── ImageGenerationEndpoint.cs
│ ├── ImageSize.cs
│ └── ImageGenerationRequest.cs
├── Moderation
│ ├── IModerationEndpoint.cs
│ ├── ModerationEndpoint.cs
│ ├── ModerationRequest.cs
│ └── ModerationResult.cs
├── Model
│ ├── IModelsEndpoint.cs
│ └── ModelsEndpoint.cs
├── Embedding
│ ├── EmbeddingResult.cs
│ ├── IEmbeddingEndpoint.cs
│ ├── EmbeddingRequest.cs
│ └── EmbeddingEndpoint.cs
├── Files
│ ├── File.cs
│ ├── IFilesEndpoint.cs
│ └── FilesEndpoint.cs
├── ApiResultBase.cs
├── OpenAI_API.csproj
├── Completions
│ ├── CompletionResult.cs
│ └── CompletionRequest.cs
├── Chat
│ ├── ChatResult.cs
│ ├── ChatMessageRole.cs
│ ├── ChatRequest.cs
│ ├── ChatMessage.cs
│ └── IChatEndpoint.cs
├── IOpenAIAPI.cs
├── OpenAIAPI.cs
└── APIAuthentication.cs
├── OpenAI_Tests
├── chinese-test.m4a
├── english-test.m4a
├── ChatMessageRoleTests.cs
├── OpenAI_Tests.csproj
├── HttpClientResolutionTests.cs
├── FilesEndpointTests.cs
├── TextToSpeechTests.cs
├── ModelEndpointTests.cs
├── EmbeddingEndpointTests.cs
├── AuthTests.cs
├── ModerationEndpointTests.cs
├── TranscriptionTests.cs
├── fine-tuning-data.jsonl
├── ImageGenerationEndpointTests.cs
└── ChatVisionTests.cs
├── LICENSE.md
├── OpenAI_API.sln
├── .gitattributes
└── .gitignore
/OpenAI_API/nuget_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OkGoDoIt/OpenAI-API-dotnet/HEAD/OpenAI_API/nuget_logo.png
--------------------------------------------------------------------------------
/OpenAI_Tests/chinese-test.m4a:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OkGoDoIt/OpenAI-API-dotnet/HEAD/OpenAI_Tests/chinese-test.m4a
--------------------------------------------------------------------------------
/OpenAI_Tests/english-test.m4a:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OkGoDoIt/OpenAI-API-dotnet/HEAD/OpenAI_Tests/english-test.m4a
--------------------------------------------------------------------------------
/OpenAI_Tests/ChatMessageRoleTests.cs:
--------------------------------------------------------------------------------
1 | using NUnit.Framework;
2 | using OpenAI_API.Chat;
3 |
4 | namespace OpenAI_Tests
5 | {
6 | public class ChatMessageRoleTests
7 | {
8 | [Test]
9 | public void TestImplicitConversionNotThrowing()
10 | {
11 | // ReSharper disable once UnusedVariable
12 | string result = ChatMessageRole.System;
13 | }
14 |
15 | [Test]
16 | public void TestImplicitConversionValue()
17 | {
18 | string result = ChatMessageRole.System;
19 | Assert.AreEqual("system", result);
20 | }
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/OpenAI_API/Usage.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Text;
5 |
6 | namespace OpenAI_API
7 | {
	/// <summary>
	/// Usage statistics of how many tokens have been used for this request.
	/// </summary>
	public class Usage
	{
		/// <summary>
		/// How many tokens did the prompt consist of
		/// </summary>
		[JsonProperty("prompt_tokens")]
		public int PromptTokens { get; set; }

		/// <summary>
		/// How many tokens did the request consume in total (prompt plus any generated output)
		/// </summary>
		[JsonProperty("total_tokens")]
		public int TotalTokens { get; set; }

	}
26 | }
27 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
[CC0 1.0 Universal (Public Domain Dedication)](http://creativecommons.org/publicdomain/zero/1.0/)
2 |
3 | *To the extent possible under law, Roger Pincombe and all other contributors have waived all copyright and related or neighboring rights to OpenAI-API-dotnet. This work is published from: United States.*
4 |
5 | This library is licensed CC-0, in the public domain. You can use it for whatever you want, publicly or privately, without worrying about permission or licensing or whatever. It's just a wrapper around the OpenAI API, so you still need to get access to OpenAI from them directly. I am not affiliated with OpenAI and this library is not endorsed by them, I just have beta access and wanted to make a C# library to access it more easily. Hopefully others find this useful as well. Feel free to open a PR if there's anything you want to contribute.
6 |
--------------------------------------------------------------------------------
/OpenAI_API/Audio/AudioResult.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Text;
4 |
5 | namespace OpenAI_API.Audio
6 | {
7 | ///
8 | /// Represents a verbose_json output from the OpenAI Transcribe or Translate endpoints.
9 | ///
10 | public class AudioResultVerbose : ApiResultBase
11 | {
12 | public double duration { get; set; }
13 | public string language { get; set; }
14 | public List segments { get; set; }
15 | public string task { get; set; }
16 | public string text { get; set; }
17 |
18 | public class Segment
19 | {
20 | public double avg_logprob { get; set; }
21 | public double compression_ratio { get; set; }
22 | public double end { get; set; }
23 | public int id { get; set; }
24 | public double no_speech_prob { get; set; }
25 | public int seek { get; set; }
26 | public double start { get; set; }
27 | public double temperature { get; set; }
28 | public string text { get; set; }
29 | public List tokens { get; set; }
30 | }
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/OpenAI_Tests/OpenAI_Tests.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | netcoreapp3.1
5 |
6 | false
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 | PreserveNewest
24 |
25 |
26 | PreserveNewest
27 |
28 |
29 | PreserveNewest
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/OpenAI_API/Images/IImageGenerationEndpoint.cs:
--------------------------------------------------------------------------------
1 | using System.Threading.Tasks;
2 | using OpenAI_API.Models;
3 |
4 | namespace OpenAI_API.Images
5 | {
6 | ///
7 | /// An interface for . Given a prompt, the model will generate a new image.
8 | ///
9 | public interface IImageGenerationEndpoint
10 | {
11 | ///
12 | /// Ask the API to Creates an image given a prompt.
13 | ///
14 | /// Request to be send
15 | /// Asynchronously returns the image result. Look in its
16 | Task CreateImageAsync(ImageGenerationRequest request);
17 |
18 | ///
19 | /// Ask the API to Creates an image given a prompt.
20 | ///
21 | /// A text description of the desired image(s)
22 | /// The model to use for generating the image. Defaults to .
23 | /// Asynchronously returns the image result. Look in its
24 | Task CreateImageAsync(string input, Model model = null);
25 | }
26 | }
--------------------------------------------------------------------------------
/OpenAI_API/Moderation/IModerationEndpoint.cs:
--------------------------------------------------------------------------------
1 | using OpenAI_API.Models;
2 | using System.Threading.Tasks;
3 |
4 | namespace OpenAI_API.Moderation
5 | {
6 | ///
7 | /// An interface for , which classifies text against the OpenAI Content Policy
8 | ///
9 | public interface IModerationEndpoint
10 | {
11 | ///
12 | /// This allows you to send request to the recommended model without needing to specify. OpenAI recommends using the model
13 | ///
14 | ModerationRequest DefaultModerationRequestArgs { get; set; }
15 |
16 | ///
17 | /// Ask the API to classify the text using a custom request.
18 | ///
19 | /// Request to send to the API
20 | /// Asynchronously returns the classification result
21 | Task CallModerationAsync(ModerationRequest request);
22 |
23 | ///
24 | /// Ask the API to classify the text using the default model.
25 | ///
26 | /// Text to classify
27 | /// Asynchronously returns the classification result
28 | Task CallModerationAsync(string input);
29 | }
30 | }
--------------------------------------------------------------------------------
/OpenAI_API/Images/ImageResult.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Text;
5 |
6 | namespace OpenAI_API.Images
7 | {
8 | ///
9 | /// Represents an image result returned by the Image API.
10 | ///
11 | public class ImageResult : ApiResultBase
12 | {
13 | ///
14 | /// List of results of the embedding
15 | ///
16 | [JsonProperty("data")]
17 | public List Data { get; set; }
18 |
19 | ///
20 | /// Gets the url or base64-encoded image data of the first result, or null if there are no results
21 | ///
22 | ///
23 | public override string ToString()
24 | {
25 | if (Data?.Count > 0)
26 | {
27 | return Data[0].Url ?? Data[0].Base64Data;
28 | }
29 | else
30 | {
31 | return null;
32 | }
33 | }
34 | }
35 |
36 | ///
37 | /// Data returned from the Image API.
38 | ///
39 | public class Data
40 | {
41 | ///
42 | /// The url of the image result
43 | ///
44 | [JsonProperty("url")]
45 |
46 | public string Url { get; set; }
47 |
48 | ///
49 | /// The base64-encoded image data as returned by the API
50 | ///
51 | [JsonProperty("b64_json")]
52 | public string Base64Data { get; set; }
53 |
54 | ///
55 | /// The prompt that was used to generate the image, if there was any revision to the prompt.
56 | ///
57 | public string RevisedPrompt { get; set; }
58 |
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/OpenAI_API/Model/IModelsEndpoint.cs:
--------------------------------------------------------------------------------
1 | using System.Collections.Generic;
2 | using System.Threading.Tasks;
3 |
4 | namespace OpenAI_API.Models
5 | {
6 | ///
7 | /// An interface for , for ease of mock testing, etc
8 | ///
9 | public interface IModelsEndpoint
10 | {
11 | ///
12 | /// Get details about a particular Model from the API, specifically properties such as and permissions.
13 | ///
14 | /// The id/name of the model to get more details about
15 | /// Asynchronously returns the with all available properties
16 | Task RetrieveModelDetailsAsync(string id);
17 |
18 | ///
19 | /// Get details about a particular Model from the API, specifically properties such as and permissions.
20 | ///
21 | /// The id/name of the model to get more details about
22 | /// Obsolete: IGNORED
23 | /// Asynchronously returns the with all available properties
24 | Task RetrieveModelDetailsAsync(string id, APIAuthentication auth = null);
25 |
26 | ///
27 | /// List all models via the API
28 | ///
29 | /// Asynchronously returns the list of all s
30 | Task> GetModelsAsync();
31 | }
32 | }
--------------------------------------------------------------------------------
/OpenAI_API/Embedding/EmbeddingResult.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 |
5 | namespace OpenAI_API.Embedding
6 | {
7 | ///
8 | /// Represents an embedding result returned by the Embedding API.
9 | ///
10 | public class EmbeddingResult : ApiResultBase
11 | {
12 | ///
13 | /// List of results of the embedding
14 | ///
15 | [JsonProperty("data")]
16 | public List Data { get; set; }
17 |
18 | ///
19 | /// Usage statistics of how many tokens have been used for this request
20 | ///
21 | [JsonProperty("usage")]
22 | public Usage Usage { get; set; }
23 |
24 | ///
25 | /// Allows an EmbeddingResult to be implicitly cast to the array of floats repsresenting the first ebmedding result
26 | ///
27 | /// The to cast to an array of floats.
28 | public static implicit operator float[](EmbeddingResult embeddingResult)
29 | {
30 | return embeddingResult.Data.FirstOrDefault()?.Embedding;
31 | }
32 | }
33 |
	/// <summary>
	/// Data returned from the Embedding API.
	/// </summary>
	public class Data
	{
		/// <summary>
		/// Type of the response. In case of Data, this will be "embedding"
		/// </summary>
		[JsonProperty("object")]

		public string Object { get; set; }

		/// <summary>
		/// The input text represented as a vector (list) of floating point numbers
		/// </summary>
		[JsonProperty("embedding")]
		public float[] Embedding { get; set; }

		/// <summary>
		/// Index of this embedding within the request's list of inputs
		/// </summary>
		[JsonProperty("index")]
		public int Index { get; set; }

	}
59 |
60 | }
61 |
--------------------------------------------------------------------------------
/OpenAI_API/Files/File.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 |
3 | namespace OpenAI_API.Files
4 | {
	/// <summary>
	/// Represents a single file used with the OpenAI Files endpoint. Files are used to upload and manage documents that can be used with features like Fine-tuning.
	/// </summary>
	public class File : ApiResultBase
	{
		/// <summary>
		/// Unique id for this file, so that it can be referenced in other operations
		/// </summary>
		[JsonProperty("id")]
		public string Id { get; set; }

		/// <summary>
		/// The name of the file
		/// </summary>
		[JsonProperty("filename")]
		public string Name { get; set; }

		/// <summary>
		/// What is the purpose of this file, fine-tune, search, etc
		/// </summary>
		[JsonProperty("purpose")]
		public string Purpose { get; set; }

		/// <summary>
		/// The size of the file in bytes
		/// </summary>
		[JsonProperty("bytes")]
		public long Bytes { get; set; }

		/// <summary>
		/// Timestamp for the creation time of this file (unix epoch seconds)
		/// </summary>
		[JsonProperty("created_at")]
		public long CreatedAt { get; set; }

		/// <summary>
		/// When the object is deleted, this attribute is used in the Delete file operation
		/// </summary>
		[JsonProperty("deleted")]
		public bool Deleted { get; set; }

		/// <summary>
		/// The status of the File (ie when an upload operation was done: "uploaded")
		/// </summary>
		[JsonProperty("status")]
		public string Status { get; set; }

		/// <summary>
		/// The status details, it could be null
		/// </summary>
		[JsonProperty("status_details")]
		public string StatusDetails { get; set; }

	}
59 | }
60 |
--------------------------------------------------------------------------------
/OpenAI_API/Images/ImageResponseFormat.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Text;
5 |
6 | namespace OpenAI_API.Images
7 | {
8 | ///
9 | /// Represents available response formats for image generation endpoints
10 | ///
11 | public class ImageResponseFormat
12 | {
13 | private ImageResponseFormat(string value) { Value = value; }
14 |
15 | private string Value { get; set; }
16 |
17 | ///
18 | /// Returns a URL to the image on a server
19 | ///
20 | public static ImageResponseFormat Url { get { return new ImageResponseFormat("url"); } }
21 | ///
22 | /// Gets the image base as a base64-encoded JSON string
23 | ///
24 | public static ImageResponseFormat B64_json { get { return new ImageResponseFormat("b64_json"); } }
25 |
26 |
27 | ///
28 | /// Gets the string value for this response format to pass to the API
29 | ///
30 | /// The response format as a string
31 | public override string ToString()
32 | {
33 | return Value;
34 | }
35 |
36 | ///
37 | /// Gets the string value for this response format to pass to the API
38 | ///
39 | /// The ImageResponseFormat to convert
40 | public static implicit operator String(ImageResponseFormat value) { return value; }
41 |
42 | internal class ImageResponseJsonConverter : JsonConverter
43 | {
44 | public override ImageResponseFormat ReadJson(JsonReader reader, Type objectType, ImageResponseFormat existingValue, bool hasExistingValue, JsonSerializer serializer)
45 | {
46 | return new ImageResponseFormat(reader.ReadAsString());
47 | }
48 |
49 | public override void WriteJson(JsonWriter writer, ImageResponseFormat value, JsonSerializer serializer)
50 | {
51 | writer.WriteValue(value.ToString());
52 | }
53 | }
54 | }
55 |
56 | }
57 |
--------------------------------------------------------------------------------
/OpenAI_API.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 17
4 | VisualStudioVersion = 17.2.32616.157
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "OpenAI_API", "OpenAI_API\OpenAI_API.csproj", "{99C80D3E-3F0F-4ACC-900D-7AAE6230A780}"
7 | EndProject
8 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "OpenAI_Tests", "OpenAI_Tests\OpenAI_Tests.csproj", "{066EC5A5-47CE-4B91-B924-F236644037C1}"
9 | EndProject
10 | Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{1B37C1A8-1122-49FD-A3C1-C8F697714C1B}"
11 | ProjectSection(SolutionItems) = preProject
12 | LICENSE.md = LICENSE.md
13 | README.md = README.md
14 | EndProjectSection
15 | EndProject
16 | Global
17 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
18 | Debug|Any CPU = Debug|Any CPU
19 | Release|Any CPU = Release|Any CPU
20 | EndGlobalSection
21 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
22 | {99C80D3E-3F0F-4ACC-900D-7AAE6230A780}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
23 | {99C80D3E-3F0F-4ACC-900D-7AAE6230A780}.Debug|Any CPU.Build.0 = Debug|Any CPU
24 | {99C80D3E-3F0F-4ACC-900D-7AAE6230A780}.Release|Any CPU.ActiveCfg = Release|Any CPU
25 | {99C80D3E-3F0F-4ACC-900D-7AAE6230A780}.Release|Any CPU.Build.0 = Release|Any CPU
26 | {066EC5A5-47CE-4B91-B924-F236644037C1}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
27 | {066EC5A5-47CE-4B91-B924-F236644037C1}.Debug|Any CPU.Build.0 = Debug|Any CPU
28 | {066EC5A5-47CE-4B91-B924-F236644037C1}.Release|Any CPU.ActiveCfg = Release|Any CPU
29 | {066EC5A5-47CE-4B91-B924-F236644037C1}.Release|Any CPU.Build.0 = Release|Any CPU
30 | EndGlobalSection
31 | GlobalSection(SolutionProperties) = preSolution
32 | HideSolutionNode = FALSE
33 | EndGlobalSection
34 | GlobalSection(ExtensibilityGlobals) = postSolution
35 | SolutionGuid = {971477B1-6BBA-40CD-8B76-AEBC01D99130}
36 | EndGlobalSection
37 | EndGlobal
38 |
--------------------------------------------------------------------------------
/OpenAI_API/Images/ImageGenerationEndpoint.cs:
--------------------------------------------------------------------------------
1 | using OpenAI_API.Models;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Text;
5 | using System.Threading.Tasks;
6 |
7 | namespace OpenAI_API.Images
8 | {
9 | ///
10 | /// Given a prompt, the model will generate a new image.
11 | ///
12 | public class ImageGenerationEndpoint : EndpointBase, IImageGenerationEndpoint
13 | {
14 | ///
15 | /// The name of the endpoint, which is the final path segment in the API URL. For example, "image".
16 | ///
17 | protected override string Endpoint { get { return "images/generations"; } }
18 |
19 | ///
20 | /// Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of as .
21 | ///
22 | ///
23 | internal ImageGenerationEndpoint(OpenAIAPI api) : base(api) { }
24 |
25 | ///
26 | /// Ask the API to Creates an image given a prompt.
27 | ///
28 | /// A text description of the desired image(s)
29 | /// The model to use for generating the image. Defaults to .
30 | /// Asynchronously returns the image result. Look in its
31 | public async Task CreateImageAsync(string input, Model model = null)
32 | {
33 | ImageGenerationRequest req = new ImageGenerationRequest(prompt: input, model: model);
34 | return await CreateImageAsync(req);
35 | }
36 |
37 | ///
38 | /// Ask the API to Creates an image given a prompt.
39 | ///
40 | /// Request to be send
41 | /// Asynchronously returns the image result. Look in its
42 | public async Task CreateImageAsync(ImageGenerationRequest request)
43 | {
44 | return await HttpPost(postData: request);
45 | }
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/OpenAI_API/Files/IFilesEndpoint.cs:
--------------------------------------------------------------------------------
1 | using System.Collections.Generic;
2 | using System.Threading.Tasks;
3 | using System.Net.Http;
4 |
5 | namespace OpenAI_API.Files
6 | {
7 | ///
8 | /// An interface for , for ease of mock testing, etc
9 | ///
10 | public interface IFilesEndpoint
11 | {
12 | ///
13 | /// Get the list of all files
14 | ///
15 | ///
16 | ///
17 | Task> GetFilesAsync();
18 |
19 | ///
20 | /// Returns information about a specific file
21 | ///
22 | /// The ID of the file to use for this request
23 | ///
24 | Task GetFileAsync(string fileId);
25 |
26 | ///
27 | /// Returns the contents of the specific file as string
28 | ///
29 | /// The ID of the file to use for this request
30 | ///
31 | Task GetFileContentAsStringAsync(string fileId);
32 |
33 | ///
34 | /// Delete a file
35 | ///
36 | /// The ID of the file to use for this request
37 | ///
38 | Task DeleteFileAsync(string fileId);
39 |
40 | ///
41 | /// Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact OpenAI if you need to increase the storage limit
42 | ///
43 | /// The name of the file to use for this request
44 | /// The intendend purpose of the uploaded documents. Use "fine-tune" for Fine-tuning. This allows us to validate the format of the uploaded file.
45 | Task UploadFileAsync(string filePath, string purpose = "fine-tune");
46 | }
47 | }
--------------------------------------------------------------------------------
/OpenAI_API/ApiResultBase.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using OpenAI_API.Models;
3 | using System;
4 |
5 | namespace OpenAI_API
6 | {
	/// <summary>
	/// Represents a result from calling the OpenAI API, with all the common metadata returned from every endpoint
	/// </summary>
	abstract public class ApiResultBase
	{

		/// <summary>The time when the result was generated, derived from <see cref="CreatedUnixTime"/></summary>
		[JsonIgnore]
		public DateTime? Created => CreatedUnixTime.HasValue ? (DateTime?)(DateTimeOffset.FromUnixTimeSeconds(CreatedUnixTime.Value).DateTime) : null;

		/// <summary>
		/// The time when the result was generated in unix epoch format
		/// </summary>
		[JsonProperty("created")]
		public long? CreatedUnixTime { get; set; }

		/// <summary>
		/// Which model was used to generate this result.
		/// </summary>
		[JsonProperty("model")]
		public Model Model { get; set; }

		/// <summary>
		/// Object type, ie: text_completion, file, fine-tune, list, etc
		/// </summary>
		[JsonProperty("object")]
		public string Object { get; set; }

		/// <summary>
		/// The organization associated with the API request, as reported by the API.
		/// </summary>
		[JsonIgnore]
		public string Organization { get; internal set; }

		/// <summary>
		/// The server-side processing time as reported by the API. This can be useful for debugging where a delay occurs.
		/// </summary>
		[JsonIgnore]
		public TimeSpan ProcessingTime { get; internal set; }

		/// <summary>
		/// The request id of this API call, as reported in the response headers. This may be useful for troubleshooting or when contacting OpenAI support in reference to a specific request.
		/// </summary>
		[JsonIgnore]
		public string RequestId { get; internal set; }

		/// <summary>
		/// The Openai-Version used to generate this response, as reported in the response headers. This may be useful for troubleshooting or when contacting OpenAI support in reference to a specific request.
		/// </summary>
		[JsonIgnore]
		public string OpenaiVersion { get; internal set; }
	}
59 | }
--------------------------------------------------------------------------------
/OpenAI_API/Moderation/ModerationEndpoint.cs:
--------------------------------------------------------------------------------
1 | using OpenAI_API.Models;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Text;
5 | using System.Threading.Tasks;
6 |
7 | namespace OpenAI_API.Moderation
8 | {
9 | ///
10 | /// This endpoint classifies text against the OpenAI Content Policy
11 | ///
12 | public class ModerationEndpoint : EndpointBase, IModerationEndpoint
13 | {
14 | ///
15 | /// This allows you to send request to the recommended model without needing to specify. OpenAI recommends using the model
16 | ///
17 | public ModerationRequest DefaultModerationRequestArgs { get; set; } = new ModerationRequest() { Model = Model.TextModerationLatest };
18 |
19 | ///
20 | /// The name of the endpoint, which is the final path segment in the API URL. For example, "completions".
21 | ///
22 | protected override string Endpoint { get { return "moderations"; } }
23 |
24 | ///
25 | /// Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of as .
26 | ///
27 | ///
28 | internal ModerationEndpoint(OpenAIAPI api) : base(api) { }
29 |
30 | ///
31 | /// Ask the API to classify the text using the default model.
32 | ///
33 | /// Text to classify
34 | /// Asynchronously returns the classification result
35 | public async Task CallModerationAsync(string input)
36 | {
37 | ModerationRequest req = new ModerationRequest(input, DefaultModerationRequestArgs.Model);
38 | return await CallModerationAsync(req);
39 | }
40 |
41 | ///
42 | /// Ask the API to classify the text using a custom request.
43 | ///
44 | /// Request to send to the API
45 | /// Asynchronously returns the classification result
46 | public async Task CallModerationAsync(ModerationRequest request)
47 | {
48 | return await HttpPost(postData: request);
49 | }
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/OpenAI_API/Embedding/IEmbeddingEndpoint.cs:
--------------------------------------------------------------------------------
1 | using OpenAI_API.Models;
2 | using System.Threading.Tasks;
3 |
4 | namespace OpenAI_API.Embedding
5 | {
6 | ///
7 | /// An interface for , for ease of mock testing, etc
8 | ///
9 | public interface IEmbeddingEndpoint
10 | {
11 | ///
12 | /// This allows you to send request to a default model without needing to specify for each request
13 | ///
14 | EmbeddingRequest DefaultEmbeddingRequestArgs { get; set; }
15 |
16 | ///
17 | /// Ask the API to embed text using the default embedding model
18 | ///
19 | /// Text to be embedded
20 | /// Asynchronously returns the embedding result. Look in its property of to find the vector of floating point numbers
21 | Task CreateEmbeddingAsync(string input);
22 |
23 | ///
24 | /// Ask the API to embed text using a custom request
25 | ///
26 | /// Request to be send
27 | /// Asynchronously returns the embedding result. Look in its property of to find the vector of floating point numbers
28 | Task CreateEmbeddingAsync(EmbeddingRequest request);
29 |
30 | ///
31 | /// Ask the API to embed text
32 | ///
33 | /// Text to be embedded
34 | /// The model to use. You can use to see all of your available models, or use a standard model like .
35 | /// The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models.
36 | /// Asynchronously returns the first embedding result as an array of floats.
37 | Task GetEmbeddingsAsync(string input, Model model = null, int? dimensions = null);
38 | }
39 | }
--------------------------------------------------------------------------------
/OpenAI_Tests/HttpClientResolutionTests.cs:
--------------------------------------------------------------------------------
1 | using Microsoft.Extensions.Options;
2 | using Moq;
3 | using NUnit.Framework;
4 | using OpenAI_API;
5 | using System;
6 | using System.Linq;
7 | using System.Net.Http;
8 |
9 | namespace OpenAI_Tests
10 | {
11 | public class HttpClientResolutionTests
12 | {
13 | [Test]
14 | public void GetHttpClient_NoFactory()
15 | {
16 | var api = new OpenAIAPI(new APIAuthentication("fake-key"));
17 | var endpoint = new TestEndpoint(api);
18 |
19 | var client = endpoint.GetHttpClient();
20 | Assert.IsNotNull(client);
21 | }
22 |
23 | [Test]
24 | public void GetHttpClient_WithFactory()
25 | {
26 | var expectedClient1 = new HttpClient();
27 | var mockedFactory1 = Mock.Of(f => f.CreateClient(Options.DefaultName) == expectedClient1);
28 |
29 | var expectedClient2 = new HttpClient();
30 | var mockedFactory2 = Mock.Of(f => f.CreateClient(Options.DefaultName) == expectedClient2);
31 |
32 | var api = new OpenAIAPI(new APIAuthentication("fake-key"));
33 | var endpoint = new TestEndpoint(api);
34 |
35 | api.HttpClientFactory = mockedFactory1;
36 | var actualClient1 = endpoint.GetHttpClient();
37 |
38 | api.HttpClientFactory = mockedFactory2;
39 | var actualClient2 = endpoint.GetHttpClient();
40 |
41 | Assert.AreSame(expectedClient1, actualClient1);
42 | Assert.AreSame(expectedClient2, actualClient2);
43 |
44 | api.HttpClientFactory = null;
45 | var actualClient3 = endpoint.GetHttpClient();
46 |
47 | Assert.NotNull(actualClient3);
48 | Assert.AreNotSame(expectedClient1, actualClient3);
49 | Assert.AreNotSame(expectedClient2, actualClient3);
50 | }
51 |
52 | private class TestEndpoint : EndpointBase
53 | {
54 | public TestEndpoint(OpenAIAPI api) : base(api)
55 | {
56 | }
57 |
58 | protected override string Endpoint => throw new System.NotSupportedException();
59 |
60 | public HttpClient GetHttpClient()
61 | {
62 | return base.GetClient();
63 | }
64 | }
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/OpenAI_API/Embedding/EmbeddingRequest.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using OpenAI_API.Models;
3 |
4 | namespace OpenAI_API.Embedding
5 | {
	/// <summary>
	/// Represents a request to the Embeddings API. Matches the parameters documented in the OpenAI embeddings docs.
	/// </summary>
	public class EmbeddingRequest
	{
		/// <summary>
		/// ID of the model to use, e.g. a standard embedding model name.
		/// </summary>
		[JsonProperty("model")]
		public string Model { get; set; }

		/// <summary>
		/// Main text to be embedded
		/// </summary>
		[JsonProperty("input")]
		public string Input { get; set; }

		/// <summary>
		/// The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models.
		/// </summary>
		[JsonProperty("dimensions", NullValueHandling =NullValueHandling.Ignore)]
		public int? Dimensions { get; set; }

		/// <summary>
		/// Creates a new, empty request; set the properties before sending.
		/// </summary>
		public EmbeddingRequest()
		{

		}

		/// <summary>
		/// Creates a new request with the specified parameters
		/// </summary>
		/// <param name="model">The model to use for the embedding</param>
		/// <param name="input">The prompt to transform</param>
		/// <param name="dimensions">The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models.</param>
		public EmbeddingRequest(Model model, string input, int? dimensions = null)
		{
			this.Model = model;
			this.Input = input;
			this.Dimensions = dimensions;
		}

		/// <summary>
		/// Creates a new request with the specified input and the default embedding model.
		/// </summary>
		/// <param name="input">The prompt to transform</param>
		public EmbeddingRequest(string input)
		{
			this.Model = OpenAI_API.Models.Model.DefaultEmbeddingModel;
			this.Input = input;
		}
	}
60 | }
61 |
--------------------------------------------------------------------------------
/OpenAI_Tests/FilesEndpointTests.cs:
--------------------------------------------------------------------------------
1 | using NUnit.Framework;
2 | using System;
3 | using System.Threading;
4 | using System.Threading.Tasks;
5 |
6 | namespace OpenAI_Tests
7 | {
8 | public class FilesEndpointTests
9 | {
10 | [SetUp]
11 | public void Setup()
12 | {
13 | OpenAI_API.APIAuthentication.Default = new OpenAI_API.APIAuthentication(Environment.GetEnvironmentVariable("TEST_OPENAI_SECRET_KEY"));
14 | }
15 |
16 | [Test]
17 | [Order(1)]
18 | public async Task UploadFile()
19 | {
20 | var api = new OpenAI_API.OpenAIAPI();
21 | var response = await api.Files.UploadFileAsync("fine-tuning-data.jsonl");
22 | Assert.IsNotNull(response);
23 | Assert.IsTrue(response.Id.Length > 0);
24 | Assert.IsTrue(response.Object == "file");
25 | Assert.IsTrue(response.Bytes > 0);
26 | Assert.IsTrue(response.CreatedAt > 0);
27 | Assert.IsTrue(response.Status == "uploaded");
28 | // The file must be processed before it can be used in other operations, so for testing purposes we just sleep awhile.
29 | Thread.Sleep(10000);
30 | }
31 |
32 | [Test]
33 | [Order(2)]
34 | public async Task ListFiles()
35 | {
36 | var api = new OpenAI_API.OpenAIAPI();
37 | var response = await api.Files.GetFilesAsync();
38 |
39 | foreach (var file in response)
40 | {
41 | Assert.IsNotNull(file);
42 | Assert.IsTrue(file.Id.Length > 0);
43 | }
44 | }
45 |
46 |
47 | [Test]
48 | [Order(3)]
49 | public async Task GetFile()
50 | {
51 | var api = new OpenAI_API.OpenAIAPI();
52 | var response = await api.Files.GetFilesAsync();
53 | foreach (var file in response)
54 | {
55 | Assert.IsNotNull(file);
56 | Assert.IsTrue(file.Id.Length > 0);
57 | string id = file.Id;
58 | if (file.Name == "fine-tuning-data.jsonl")
59 | {
60 | var fileResponse = await api.Files.GetFileAsync(file.Id);
61 | Assert.IsNotNull(fileResponse);
62 | Assert.IsTrue(fileResponse.Id == id);
63 | }
64 | }
65 | }
66 |
67 | [Test]
68 | [Order(4)]
69 | public async Task DeleteFiles()
70 | {
71 | var api = new OpenAI_API.OpenAIAPI();
72 | var response = await api.Files.GetFilesAsync();
73 | foreach (var file in response)
74 | {
75 | Assert.IsNotNull(file);
76 | Assert.IsTrue(file.Id.Length > 0);
77 | if (file.Name == "fine-tuning-data.jsonl")
78 | {
79 | var deleteResponse = await api.Files.DeleteFileAsync(file.Id);
80 | Assert.IsNotNull(deleteResponse);
81 | Assert.IsTrue(deleteResponse.Deleted);
82 | }
83 | }
84 | }
85 |
86 | }
87 | }
88 |
--------------------------------------------------------------------------------
/OpenAI_API/OpenAI_API.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | netstandard2.0
5 | 8.0
6 | true
7 | OkGoDoIt (Roger Pincombe)
8 | OpenAI API
9 | A simple C# / .NET library to use with OpenAI's APIs, including GPT 3.5, GPT 4, ChatGPT, DALL-E, Whisper, etc. Independently developed, this is not an official library and I am not affiliated with OpenAI. An OpenAI or Azure OpenAI account is required.
10 | This library is licensed CC-0, in the public domain
11 | CC0-1.0
12 | https://github.com/OkGoDoIt/OpenAI-API-dotnet
13 | https://github.com/OkGoDoIt/OpenAI-API-dotnet
14 | OpenAI, AI, ML, API, ChatGPT, DALLE, GPT3, GPT-3, GPT4, GPT-4, DALL-E, Whisper, TTS
15 | OpenAI API
16 | Adds new embedding models as of March 2024
17 | OpenAI
18 | 1.11
19 | 1.11.0.0
20 | 1.11.0.0
21 | True
22 | README.md
23 | True
24 | git
25 |
26 |
27 | true
28 |
29 | true
30 | snupkg
31 |
32 | true
33 |
34 |
35 |
36 |
37 | nuget_logo.png
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 | True
47 | \
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | # Set default behavior to automatically normalize line endings.
3 | ###############################################################################
4 | * text=auto
5 |
6 | ###############################################################################
7 | # Set default behavior for command prompt diff.
8 | #
# This is needed for earlier builds of msysgit that do not have it on by
# default for csharp files.
11 | # Note: This is only used by command line
12 | ###############################################################################
13 | #*.cs diff=csharp
14 |
15 | ###############################################################################
16 | # Set the merge driver for project and solution files
17 | #
18 | # Merging from the command prompt will add diff markers to the files if there
19 | # are conflicts (Merging from VS is not affected by the settings below, in VS
20 | # the diff markers are never inserted). Diff markers may cause the following
21 | # file extensions to fail to load in VS. An alternative would be to treat
22 | # these files as binary and thus will always conflict and require user
23 | # intervention with every merge. To do so, just uncomment the entries below
24 | ###############################################################################
25 | #*.sln merge=binary
26 | #*.csproj merge=binary
27 | #*.vbproj merge=binary
28 | #*.vcxproj merge=binary
29 | #*.vcproj merge=binary
30 | #*.dbproj merge=binary
31 | #*.fsproj merge=binary
32 | #*.lsproj merge=binary
33 | #*.wixproj merge=binary
34 | #*.modelproj merge=binary
35 | #*.sqlproj merge=binary
36 | #*.wwaproj merge=binary
37 |
38 | ###############################################################################
39 | # behavior for image files
40 | #
41 | # image files are treated as binary by default.
42 | ###############################################################################
43 | #*.jpg binary
44 | #*.png binary
45 | #*.gif binary
46 |
47 | ###############################################################################
48 | # diff behavior for common document formats
49 | #
50 | # Convert binary document formats to text before diffing them. This feature
51 | # is only available from the command line. Turn it on by uncommenting the
52 | # entries below.
53 | ###############################################################################
54 | #*.doc diff=astextplain
55 | #*.DOC diff=astextplain
56 | #*.docx diff=astextplain
57 | #*.DOCX diff=astextplain
58 | #*.dot diff=astextplain
59 | #*.DOT diff=astextplain
60 | #*.pdf diff=astextplain
61 | #*.PDF diff=astextplain
62 | #*.rtf diff=astextplain
63 | #*.RTF diff=astextplain
64 |
--------------------------------------------------------------------------------
/OpenAI_Tests/TextToSpeechTests.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using NUnit.Framework;
3 | using OpenAI_API.Audio;
4 | using OpenAI_API.Chat;
5 | using OpenAI_API.Completions;
6 | using OpenAI_API.Models;
7 | using OpenAI_API.Moderation;
8 | using System;
9 | using System.Collections.Generic;
10 | using System.Diagnostics;
11 | using System.IO;
12 | using System.Linq;
13 | using System.Threading;
14 | using System.Threading.Tasks;
15 | using static OpenAI_API.Audio.TextToSpeechRequest;
16 | using static OpenAI_API.Chat.ChatMessage;
17 |
18 | namespace OpenAI_Tests
19 | {
20 | public class TextToSpeechTests
21 | {
22 | [SetUp]
23 | public void Setup()
24 | {
25 | OpenAI_API.APIAuthentication.Default = new OpenAI_API.APIAuthentication(Environment.GetEnvironmentVariable("TEST_OPENAI_SECRET_KEY"));
26 | }
27 |
28 | [TestCase("alloy", false, null)]
29 | [TestCase("echo", true, null)]
30 | [TestCase("fable", false, 1)]
31 | [TestCase("onyx", true, 1.25)]
32 | [TestCase("nova", false, 0.5)]
33 | public async Task SimpleTTSStreamTest(string voice, bool hd, double? speed)
34 | {
35 | var api = new OpenAI_API.OpenAIAPI();
36 | using (Stream result = await api.TextToSpeech.GetSpeechAsStreamAsync("Hello, brave new world! This is a test.", voice, speed, TextToSpeechRequest.ResponseFormats.FLAC, hd ? Model.TTS_HD : null))
37 | {
38 | Assert.IsNotNull(result);
39 | using (StreamReader reader = new StreamReader(result))
40 | {
41 | Assert.Greater(result.Length, 10000);
42 | string asString = await reader.ReadToEndAsync();
43 | Assert.AreEqual("fLaC", asString.Substring(0, 4));
44 | }
45 | }
46 | }
47 |
48 | [Test]
49 | public async Task SimpleTTSFileTest()
50 | {
51 | string tempPath = Path.GetTempFileName();
52 |
53 | var api = new OpenAI_API.OpenAIAPI();
54 | var result = await api.TextToSpeech.SaveSpeechToFileAsync("Hello, brave new world! This is a test.", tempPath, responseFormat: TextToSpeechRequest.ResponseFormats.FLAC);
55 | Assert.IsNotNull(result);
56 | Assert.Greater(result.Length, 10000);
57 | string asString = File.ReadAllText(tempPath);
58 | Assert.AreEqual("fLaC", asString.Substring(0, 4));
59 | }
60 |
61 | [TestCase(null)]
62 | [TestCase("mp3")]
63 | [TestCase("opus")]
64 | [TestCase("aac")]
65 | public async Task ManualTTSStreamTest(string format)
66 | {
67 | var api = new OpenAI_API.OpenAIAPI();
68 |
69 | var request = new TextToSpeechRequest()
70 | {
71 | Input = "Hello, brave new world! This is a test.",
72 | ResponseFormat = format,
73 | };
74 | using (var result = await api.TextToSpeech.GetSpeechAsStreamAsync(request))
75 | {
76 | Assert.IsNotNull(result);
77 | Assert.Greater(result.Length, 10000);
78 | }
79 | }
80 | }
81 | }
82 |
--------------------------------------------------------------------------------
/OpenAI_API/Model/ModelsEndpoint.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Threading.Tasks;
5 |
6 | namespace OpenAI_API.Models
7 | {
8 | ///
9 | /// The API endpoint for querying available models
10 | ///
11 | public class ModelsEndpoint : EndpointBase, IModelsEndpoint
12 | {
13 | ///
14 | /// The name of the endpoint, which is the final path segment in the API URL. For example, "models".
15 | ///
16 | protected override string Endpoint { get { return "models"; } }
17 |
18 | ///
19 | /// Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of as .
20 | ///
21 | ///
22 | internal ModelsEndpoint(OpenAIAPI api) : base(api) { }
23 |
24 | ///
25 | /// Get details about a particular Model from the API, specifically properties such as and permissions.
26 | ///
27 | /// The id/name of the model to get more details about
28 | /// Asynchronously returns the with all available properties
29 | public async Task RetrieveModelDetailsAsync(string id)
30 | {
31 | string resultAsString = await HttpGetContent($"{Url}/{id}");
32 | var model = JsonConvert.DeserializeObject(resultAsString);
33 | return model;
34 | }
35 |
36 | ///
37 | /// List all models via the API
38 | ///
39 | /// Asynchronously returns the list of all s
40 | public async Task> GetModelsAsync()
41 | {
42 | return (await HttpGet()).data;
43 | }
44 |
45 | ///
46 | /// Get details about a particular Model from the API, specifically properties such as and permissions.
47 | ///
48 | /// The id/name of the model to get more details about
49 | /// Obsolete: IGNORED
50 | /// Asynchronously returns the with all available properties
51 | [Obsolete("Use the overload without the APIAuthentication parameter instead, as custom auth is no longer used.", false)]
52 | public async Task RetrieveModelDetailsAsync(string id, APIAuthentication auth = null)
53 | {
54 | return await this.RetrieveModelDetailsAsync(id);
55 | }
56 |
57 | ///
58 | /// A helper class to deserialize the JSON API responses. This should not be used directly.
59 | ///
60 | private class JsonHelperRoot : ApiResultBase
61 | {
62 | [JsonProperty("data")]
63 | public List data { get; set; }
64 | [JsonProperty("object")]
65 | public string obj { get; set; }
66 |
67 | }
68 | }
69 | }
70 |
--------------------------------------------------------------------------------
/OpenAI_Tests/ModelEndpointTests.cs:
--------------------------------------------------------------------------------
1 | using FluentAssertions;
2 | using NUnit.Framework;
3 | using OpenAI_API;
4 | using OpenAI_API.Models;
5 | using System;
6 | using System.Linq;
7 | using System.Security.Authentication;
8 | using System.Threading.Tasks;
9 |
10 | namespace OpenAI_Tests
11 | {
	// Integration tests for the Models endpoint.  All tests hit the live OpenAI API and require
	// the TEST_OPENAI_SECRET_KEY environment variable to be set.
	public class ModelEndpointTests
	{
		[SetUp]
		public void Setup()
		{
			// Authenticate from the environment so no secret key is committed to source control.
			OpenAI_API.APIAuthentication.Default = new OpenAI_API.APIAuthentication(Environment.GetEnvironmentVariable("TEST_OPENAI_SECRET_KEY"));
		}

		[Test]
		public void GetAllModels()
		{
			var api = new OpenAI_API.OpenAIAPI();

			Assert.IsNotNull(api.Models);

			// NOTE(review): blocking on .Result in a test is acceptable, but the "text-davinci"
			// check may be outdated as OpenAI retires models — confirm against the live model list.
			var results = api.Models.GetModelsAsync().Result;
			Assert.IsNotNull(results);
			Assert.NotZero(results.Count);
			Assert.That(results.Any(c => c.ModelID.ToLower().StartsWith("text-davinci")));
		}

		[Test]
		public void GetModelDetails()
		{
			var api = new OpenAI_API.OpenAIAPI();

			Assert.IsNotNull(api.Models);

			var result = api.Models.RetrieveModelDetailsAsync(Model.DefaultModel).Result;
			Assert.IsNotNull(result);

			// Sanity-check the created timestamp: after 2018 and not in the future.
			Assert.NotNull(result.CreatedUnixTime);
			Assert.NotZero(result.CreatedUnixTime.Value);
			Assert.NotNull(result.Created);
			Assert.Greater(result.Created.Value, new DateTime(2018, 1, 1));
			Assert.Less(result.Created.Value, DateTime.Now.AddDays(1));

			Assert.IsNotNull(result.ModelID);
			Assert.IsNotNull(result.OwnedBy);
			Assert.AreEqual(Model.DefaultModel.ModelID.ToLower(), result.ModelID.ToLower());
		}


		[Test]
		public async Task GetEnginesAsync_ShouldReturnTheEngineList()
		{
			var api = new OpenAI_API.OpenAIAPI();
			var models = await api.Models.GetModelsAsync();
			models.Count.Should().BeGreaterOrEqualTo(5, "most engines should be returned");
		}

		[Test]
		public void GetEnginesAsync_ShouldFailIfInvalidAuthIsProvided()
		{
			// A random GUID is guaranteed to be an invalid API key.
			var api = new OpenAIAPI(new APIAuthentication(Guid.NewGuid().ToString()));
			// NOTE(review): the generic type arguments appear to have been lost here — this is
			// presumably Func<Task> and ThrowAsync<AuthenticationException>; confirm against the repo.
			Func act = () => api.Models.GetModelsAsync();
			act.Should()
				.ThrowAsync()
				.Where(exc => exc.Message.Contains("Incorrect API key provided"));
		}

		[TestCase("ada")]
		[TestCase("babbage")]
		[TestCase("curie")]
		[TestCase("davinci")]
		public async Task RetrieveEngineDetailsAsync_ShouldRetrieveEngineDetails(string modelId)
		{
			var api = new OpenAI_API.OpenAIAPI();
			var modelData = await api.Models.RetrieveModelDetailsAsync(modelId);
			modelData?.ModelID?.Should()?.Be(modelId);
			modelData.Created.Should().BeAfter(new DateTime(2018, 1, 1), "the model has a created date no earlier than 2018");
			modelData.Created.Should().BeBefore(DateTime.Now.AddDays(1), "the model has a created date before today");
		}
	}
86 | }
--------------------------------------------------------------------------------
/OpenAI_API/Audio/AudioRequest.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Text;
4 | using Newtonsoft.Json;
5 | using static OpenAI_API.Audio.TextToSpeechRequest;
6 |
7 | namespace OpenAI_API.Audio
8 | {
9 | ///
10 | /// Parameters for requests made by the .
11 | ///
12 | public class AudioRequest
13 | {
14 | ///
15 | /// The model to use for this request. Currently only is supported.
16 | ///
17 | [JsonProperty("model")]
18 | public string Model { get; set; } = OpenAI_API.Models.Model.DefaultTranscriptionModel;
19 |
20 | ///
21 | /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language for transcriptions, or English for translations.
22 | ///
23 | [JsonProperty("prompt", DefaultValueHandling = DefaultValueHandling.Ignore)]
24 | public string Prompt { get; set; } = null;
25 |
26 | ///
27 | /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
28 | ///
29 | [JsonProperty("language", DefaultValueHandling = DefaultValueHandling.Ignore)]
30 | public string Language { get; set; } = null;
31 |
32 | ///
33 | /// The format of the transcript output, should be one of the options in . See
34 | ///
35 | [JsonProperty("response_format", DefaultValueHandling = DefaultValueHandling.Ignore)]
36 | public string ResponseFormat { get; set; } = null;
37 |
38 | ///
39 | /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
40 | ///
41 | [JsonProperty("temperature", DefaultValueHandling = DefaultValueHandling.Ignore)]
42 | public double Temperature { get; set; } = 0;
43 |
44 |
45 | ///
46 | /// The format of the transcript output. See
47 | ///
48 | public static class ResponseFormats
49 | {
50 | #pragma warning disable CS1591 // Missing XML comment for publicly visible type or member
51 | public const string JSON = "json";
52 | public const string Text = "text";
53 | public const string SRT = "srt";
54 | public const string VerboseJson = "verbose_json";
55 | public const string VTT = "vtt";
56 | #pragma warning restore CS1591 // Missing XML comment for publicly visible type or member
57 | }
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/OpenAI_API/Embedding/EmbeddingEndpoint.cs:
--------------------------------------------------------------------------------
1 | using OpenAI_API.Models;
2 | using System.Threading.Tasks;
3 |
4 | namespace OpenAI_API.Embedding
5 | {
6 | ///
7 | /// OpenAI’s text embeddings measure the relatedness of text strings by generating an embedding, which is a vector (list) of floating point numbers. The distance between two vectors measures their relatedness. Small distances suggest high relatedness and large distances suggest low relatedness.
8 | ///
9 | public class EmbeddingEndpoint : EndpointBase, IEmbeddingEndpoint
10 | {
11 | ///
12 | /// This allows you to send request to a default model without needing to specify for each request.
13 | ///
14 | public EmbeddingRequest DefaultEmbeddingRequestArgs { get; set; } = new EmbeddingRequest() { Model = Model.DefaultEmbeddingModel };
15 |
16 | ///
17 | /// The name of the endpoint, which is the final path segment in the API URL. For example, "embeddings".
18 | ///
19 | protected override string Endpoint { get { return "embeddings"; } }
20 |
21 | ///
22 | /// Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of as .
23 | ///
24 | ///
25 | internal EmbeddingEndpoint(OpenAIAPI api) : base(api) { }
26 |
27 | ///
28 | /// Ask the API to embed text using the default embedding model
29 | ///
30 | /// Text to be embedded
31 | /// Asynchronously returns the embedding result. Look in its property of to find the vector of floating point numbers
32 | public async Task CreateEmbeddingAsync(string input)
33 | {
34 | EmbeddingRequest req = new EmbeddingRequest(DefaultEmbeddingRequestArgs.Model, input);
35 | return await CreateEmbeddingAsync(req);
36 | }
37 |
38 | ///
39 | /// Ask the API to embed text using a custom request
40 | ///
41 | /// Request to be send
42 | /// Asynchronously returns the embedding result. Look in its property of to find the vector of floating point numbers
43 | public async Task CreateEmbeddingAsync(EmbeddingRequest request)
44 | {
45 | return await HttpPost(postData: request);
46 | }
47 |
48 | ///
49 | public async Task GetEmbeddingsAsync(string input)
50 | {
51 | EmbeddingRequest req = new EmbeddingRequest(DefaultEmbeddingRequestArgs.Model, input);
52 | var embeddingResult = await CreateEmbeddingAsync(req);
53 | return embeddingResult?.Data?[0]?.Embedding;
54 | }
55 |
56 | ///
57 | public async Task GetEmbeddingsAsync(string input, Model model=null, int? dimensions = null)
58 | {
59 | EmbeddingRequest req = new EmbeddingRequest(model ?? Model.DefaultEmbeddingModel, input, dimensions);
60 | var embeddingResult = await CreateEmbeddingAsync(req);
61 | return embeddingResult?.Data?[0]?.Embedding;
62 | }
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/OpenAI_API/Moderation/ModerationRequest.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using OpenAI_API.Models;
3 | using System;
4 | using System.Collections.Generic;
5 | using System.Linq;
6 | using System.Reflection;
7 | using System.Text;
8 |
9 | namespace OpenAI_API.Moderation
10 | {
11 | ///
12 | /// Represents a request to the Moderations API.
13 | ///
14 | public class ModerationRequest
15 | {
16 |
17 | ///
18 | /// Which Moderation model to use for this request. Two content moderations models are available: and . The default is which will be automatically upgraded over time.This ensures you are always using our most accurate model.If you use , we will provide advanced notice before updating the model. Accuracy of may be slightly lower than for .
19 | ///
20 | [JsonProperty("model")]
21 | public string Model { get; set; }
22 |
23 | ///
24 | /// The input text to classify
25 | ///
26 | [JsonIgnore]
27 | public string Input
28 | {
29 | get
30 | {
31 | if (Inputs == null)
32 | return null;
33 | else
34 | return Inputs.FirstOrDefault();
35 | }
36 | set
37 | {
38 | Inputs = new string[] { value };
39 | }
40 | }
41 |
42 | ///
43 | /// An array of inputs to classify
44 | ///
45 | [JsonProperty("input")]
46 | public string[] Inputs { get; set; }
47 |
48 | ///
49 | /// Cretes a new, empty
50 | ///
51 | public ModerationRequest()
52 | {
53 |
54 | }
55 |
56 | ///
57 | /// Creates a new with the specified parameters
58 | ///
59 | /// The prompt to classify
60 | /// The model to use. You can use to see all of your available models, or use a standard model like .
61 | public ModerationRequest(string input, Model model)
62 | {
63 | Model = model;
64 | this.Input = input;
65 | }
66 |
67 | ///
68 | /// Creates a new with the specified parameters
69 | ///
70 | /// An array of prompts to classify
71 | /// The model to use. You can use to see all of your available models, or use a standard model like .
72 | public ModerationRequest(string[] inputs, Model model)
73 | {
74 | Model = model;
75 | this.Inputs = inputs;
76 | }
77 |
78 | ///
79 | /// Creates a new with the specified input(s) and the model.
80 | ///
81 | /// One or more prompts to classify
82 | public ModerationRequest(params string[] input)
83 | {
84 | Model = OpenAI_API.Models.Model.TextModerationLatest;
85 | this.Inputs = input;
86 | }
87 | }
88 | }
89 |
--------------------------------------------------------------------------------
/OpenAI_API/Audio/TextToSpeechRequest.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Text;
4 | using Newtonsoft.Json;
5 |
6 | namespace OpenAI_API.Audio
7 | {
8 | ///
9 | /// A request to the .
10 | ///
11 | public class TextToSpeechRequest
12 | {
13 | ///
14 | /// The model to use for this request
15 | ///
16 | [JsonProperty("model")]
17 | public string Model { get; set; } = OpenAI_API.Models.Model.DefaultTTSModel;
18 |
19 | ///
20 | /// The text to generate audio for. The maximum length is 4096 characters.
21 | ///
22 | [JsonProperty("input")]
23 | public string Input { get; set; }
24 |
25 | ///
26 | /// The voice to use when generating the audio. Supported voices can be found in .
27 | ///
28 | [JsonProperty("voice")]
29 | public string Voice { get; set; } = Voices.Alloy;
30 |
31 | ///
32 | /// The default response format is "mp3", but other formats are available in . See
33 | ///
34 | [JsonProperty("response_format", DefaultValueHandling=DefaultValueHandling.Ignore)]
35 | public string ResponseFormat { get; set; } = null;
36 |
37 | ///
38 | /// The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.
39 | ///
40 | [JsonProperty("speed", DefaultValueHandling = DefaultValueHandling.Ignore)]
41 | public double? Speed { get; set; } = null;
42 |
43 | ///
44 | /// Supported voices are alloy, echo, fable, onyx, nova, and shimmer. Previews of the voices are available in the Text to speech guide. See .
45 | ///
46 | public static class Voices
47 | {
48 | #pragma warning disable CS1591 // Missing XML comment for publicly visible type or member
49 | public const string Alloy = "alloy";
50 | public const string Echo = "echo";
51 | public const string Fable = "fable";
52 | public const string Onyx = "onyx";
53 | public const string Nova = "nova";
54 | public const string Shimmer = "shimmer";
55 | #pragma warning restore CS1591 // Missing XML comment for publicly visible type or member
56 | }
57 |
58 | ///
59 | /// The format to return for the generated audio. See
60 | ///
61 | public static class ResponseFormats
62 | {
63 | ///
64 | /// The default, industry-standard audio format
65 | ///
66 | public const string MP3 = "mp3";
67 | ///
68 | /// For lossless audio compression, favored by audio enthusiasts for archiving
69 | ///
70 | public const string FLAC = "flac";
71 | ///
72 | /// For digital audio compression, preferred by YouTube, Android, iOS
73 | ///
74 | public const string AAC = "aac";
75 | ///
76 | /// For internet streaming and communication, low latency.
77 | ///
78 | public const string OPUS = "opus";
79 | }
80 | }
81 | }
82 |
--------------------------------------------------------------------------------
/OpenAI_API/Completions/CompletionResult.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using OpenAI_API.Embedding;
3 | using System.Collections.Generic;
4 |
5 | namespace OpenAI_API.Completions
6 | {
7 | ///
8 | /// Represents a completion choice returned by the Completion API.
9 | ///
10 | public class Choice
11 | {
12 | ///
13 | /// The main text of the completion
14 | ///
15 | [JsonProperty("text")]
16 | public string Text { get; set; }
17 |
18 | ///
19 | /// If multiple completion choices we returned, this is the index withing the various choices
20 | ///
21 | [JsonProperty("index")]
22 | public int Index { get; set; }
23 |
24 | ///
25 | /// If the request specified , this contains the list of the most likely tokens.
26 | ///
27 | [JsonProperty("logprobs")]
28 | public Logprobs Logprobs { get; set; }
29 |
30 | ///
31 | /// If this is the last segment of the completion result, this specifies why the completion has ended.
32 | ///
33 | [JsonProperty("finish_reason")]
34 | public string FinishReason { get; set; }
35 |
36 | ///
37 | /// Gets the main text of this completion
38 | ///
39 | public override string ToString()
40 | {
41 | return Text;
42 | }
43 | }
44 |
45 | ///
46 | /// API usage as reported by the OpenAI API for this request
47 | ///
48 | public class CompletionUsage : Usage
49 | {
50 | ///
51 | /// How many tokens are in the completion(s)
52 | ///
53 | [JsonProperty("completion_tokens")]
54 | public short CompletionTokens { get; set; }
55 | }
56 |
57 | ///
58 | /// Represents a result from calling the Completion API
59 | ///
60 | public class CompletionResult : ApiResultBase
61 | {
62 | ///
63 | /// The identifier of the result, which may be used during troubleshooting
64 | ///
65 | [JsonProperty("id")]
66 | public string Id { get; set; }
67 |
68 | ///
69 | /// The completions returned by the API. Depending on your request, there may be 1 or many choices.
70 | ///
71 | [JsonProperty("choices")]
72 | public List Completions { get; set; }
73 |
74 | ///
75 | /// API token usage as reported by the OpenAI API for this request
76 | ///
77 | [JsonProperty("usage")]
78 | public CompletionUsage Usage { get; set; }
79 |
80 | ///
81 | /// Gets the text of the first completion, representing the main result
82 | ///
83 | public override string ToString()
84 | {
85 | if (Completions != null && Completions.Count > 0)
86 | return Completions[0].ToString();
87 | else
88 | return $"CompletionResult {Id} has no valid output";
89 | }
90 | }
91 |
92 |
93 | public class Logprobs
94 | {
95 | [JsonProperty("tokens")]
96 | public List Tokens { get; set; }
97 |
98 | [JsonProperty("token_logprobs")]
99 | public List TokenLogprobs { get; set; }
100 |
101 | [JsonProperty("top_logprobs")]
102 | public IList> TopLogprobs { get; set; }
103 |
104 | [JsonProperty("text_offset")]
105 | public List TextOffsets { get; set; }
106 | }
107 |
108 | }
109 |
--------------------------------------------------------------------------------
/OpenAI_API/Images/ImageSize.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using OpenAI_API.Models;
3 | using System;
4 | using System.Collections.Generic;
5 | using System.Text;
6 |
7 | namespace OpenAI_API.Images
8 | {
9 | ///
10 | /// Represents available sizes for image generation endpoints
11 | ///
12 | public class ImageSize
13 | {
14 | internal ImageSize(string value) { Value = value; }
15 |
16 | private string Value { get; set; }
17 |
18 | ///
19 | /// Only for DALL-E 2. Requests an image that is 256x256
20 | ///
21 | public static ImageSize _256 { get { return new ImageSize("256x256"); } }
22 | ///
23 | /// Only for DALL-E 2. Requests an image that is 512x512
24 | ///
25 | public static ImageSize _512 { get { return new ImageSize("512x512"); } }
26 | ///
27 | /// Works with both DALL-E 2 and 3. Requests and image that is 1024x1024.
28 | ///
29 | public static ImageSize _1024 { get { return new ImageSize("1024x1024"); } }
30 |
31 | ///
32 | /// Only for DALL-E 3. Requests a tall image that is 1024x1792.
33 | ///
34 | public static ImageSize _1024x1792 { get { return new ImageSize("1024x1792"); } }
35 |
36 | ///
37 | /// Only for DALL-E 3. Requests a wide image that is 1792x1024.
38 | ///
39 | public static ImageSize _1792x1024 { get { return new ImageSize("1792x1024"); } }
40 |
41 | ///
42 | /// Gets the string value for this size to pass to the API
43 | ///
44 | /// The size as a string
45 | public override string ToString()
46 | {
47 | return Value;
48 | }
49 |
50 | ///
51 | /// Returns true is the string value of the sizes match
52 | ///
53 | /// The other object to compare to
54 | /// True is the sizes are the same
55 | public override bool Equals(object obj)
56 | {
57 | if (obj is null)
58 | return false;
59 | else if (obj is ImageSize)
60 | return this.Value.Equals(((ImageSize)obj).Value);
61 | else if (obj is string)
62 | return this.Value.Equals((string)obj);
63 | else
64 | return false;
65 | }
66 |
67 | ///
68 | public override int GetHashCode()
69 | {
70 | return Value.GetHashCode();
71 | }
72 |
73 | public static bool operator ==(ImageSize a, ImageSize b)
74 | {
75 | return a.Equals(b);
76 | }
77 | public static bool operator !=(ImageSize a, ImageSize b)
78 | {
79 | return !a.Equals(b);
80 | }
81 |
82 | ///
83 | /// Gets the string value for this size to pass to the API
84 | ///
85 | /// The ImageSize to convert
86 | public static implicit operator String(ImageSize value) { return value; }
87 |
88 | internal class ImageSizeJsonConverter : JsonConverter
89 | {
90 | public override void WriteJson(JsonWriter writer, ImageSize value, JsonSerializer serializer)
91 | {
92 | writer.WriteValue(value.ToString());
93 | }
94 |
95 | public override ImageSize ReadJson(JsonReader reader, Type objectType, ImageSize existingValue, bool hasExistingValue, JsonSerializer serializer)
96 | {
97 | return new ImageSize(reader.ReadAsString());
98 | }
99 | }
100 | }
101 |
102 | }
103 |
--------------------------------------------------------------------------------
/OpenAI_Tests/EmbeddingEndpointTests.cs:
--------------------------------------------------------------------------------
using System;
using System.Linq;
using System.Threading.Tasks;
using NUnit.Framework;
using OpenAI_API.Embedding;
using OpenAI_API.Models;
6 |
7 | namespace OpenAI_Tests
8 | {
9 | public class EmbeddingEndpointTests
10 | {
11 | [SetUp]
12 | public void Setup()
13 | {
14 | OpenAI_API.APIAuthentication.Default = new OpenAI_API.APIAuthentication(Environment.GetEnvironmentVariable("TEST_OPENAI_SECRET_KEY"));
15 | }
16 |
17 | [Test]
18 | public void GetBasicEmbedding()
19 | {
20 | var api = new OpenAI_API.OpenAIAPI();
21 |
22 | Assert.IsNotNull(api.Embeddings);
23 |
24 | var results = api.Embeddings.CreateEmbeddingAsync(new EmbeddingRequest(Model.AdaTextEmbedding, "A test text for embedding")).Result;
25 | Assert.IsNotNull(results);
26 | if (results.CreatedUnixTime.HasValue)
27 | {
28 | Assert.NotZero(results.CreatedUnixTime.Value);
29 | Assert.NotNull(results.Created);
30 | Assert.Greater(results.Created.Value, new DateTime(2018, 1, 1));
31 | Assert.Less(results.Created.Value, DateTime.Now.AddDays(1));
32 | }
33 | else
34 | {
35 | Assert.Null(results.Created);
36 | }
37 | Assert.NotNull(results.Object);
38 | Assert.NotZero(results.Data.Count);
39 | Assert.That(results.Data.First().Embedding.Length == 1536);
40 | }
41 |
42 | [Test]
43 | public void ReturnedUsage()
44 | {
45 | var api = new OpenAI_API.OpenAIAPI();
46 |
47 | Assert.IsNotNull(api.Embeddings);
48 |
49 | var results = api.Embeddings.CreateEmbeddingAsync(new EmbeddingRequest(Model.AdaTextEmbedding, "A test text for embedding")).Result;
50 | Assert.IsNotNull(results);
51 |
52 | Assert.IsNotNull(results.Usage);
53 | Assert.GreaterOrEqual(results.Usage.PromptTokens, 5);
54 | Assert.GreaterOrEqual(results.Usage.TotalTokens, results.Usage.PromptTokens);
55 | }
56 |
57 | [Test]
58 | public void GetSimpleEmbedding()
59 | {
60 | var api = new OpenAI_API.OpenAIAPI();
61 |
62 | Assert.IsNotNull(api.Embeddings);
63 |
64 | var results = api.Embeddings.GetEmbeddingsAsync("A test text for embedding").Result;
65 | Assert.IsNotNull(results);
66 | Assert.That(results.Length == 1536);
67 | }
68 |
69 | [Test]
70 | public void GetEmbeddingWithLargeModel()
71 | {
72 | var api = new OpenAI_API.OpenAIAPI();
73 |
74 | Assert.IsNotNull(api.Embeddings);
75 |
76 | var results = api.Embeddings.GetEmbeddingsAsync("A test text for embedding", Model.TextEmbedding3Large).Result;
77 | Assert.IsNotNull(results);
78 | Assert.That(results.Length == 3072);
79 | }
80 |
81 | [Test]
82 | public void GetEmbeddingWithSmallModel()
83 | {
84 | var api = new OpenAI_API.OpenAIAPI();
85 |
86 | Assert.IsNotNull(api.Embeddings);
87 |
88 | var results = api.Embeddings.GetEmbeddingsAsync("A test text for embedding", Model.TextEmbedding3Small).Result;
89 | Assert.IsNotNull(results);
90 | Assert.That(results.Length == 1536);
91 | }
92 |
93 |
94 | [Test]
95 | public void GetEmbeddingWithDimensions()
96 | {
97 | var api = new OpenAI_API.OpenAIAPI();
98 |
99 | Assert.IsNotNull(api.Embeddings);
100 |
101 | var results = api.Embeddings.GetEmbeddingsAsync("A test text for embedding", Model.TextEmbedding3Small, 350).Result;
102 | Assert.IsNotNull(results);
103 | Assert.That(results.Length == 350);
104 | }
105 | }
106 | }
107 |
--------------------------------------------------------------------------------
/OpenAI_API/Files/FilesEndpoint.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using System.Collections.Generic;
3 | using System.IO;
4 | using System.Net.Http;
5 | using System.Threading.Tasks;
6 |
7 | namespace OpenAI_API.Files
8 | {
9 | ///
10 | /// The API endpoint for operations List, Upload, Delete, Retrieve files
11 | ///
12 | public class FilesEndpoint : EndpointBase, IFilesEndpoint
13 | {
14 | ///
15 | /// Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of as .
16 | ///
17 | ///
18 | internal FilesEndpoint(OpenAIAPI api) : base(api) { }
19 |
20 | ///
21 | /// The name of the endpoint, which is the final path segment in the API URL. For example, "files".
22 | ///
23 | protected override string Endpoint { get { return "files"; } }
24 |
25 | ///
26 | /// Get the list of all files
27 | ///
28 | ///
29 | ///
30 | public async Task> GetFilesAsync()
31 | {
32 | return (await HttpGet()).Data;
33 | }
34 |
35 | ///
36 | /// Returns information about a specific file
37 | ///
38 | /// The ID of the file to use for this request
39 | ///
40 | public async Task GetFileAsync(string fileId)
41 | {
42 | return await HttpGet($"{Url}/{fileId}");
43 | }
44 |
45 |
46 | ///
47 | /// Returns the contents of the specific file as string
48 | ///
49 | /// The ID of the file to use for this request
50 | ///
51 | public async Task GetFileContentAsStringAsync(string fileId)
52 | {
53 | return await HttpGetContent($"{Url}/{fileId}/content");
54 | }
55 |
56 | ///
57 | /// Delete a file
58 | ///
59 | /// The ID of the file to use for this request
60 | ///
61 | public async Task DeleteFileAsync(string fileId)
62 | {
63 | return await HttpDelete($"{Url}/{fileId}");
64 | }
65 |
66 |
67 | ///
68 | /// Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact OpenAI if you need to increase the storage limit
69 | ///
70 | /// The name of the file to use for this request
71 | /// The intendend purpose of the uploaded documents. Use "fine-tune" for Fine-tuning. This allows us to validate the format of the uploaded file.
72 | public async Task UploadFileAsync(string filePath, string purpose = "fine-tune")
73 | {
74 | var content = new MultipartFormDataContent
75 | {
76 | { new StringContent(purpose), "purpose" },
77 | { new ByteArrayContent(System.IO.File.ReadAllBytes(filePath)), "file", Path.GetFileName(filePath) }
78 | };
79 |
80 | return await HttpPost(Url, content);
81 | }
82 |
83 | ///
84 | /// A helper class to deserialize the JSON API responses. This should not be used directly.
85 | ///
86 | private class FilesData : ApiResultBase
87 | {
88 | [JsonProperty("data")]
89 | public List Data { get; set; }
90 | [JsonProperty("object")]
91 | public string Obj { get; set; }
92 | }
93 | }
94 |
95 |
96 | }
97 |
--------------------------------------------------------------------------------
/OpenAI_API/Chat/ChatResult.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Text;
5 |
6 | namespace OpenAI_API.Chat
7 | {
8 | ///
9 | /// Represents a result from calling the Chat API
10 | ///
11 | public class ChatResult : ApiResultBase
12 | {
13 | ///
14 | /// The identifier of the result, which may be used during troubleshooting
15 | ///
16 | [JsonProperty("id")]
17 | public string Id { get; set; }
18 |
19 | ///
20 | /// The list of choices that the user was presented with during the chat interaction
21 | ///
22 | [JsonProperty("choices")]
23 | public IReadOnlyList Choices { get; set; }
24 |
25 | ///
26 | /// The usage statistics for the chat interaction
27 | ///
28 | [JsonProperty("usage")]
29 | public ChatUsage Usage { get; set; }
30 |
31 | ///
32 | /// A convenience method to return the content of the message in the first choice of this response
33 | ///
34 | /// The content of the message, not including .
35 | public override string ToString()
36 | {
37 | if (Choices != null && Choices.Count > 0)
38 | return Choices[0].ToString();
39 | else
40 | return null;
41 | }
42 |
43 | ///
44 | /// This fingerprint represents the backend configuration that the model runs with. It can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.This is the indicator on whether users should expect "almost always the same result".
45 | ///
46 | [JsonProperty("system_fingerprint")]
47 | public string SystemFingerprint { get; set; }
48 | }
49 |
50 | ///
51 | /// A message received from the API, including the message text, index, and reason why the message finished.
52 | ///
53 | public class ChatChoice
54 | {
55 | ///
56 | /// The index of the choice in the list of choices
57 | ///
58 | [JsonProperty("index")]
59 | public int Index { get; set; }
60 |
61 | ///
62 | /// The message that was presented to the user as the choice
63 | ///
64 | [JsonProperty("message")]
65 | public ChatMessage Message { get; set; }
66 |
67 | ///
68 | /// The reason why the chat interaction ended after this choice was presented to the user
69 | ///
70 | [JsonProperty("finish_reason")]
71 | public string FinishReason { get; set; }
72 |
73 | ///
74 | /// Partial message "delta" from a stream. For example, the result from StreamChatEnumerableAsync.
75 | /// If this result object is not from a stream, this will be null
76 | ///
77 | [JsonProperty("delta")]
78 | public ChatMessage Delta { get; set; }
79 |
80 | ///
81 | /// A convenience method to return the content of the message in this response
82 | ///
83 | /// The content of the message in this response, not including .
84 | public override string ToString()
85 | {
86 | if (Message == null && Delta != null)
87 | return Delta.TextContent;
88 | else
89 | return Message.TextContent;
90 | }
91 | }
92 |
	/// <summary>
	/// How many tokens were used in this chat message.
	/// </summary>
	public class ChatUsage : Usage
	{
		/// <summary>
		/// The number of completion tokens used during the chat interaction
		/// </summary>
		[JsonProperty("completion_tokens")]
		public int CompletionTokens { get; set; }
	}
104 | }
105 |
--------------------------------------------------------------------------------
/OpenAI_API/IOpenAIAPI.cs:
--------------------------------------------------------------------------------
1 | using OpenAI_API.Audio;
2 | using OpenAI_API.Chat;
3 | using OpenAI_API.Completions;
4 | using OpenAI_API.Embedding;
5 | using OpenAI_API.Files;
6 | using OpenAI_API.Images;
7 | using OpenAI_API.Models;
8 | using OpenAI_API.Moderation;
9 |
10 | namespace OpenAI_API
11 | {
	/// <summary>
	/// An interface for <see cref="OpenAIAPI"/>, for ease of mock testing, etc
	/// </summary>
	public interface IOpenAIAPI
	{
		/// <summary>
		/// Base url for OpenAI
		/// for OpenAI, should be "https://api.openai.com/{0}/{1}"
		/// for Azure, should be "https://(your-resource-name.openai.azure.com/openai/deployments/(deployment-id)/{1}?api-version={0}"
		/// </summary>
		string ApiUrlFormat { get; set; }

		/// <summary>
		/// Version of the Rest Api
		/// </summary>
		string ApiVersion { get; set; }

		/// <summary>
		/// The API authentication information to use for API calls
		/// </summary>
		APIAuthentication Auth { get; set; }

		/// <summary>
		/// Text generation in the form of chat messages. This interacts with the ChatGPT API.
		/// </summary>
		IChatEndpoint Chat { get; }

		/// <summary>
		/// Classify text against the OpenAI Content Policy.
		/// </summary>
		IModerationEndpoint Moderation { get; }

		/// <summary>
		/// Text generation is the core function of the API. You give the API a prompt, and it generates a completion. The way you "program" the API to do a task is by simply describing the task in plain english or providing a few written examples. This simple approach works for a wide range of use cases, including summarization, translation, grammar correction, question answering, chatbots, composing emails, and much more (see the prompt library for inspiration).
		/// </summary>
		ICompletionEndpoint Completions { get; }

		/// <summary>
		/// The API lets you transform text into a vector (list) of floating point numbers. The distance between two vectors measures their relatedness. Small distances suggest high relatedness and large distances suggest low relatedness.
		/// </summary>
		IEmbeddingEndpoint Embeddings { get; }

		/// <summary>
		/// The API endpoint for querying available Engines/models
		/// </summary>
		IModelsEndpoint Models { get; }

		/// <summary>
		/// The API lets you do operations with files. You can upload, delete or retrieve files. Files can be used for fine-tuning, search, etc.
		/// </summary>
		IFilesEndpoint Files { get; }

		/// <summary>
		/// The API lets you do operations with images. Given a prompt and/or an input image, the model will generate a new image.
		/// </summary>
		IImageGenerationEndpoint ImageGenerations { get; }

		/// <summary>
		/// The endpoint for the Text to Speech API. This allows you to generate audio from text.
		/// </summary>
		ITextToSpeechEndpoint TextToSpeech { get; }

		/// <summary>
		/// The endpoint for the audio transcription API. This allows you to generate text from audio.
		/// </summary>
		ITranscriptionEndpoint Transcriptions { get; }

		/// <summary>
		/// The endpoint for the audio translation API. This allows you to generate English text from audio in other languages.
		/// </summary>
		ITranscriptionEndpoint Translations { get; }
	}
84 | }
--------------------------------------------------------------------------------
/OpenAI_Tests/AuthTests.cs:
--------------------------------------------------------------------------------
1 | using NUnit.Framework;
2 | using System;
3 | using System.IO;
4 | using System.Threading.Tasks;
5 |
6 | namespace OpenAI_Tests
7 | {
	/// <summary>
	/// Tests for loading and validating <c>APIAuthentication</c> from environment
	/// variables, a local ".openai" config file, and explicit constructor arguments.
	/// NOTE(review): these tests mutate process-wide state (the static
	/// APIAuthentication.Default, two environment variables, and a file in the
	/// working directory), so their behavior depends on execution order within
	/// the fixture — preserved as-is.
	/// </summary>
	public class AuthTests
	{
		[SetUp]
		public void Setup()
		{
			// Write a config file and set env vars so both load paths have known values.
			// The file key deliberately differs from the env var ("pk-test12" vs "pk-test-env")
			// so the tests can tell which source was read.
			File.WriteAllText(".openai", "OPENAI_KEY=pk-test12" + Environment.NewLine + "OPENAI_ORGANIZATION=org-testing123");
			Environment.SetEnvironmentVariable("OPENAI_API_KEY", "pk-test-env");
			Environment.SetEnvironmentVariable("OPENAI_ORGANIZATION", "org-testing123");
		}

		[Test]
		public void GetAuthFromEnv()
		{
			var auth = OpenAI_API.APIAuthentication.LoadFromEnv();
			Assert.IsNotNull(auth);
			Assert.IsNotNull(auth.ApiKey);
			Assert.IsNotEmpty(auth.ApiKey);
			Assert.AreEqual("pk-test-env", auth.ApiKey);
		}

		[Test]
		public void GetAuthFromFile()
		{
			// Reads the ".openai" file written in Setup from the current directory.
			var auth = OpenAI_API.APIAuthentication.LoadFromPath();
			Assert.IsNotNull(auth);
			Assert.IsNotNull(auth.ApiKey);
			Assert.AreEqual("pk-test12", auth.ApiKey);
		}


		[Test]
		public void GetAuthFromNonExistantFile()
		{
			// A missing config file should yield null rather than throwing.
			var auth = OpenAI_API.APIAuthentication.LoadFromPath(filename: "bad.config");
			Assert.IsNull(auth);
		}


		[Test]
		public void GetDefault()
		{
			// Default should match what the environment provides, including the organization.
			var auth = OpenAI_API.APIAuthentication.Default;
			var envAuth = OpenAI_API.APIAuthentication.LoadFromEnv();
			Assert.IsNotNull(auth);
			Assert.IsNotNull(auth.ApiKey);
			Assert.IsNotNull(envAuth);
			Assert.IsNotNull(envAuth.ApiKey);
			Assert.AreEqual(envAuth.ApiKey, auth.ApiKey);
			Assert.IsNotNull(auth.OpenAIOrganization);
			Assert.IsNotNull(envAuth.OpenAIOrganization);
			Assert.AreEqual(envAuth.OpenAIOrganization, auth.OpenAIOrganization);

		}



		[Test]
		public void testHelper()
		{
			// A new API instance should pick up APIAuthentication.Default...
			OpenAI_API.APIAuthentication defaultAuth = OpenAI_API.APIAuthentication.Default;
			OpenAI_API.APIAuthentication manualAuth = new OpenAI_API.APIAuthentication("pk-testAA");
			OpenAI_API.OpenAIAPI api = new OpenAI_API.OpenAIAPI();
			OpenAI_API.APIAuthentication shouldBeDefaultAuth = api.Auth;
			Assert.IsNotNull(shouldBeDefaultAuth);
			Assert.IsNotNull(shouldBeDefaultAuth.ApiKey);
			Assert.AreEqual(defaultAuth.ApiKey, shouldBeDefaultAuth.ApiKey);

			// ...and reflect a manually replaced Default afterwards.
			// NOTE(review): Default is not restored here, so later tests in this
			// fixture see "pk-testAA" unless Setup/another test overwrites it.
			OpenAI_API.APIAuthentication.Default = new OpenAI_API.APIAuthentication("pk-testAA");
			api = new OpenAI_API.OpenAIAPI();
			OpenAI_API.APIAuthentication shouldBeManualAuth = api.Auth;
			Assert.IsNotNull(shouldBeManualAuth);
			Assert.IsNotNull(shouldBeManualAuth.ApiKey);
			Assert.AreEqual(manualAuth.ApiKey, shouldBeManualAuth.ApiKey);
		}

		[Test]
		public void GetKey()
		{
			var auth = new OpenAI_API.APIAuthentication("pk-testAA");
			Assert.IsNotNull(auth.ApiKey);
			Assert.AreEqual("pk-testAA", auth.ApiKey);
		}

		[Test]
		public void ParseKey()
		{
			var auth = new OpenAI_API.APIAuthentication("pk-testAA");
			Assert.IsNotNull(auth.ApiKey);
			Assert.AreEqual("pk-testAA", auth.ApiKey);
			Assert.IsNull(auth.OpenAIOrganization);
			// Exercises the implicit string -> APIAuthentication conversion.
			auth = "pk-testCC";
			Assert.IsNotNull(auth.ApiKey);
			Assert.AreEqual("pk-testCC", auth.ApiKey);

			auth = new OpenAI_API.APIAuthentication("sk-testBB", "orgTest");
			Assert.IsNotNull(auth.ApiKey);
			Assert.AreEqual("sk-testBB", auth.ApiKey);
			Assert.IsNotNull(auth.OpenAIOrganization);
			Assert.AreEqual("orgTest", auth.OpenAIOrganization);
		}

		[Test]
		public async Task TestBadKey()
		{
			// An invalid key and a null key should both fail validation without throwing.
			var auth = new OpenAI_API.APIAuthentication("pk-testAA");
			Assert.IsFalse(await auth.ValidateAPIKey());

			auth = new OpenAI_API.APIAuthentication(null);
			Assert.IsFalse(await auth.ValidateAPIKey());
		}

		[Test]
		public async Task TestValidateGoodKey()
		{
			// Requires a real key in TEST_OPENAI_SECRET_KEY; makes a live API call.
			var auth = new OpenAI_API.APIAuthentication(Environment.GetEnvironmentVariable("TEST_OPENAI_SECRET_KEY"));
			Assert.IsTrue(await auth.ValidateAPIKey());
		}

	}
127 | }
--------------------------------------------------------------------------------
/OpenAI_API/Audio/ITextToSpeechEndpoint.cs:
--------------------------------------------------------------------------------
1 | using System.IO;
2 | using System.Threading.Tasks;
3 | using OpenAI_API.Models;
4 |
5 | namespace OpenAI_API.Audio
6 | {
7 | ///
8 | /// The Endpoint for the Text to Speech API. This allows you to generate audio from text. See
9 | ///
10 | public interface ITextToSpeechEndpoint
11 | {
12 | ///
13 | /// This allows you to set default parameters for every request, for example to set a default voice or model. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
14 | ///
15 | TextToSpeechRequest DefaultTTSRequestArgs { get; set; }
16 |
17 | ///
18 | /// Calls the API to create speech from text, and returns the raw stream of the audio file.
19 | ///
20 | /// The text to speech request to submit to the API
21 | /// A stream of the audio file in the requested format.
22 | Task GetSpeechAsStreamAsync(TextToSpeechRequest request);
23 |
24 | ///
25 | /// Calls the API to create speech from text, and returns the raw stream of the audio file.
26 | ///
27 | /// The text to generate audio for. The maximum length is 4096 characters.
28 | /// The voice to use when generating the audio. Supported voices can be found in .
29 | /// The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.
30 | /// The default response format is "mp3", but other formats are available in . See
31 | /// TTS is an AI model that converts text to natural sounding spoken text. OpenAI offers two different model variates, is optimized for real time text to speech use cases and is optimized for quality.
32 | /// A stream of the audio file in the requested format.
33 | Task GetSpeechAsStreamAsync(string input, string voice = null, double? speed = null, string responseFormat = null, Model model = null);
34 |
35 | ///
36 | /// Calls the API to create speech from text, and saves the audio file to disk.
37 | ///
38 | /// The text to speech request to submit to the API
39 | /// The local path to save the audio file to.
40 | /// A representing the saved speech file.
41 | Task SaveSpeechToFileAsync(TextToSpeechRequest request, string localPath);
42 |
43 | ///
44 | /// Calls the API to create speech from text, and saves the audio file to disk.
45 | ///
46 | /// The text to generate audio for. The maximum length is 4096 characters.
47 | /// The local path to save the audio file to.
48 | /// The voice to use when generating the audio. Supported voices can be found in .
49 | /// The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.
50 | /// The default response format is "mp3", but other formats are available in . See
51 | /// TTS is an AI model that converts text to natural sounding spoken text. OpenAI offers two different model variates, is optimized for real time text to speech use cases and is optimized for quality.
52 | /// A stream of the audio file in the requested format.
53 | Task SaveSpeechToFileAsync(string input, string localPath, string voice = null, double? speed = null, string responseFormat = null, Model model = null);
54 |
55 |
56 | }
57 | }
--------------------------------------------------------------------------------
/OpenAI_Tests/ModerationEndpointTests.cs:
--------------------------------------------------------------------------------
1 | using NUnit.Framework;
2 | using OpenAI_API.Models;
3 | using System;
4 | using System.Linq;
5 | using OpenAI_API.Moderation;
6 |
7 | namespace OpenAI_Tests
8 | {
9 | public class ModerationEndpointTests
10 | {
11 | [SetUp]
12 | public void Setup()
13 | {
14 | OpenAI_API.APIAuthentication.Default = new OpenAI_API.APIAuthentication(Environment.GetEnvironmentVariable("TEST_OPENAI_SECRET_KEY"));
15 | }
16 |
17 | [Test]
18 | public void NoViolations()
19 | {
20 | var api = new OpenAI_API.OpenAIAPI();
21 |
22 | Assert.IsNotNull(api.Moderation);
23 |
24 | var results = api.Moderation.CallModerationAsync(new ModerationRequest("Hello world")).Result;
25 | Assert.IsNotNull(results);
26 | if (results.CreatedUnixTime.HasValue)
27 | {
28 | Assert.NotZero(results.CreatedUnixTime.Value);
29 | Assert.NotNull(results.Created);
30 | Assert.Greater(results.Created.Value, new DateTime(2018, 1, 1));
31 | Assert.Less(results.Created.Value, DateTime.Now.AddDays(1));
32 | }
33 | else
34 | {
35 | Assert.Null(results.Created);
36 | }
37 | Assert.NotNull(results.Results);
38 | Assert.NotZero(results.Results.Count);
39 | var result = results.Results[0];
40 | Assert.False(result.Flagged);
41 | Assert.Zero(result.FlaggedCategories.Count);
42 | Assert.Greater(result.HighestFlagScore, 0d);
43 | Assert.Null(result.MainContentFlag);
44 | }
45 |
46 |
47 | [Test]
48 | public void MultipleInputs()
49 | {
50 | var api = new OpenAI_API.OpenAIAPI();
51 |
52 | Assert.IsNotNull(api.Moderation);
53 |
54 | var results = api.Moderation.CallModerationAsync(new ModerationRequest("Hello world", "Good morning")).Result;
55 | Assert.IsNotNull(results);
56 | if (results.CreatedUnixTime.HasValue)
57 | {
58 | Assert.NotZero(results.CreatedUnixTime.Value);
59 | Assert.NotNull(results.Created);
60 | Assert.Greater(results.Created.Value, new DateTime(2018, 1, 1));
61 | Assert.Less(results.Created.Value, DateTime.Now.AddDays(1));
62 | }
63 | else
64 | {
65 | Assert.Null(results.Created);
66 | }
67 | Assert.NotNull(results.Results);
68 | Assert.AreEqual(2, results.Results.Count);
69 | foreach (var result in results.Results)
70 | {
71 | Assert.False(result.Flagged);
72 | Assert.Zero(result.FlaggedCategories.Count);
73 | Assert.Greater(result.HighestFlagScore, 0d);
74 | Assert.Null(result.MainContentFlag);
75 | }
76 | }
77 |
78 |
79 |
80 | [Test]
81 | public void MultipleInputsFailing()
82 | {
83 | var api = new OpenAI_API.OpenAIAPI();
84 |
85 | Assert.IsNotNull(api.Moderation);
86 |
87 | var results = api.Moderation.CallModerationAsync(new ModerationRequest("You are going to die, you scum", "I want to kill them")).Result;
88 | Assert.IsNotNull(results);
89 | if (results.CreatedUnixTime.HasValue)
90 | {
91 | Assert.NotZero(results.CreatedUnixTime.Value);
92 | Assert.NotNull(results.Created);
93 | Assert.Greater(results.Created.Value, new DateTime(2018, 1, 1));
94 | Assert.Less(results.Created.Value, DateTime.Now.AddDays(1));
95 | }
96 | else
97 | {
98 | Assert.Null(results.Created);
99 | }
100 | Assert.NotNull(results.Results);
101 | Assert.AreEqual(2, results.Results.Count);
102 | foreach (var result in results.Results)
103 | {
104 | Assert.True(result.Flagged);
105 | Assert.NotZero(result.FlaggedCategories.Count);
106 | Assert.Greater(result.HighestFlagScore, 0.5d);
107 | Assert.NotNull(result.MainContentFlag);
108 | }
109 | }
110 |
111 | [Test]
112 | public void ViolenceExample()
113 | {
114 | var api = new OpenAI_API.OpenAIAPI();
115 |
116 | Assert.IsNotNull(api.Moderation);
117 |
118 | var results = api.Moderation.CallModerationAsync("I want to kill them.").Result;
119 | Assert.IsNotNull(results);
120 | if (results.CreatedUnixTime.HasValue)
121 | {
122 | Assert.NotZero(results.CreatedUnixTime.Value);
123 | Assert.NotNull(results.Created);
124 | Assert.Greater(results.Created.Value, new DateTime(2018, 1, 1));
125 | Assert.Less(results.Created.Value, DateTime.Now.AddDays(1));
126 | }
127 | else
128 | {
129 | Assert.Null(results.Created);
130 | }
131 | Assert.NotNull(results.Results);
132 | Assert.NotZero(results.Results.Count);
133 | var result = results.Results[0];
134 | Assert.True(result.Flagged);
135 | Assert.NotZero(result.FlaggedCategories.Count);
136 | Assert.Greater(result.HighestFlagScore, 0.5d);
137 | Assert.AreEqual("violence", result.MainContentFlag);
138 | Assert.AreEqual(result.HighestFlagScore, result.CategoryScores["violence"]);
139 | Assert.AreEqual("violence", result.FlaggedCategories.First());
140 | }
141 |
142 | }
143 | }
144 |
--------------------------------------------------------------------------------
/OpenAI_API/Chat/ChatMessageRole.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.ComponentModel.Design;
5 | using System.Text;
6 |
7 | namespace OpenAI_API.Chat
8 | {
9 | ///
10 | /// Represents the Role of a . Typically, a conversation is formatted with a system message first, followed by alternating user and assistant messages. See the OpenAI docs for more details about usage.
11 | ///
12 | public class ChatMessageRole : IEquatable
13 | {
14 | ///
15 | /// Contructor is private to force usage of strongly typed values
16 | ///
17 | ///
18 | private ChatMessageRole(string value) { Value = value; }
19 |
20 | ///
21 | /// Gets the singleton instance of based on the string value.
22 | ///
23 | /// Muse be one of "system", "user", or "assistant"
24 | ///
25 | public static ChatMessageRole FromString(string roleName)
26 | {
27 | switch (roleName)
28 | {
29 | case "system":
30 | return ChatMessageRole.System;
31 | case "user":
32 | return ChatMessageRole.User;
33 | case "assistant":
34 | return ChatMessageRole.Assistant;
35 | default:
36 | return null;
37 | }
38 | }
39 |
40 | private string Value { get; }
41 |
42 | ///
43 | /// The system message helps set the behavior of the assistant.
44 | ///
45 | public static ChatMessageRole System { get; } = new ChatMessageRole("system");
46 | ///
47 | /// The user messages help instruct the assistant. They can be generated by the end users of an application, or set by a developer as an instruction.
48 | ///
49 | public static ChatMessageRole User { get; } = new ChatMessageRole("user");
50 | ///
51 | /// The assistant messages help store prior responses. They can also be written by a developer to help give examples of desired behavior.
52 | ///
53 | public static ChatMessageRole Assistant { get; } = new ChatMessageRole("assistant");
54 |
55 | ///
56 | /// Gets the string value for this role to pass to the API
57 | ///
58 | /// The size as a string
59 | public override string ToString()
60 | {
61 | return Value;
62 | }
63 |
64 | ///
65 | /// Determines whether this instance and a specified object have the same value.
66 | ///
67 | /// The ChatMessageRole to compare to this instance
68 | /// true if obj is a ChatMessageRole and its value is the same as this instance; otherwise, false. If obj is null, the method returns false
69 | public override bool Equals(object obj)
70 | {
71 | return Value.Equals((obj as ChatMessageRole).Value);
72 | }
73 |
74 | ///
75 | /// Returns the hash code for this object
76 | ///
77 | /// A 32-bit signed integer hash code
78 | public override int GetHashCode()
79 | {
80 | return Value.GetHashCode();
81 | }
82 |
83 | ///
84 | /// Determines whether this instance and a specified object have the same value.
85 | ///
86 | /// The ChatMessageRole to compare to this instance
87 | /// true if other's value is the same as this instance; otherwise, false. If other is null, the method returns false
88 | public bool Equals(ChatMessageRole other)
89 | {
90 | return Value.Equals(other.Value);
91 | }
92 |
93 | ///
94 | /// Gets the string value for this role to pass to the API
95 | ///
96 | /// The ChatMessageRole to convert
97 | public static implicit operator String(ChatMessageRole value) { return value.Value; }
98 |
99 | /////
100 | ///// Used during the Json serialization process
101 | /////
102 | //internal class ChatMessageRoleJsonConverter : JsonConverter
103 | //{
104 | // public override void WriteJson(JsonWriter writer, ChatMessageRole value, JsonSerializer serializer)
105 | // {
106 | // writer.WriteValue(value.ToString());
107 | // }
108 |
109 | // public override ChatMessageRole ReadJson(JsonReader reader, Type objectType, ChatMessageRole existingValue, bool hasExistingValue, JsonSerializer serializer)
110 | // {
111 | // if (reader.TokenType != JsonToken.String)
112 | // {
113 | // throw new JsonSerializationException();
114 | // }
115 | // return new ChatMessageRole(reader.ReadAsString());
116 | // }
117 | //}
118 | }
119 | }
120 |
--------------------------------------------------------------------------------
/OpenAI_Tests/TranscriptionTests.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using NUnit.Framework;
3 | using OpenAI_API.Audio;
4 | using OpenAI_API.Chat;
5 | using OpenAI_API.Completions;
6 | using OpenAI_API.Models;
7 | using OpenAI_API.Moderation;
8 | using System;
9 | using System.Collections.Generic;
10 | using System.IO;
11 | using System.Linq;
12 | using System.Threading;
13 | using System.Threading.Tasks;
14 | using static OpenAI_API.Audio.TextToSpeechRequest;
15 | using static OpenAI_API.Chat.ChatMessage;
16 |
17 | namespace OpenAI_Tests
18 | {
19 | public class TranscriptionTests
20 | {
		/// <summary>
		/// Points the default API authentication at the TEST_OPENAI_SECRET_KEY environment variable before each test.
		/// </summary>
		[SetUp]
		public void Setup()
		{
			OpenAI_API.APIAuthentication.Default = new OpenAI_API.APIAuthentication(Environment.GetEnvironmentVariable("TEST_OPENAI_SECRET_KEY"));
		}
26 |
27 | [Test]
28 | public async Task EnglishTranscribeToText()
29 | {
30 | var api = new OpenAI_API.OpenAIAPI();
31 |
32 | string result = await api.Transcriptions.GetTextAsync("english-test.m4a");
33 | Assert.IsNotNull(result);
34 | Assert.AreEqual("Hello, this is a test of the transcription function. Is it coming out okay?", result.Trim());
35 |
36 | result = await api.Transcriptions.GetTextAsync("english-test.m4a", "en");
37 | Assert.IsNotNull(result);
38 | Assert.AreEqual("Hello, this is a test of the transcription function. Is it coming out okay?", result.Trim());
39 | }
40 |
41 | [Test]
42 | public async Task ChineseTranscribeToText()
43 | {
44 | var api = new OpenAI_API.OpenAIAPI();
45 | string result = await api.Transcriptions.GetTextAsync("chinese-test.m4a");
46 | Assert.IsNotNull(result);
47 | Assert.AreEqual("你好,我的名字是初培。我会说一点点普通话。你呢?", result.Trim());
48 |
49 | result = await api.Transcriptions.GetTextAsync("chinese-test.m4a", "zh");
50 | Assert.IsNotNull(result);
51 | Assert.AreEqual("你好,我的名字是初培。我会说一点点普通话。你呢?", result.Trim());
52 | }
53 |
54 | [Test]
55 | public async Task ChineseTranslateToEnglishText()
56 | {
57 | var api = new OpenAI_API.OpenAIAPI();
58 | string result = await api.Translations.GetTextAsync("chinese-test.m4a");
59 | Assert.IsNotNull(result);
60 | Assert.AreEqual("Hello, my name is Chu Pei. I can speak a little Mandarin. How about you?", result.Trim());
61 | }
62 |
63 | [TestCase("json", "\"text\": ")]
64 | [TestCase("srt", "00:00:00,000")]
65 | [TestCase("vtt", "00:00:00.000")]
66 | public async Task TranscribeToFormat(string format, string searchFor)
67 | {
68 | var api = new OpenAI_API.OpenAIAPI();
69 | string result = await api.Transcriptions.GetAsFormatAsync("english-test.m4a", format);
70 | Assert.IsNotNull(result);
71 | Assert.IsNotEmpty(result);
72 | Assert.True(result.Contains("Hello, this is a test of the transcription function. Is it coming out okay?"));
73 | Assert.True(result.Contains(searchFor), "Did not contain the format indicator: "+searchFor);
74 | result = await api.Transcriptions.GetAsFormatAsync("chinese-test.m4a",format, "zh");
75 | Assert.IsNotNull(result);
76 | Assert.IsNotEmpty(result);
77 | Assert.True(result.Contains("你好,我的名字是初培。我会说一点点普通话。你呢?"));
78 | Assert.True(result.Contains(searchFor), "Did not contain the format indicator: " + searchFor);
79 | }
80 |
81 | [Test]
82 | public async Task GetDetailedTranscribeJson()
83 | {
84 | var api = new OpenAI_API.OpenAIAPI();
85 | AudioResultVerbose result = await api.Transcriptions.GetWithDetailsAsync("english-test.m4a");
86 | Assert.IsNotNull(result);
87 | Assert.IsNotEmpty(result.RequestId);
88 | Assert.Greater(result.ProcessingTime.TotalMilliseconds, 100);
89 | Assert.AreEqual(6.99, result.duration, 0.05);
90 | Assert.AreEqual("english", result.language);
91 | Assert.AreEqual("transcribe", result.task);
92 | Assert.AreEqual("Hello, this is a test of the transcription function. Is it coming out okay?", result.text.Trim());
93 | Assert.AreEqual(1,result.segments.Count);
94 | Assert.AreEqual("Hello, this is a test of the transcription function. Is it coming out okay?", result.segments[0].text.Trim());
95 | Assert.AreEqual(19, result.segments[0].tokens.Count);
96 | }
97 |
98 |
99 | [Test]
100 | public async Task GetDetailedTranslateJson()
101 | {
102 | var api = new OpenAI_API.OpenAIAPI();
103 | var result = await api.Translations.GetWithDetailsAsync("chinese-test.m4a");
104 | Assert.IsNotNull(result);
105 | Assert.IsNotEmpty(result.RequestId);
106 | Assert.Greater(result.ProcessingTime.TotalMilliseconds, 100);
107 | Assert.AreEqual(10.62, result.duration, 0.05);
108 | Assert.AreEqual("translate", result.task);
109 | Assert.AreEqual("Hello, my name is Chu Pei. I can speak a little Mandarin. How about you?", result.text.Trim());
110 | Assert.AreEqual(1, result.segments.Count);
111 | Assert.AreEqual("Hello, my name is Chu Pei. I can speak a little Mandarin. How about you?", result.segments[0].text.Trim());
112 | Assert.AreEqual(22, result.segments[0].tokens.Count);
113 | }
114 | }
115 | }
116 |
--------------------------------------------------------------------------------
/OpenAI_Tests/fine-tuning-data.jsonl:
--------------------------------------------------------------------------------
1 | { "prompt": "type for FilterRelationType", "completion":"Numeric(4.0).###"}
2 | { "prompt": "type for FilterOperation", "completion":"Numeric(4.0).###"}
3 | { "prompt": "type for TargetType", "completion":"Numeric(4.0).###"}
4 | { "prompt": "type for RuntimeEnvironment", "completion":"Numeric(4.0).###"}
5 | { "prompt": "type for MapType", "completion":"Numeric(4.0).###"}
6 | { "prompt": "type for LogLevel", "completion":"Numeric(4.0).###"}
7 | { "prompt": "type for StorePurchaseState", "completion":"Numeric(1.0).###"}
8 | { "prompt": "type for StorePurchasePlatform", "completion":"Numeric(4.0).###"}
9 | { "prompt": "type for StoreProductType", "completion":"Numeric(4.0).###"}
10 | { "prompt": "type for StorePurchaseStatus", "completion":"Numeric(4.0).###"}
11 | { "prompt": "type for MediaMetadataKey", "completion":"VarChar(50).###"}
12 | { "prompt": "type for MediaStreamType", "completion":"Numeric(4.0).###"}
13 | { "prompt": "type for DeviceAuthenticationPolicy", "completion":"Numeric(1.0).###"}
14 | { "prompt": "type for Url", "completion":"VarChar(1000).###"}
15 | { "prompt": "type for IMEMode", "completion":"Character(40).###"}
16 | { "prompt": "type for Time", "completion":"DateTime.###"}
17 | { "prompt": "type for Encoding", "completion":"Character(256).###"}
18 | { "prompt": "type for Timezones", "completion":"Character(60).###"}
19 | { "prompt": "type for Effect", "completion":"Character(20).###"}
20 | { "prompt": "type for CallType", "completion":"Character(20).###"}
21 | { "prompt": "type for CryptoEncryptAlgorithm", "completion":"Character(40).###"}
22 | { "prompt": "type for CryptoHashAlgorithm", "completion":"Character(40).###"}
23 | { "prompt": "type for CryptoSignAlgorithm", "completion":"Character(40).###"}
24 | { "prompt": "type for TrnMode", "completion":"Character(3).###"}
25 | { "prompt": "type for Address", "completion":"VarChar(1K).###"}
26 | { "prompt": "type for Component", "completion":"VarChar(1000).###"}
27 | { "prompt": "type for Email", "completion":"VarChar(100).###"}
28 | { "prompt": "type for Geolocation", "completion":"Character(50).###"}
29 | { "prompt": "type for Html", "completion":"LongVarChar(2M).###"}
30 | { "prompt": "type for Phone", "completion":"Character(20).###"}
31 | { "prompt": "type for APIAuthorizationStatus", "completion":"Numeric(1.0).###"}
32 | { "prompt": "type for MessageTypes", "completion":"Numeric(2.0).###"}
33 | { "prompt": "type for ProgressIndicatorType", "completion":"Numeric(1.0).###"}
34 | { "prompt": "type for RecentLinksOptions", "completion":"Numeric(4.0).###"}
35 | { "prompt": "type for ObjectName", "completion":"VarChar(256).###"}
36 | { "prompt": "type for CallTargetSize", "completion":"Character(10).###"}
37 | { "prompt": "type for EventExecution", "completion":"Numeric(1.0).###"}
38 | { "prompt": "type for PushNotificationPriority", "completion":"Character(20).###"}
39 | { "prompt": "type for SmartDeviceType", "completion":"Numeric(1.0).###"}
40 | { "prompt": "type for CameraAPIQuality", "completion":"Numeric(1.0).###"}
41 | { "prompt": "type for AudioAPISessionType", "completion":"Numeric(1.0).###"}
42 | { "prompt": "type for MediaDuration", "completion":"Numeric(12.0).###"}
43 | { "prompt": "type for PlaybackState", "completion":"Numeric(4.0).###"}
44 | { "prompt": "type for NetworkAPIConnectionType", "completion":"Numeric(1.0).###"}
45 | { "prompt": "type for EventAction", "completion":"Numeric(4.0).###"}
46 | { "prompt": "type for EventStatus", "completion":"Numeric(4.0).###"}
47 | { "prompt": "type for EventData", "completion":"LongVarChar(2M).###"}
48 | { "prompt": "type for EventErrors", "completion":"LongVarChar(2M).###"}
49 | { "prompt": "type for ApplicationState", "completion":"Numeric(1.0).###"}
50 | { "prompt": "type for SynchronizationReceiveResult", "completion":"Numeric(4.0).###"}
51 | { "prompt": "type for RegionState", "completion":"Numeric(1.0).###"}
52 | { "prompt": "type for BeaconProximity", "completion":"Numeric(1.0).###"}
53 | { "prompt": "type for MediaFinishReason", "completion":"Numeric(4.0).###"}
54 | { "prompt": "type for HttpMethod", "completion":"Character(7).###"}
55 | { "prompt": "type for HttpAuthenticationType", "completion":"Numeric(4.0).###"}
56 | { "prompt": "type for CommonCallTarget", "completion":"Character(20).###"}
57 | { "prompt": "type for BarcodeType", "completion":"VarChar(40).###"}
58 | { "prompt": "type for Name", "completion":"VarChar(100).###"}
59 | { "prompt": "type for ContactData", "completion":"VarChar(80).###"}
60 | { "prompt": "type for Lang", "completion":"Character(3).###"}
61 | { "prompt": "type for Bio", "completion":"LongVarChar(2M).###"}
62 | { "prompt": "type for FullName", "completion":"VarChar(150).###"}
63 | { "prompt": "type for Status", "completion":"Character(1).###"}
64 | { "prompt": "type for Id", "completion":"Numeric(8.0).###"}
65 | { "prompt": "type for SessionType", "completion":"Character(1).###"}
66 | { "prompt": "type for Title", "completion":"VarChar(160).###"}
67 | { "prompt": "type for Abstract", "completion":"VarChar(1000).###"}
68 | { "prompt": "type for Position", "completion":"Numeric(4.0).###"}
69 | { "prompt": "type for Hashtag", "completion":"VarChar(40).###"}
70 | { "prompt": "type for Duration", "completion":"Numeric(3.0).###"}
71 | { "prompt": "type for ColorTrack", "completion":"Character(3).###"}
72 | { "prompt": "type for Description", "completion":"LongVarChar(2M).###"}
73 | { "prompt": "type for SponsorType", "completion":"Character(1).###"}
74 | { "prompt": "type for Count", "completion":"Numeric(4.0).###"}
75 | { "prompt": "type for ListType", "completion":"Character(1).###"}
76 |
--------------------------------------------------------------------------------
/OpenAI_Tests/ImageGenerationEndpointTests.cs:
--------------------------------------------------------------------------------
using NUnit.Framework;
using OpenAI_API.Images;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
7 |
8 | namespace OpenAI_Tests
9 | {
10 | public class ImageGenerationEndpointTests
11 | {
12 | [SetUp]
13 | public void Setup()
14 | {
15 | OpenAI_API.APIAuthentication.Default = new OpenAI_API.APIAuthentication(Environment.GetEnvironmentVariable("TEST_OPENAI_SECRET_KEY"));
16 | }
17 |
18 | [TestCase(null)]
19 | [TestCase("dall-e-2")]
20 | [TestCase("dall-e-3")]
21 | public void SimpleImageCreation(string model)
22 | {
23 | var api = new OpenAI_API.OpenAIAPI();
24 | Assert.IsNotNull(api.ImageGenerations);
25 | var results = api.ImageGenerations.CreateImageAsync("A drawing of a computer writing a test", model).Result;
26 | Assert.IsNotNull(results);
27 | if (results.CreatedUnixTime.HasValue)
28 | {
29 | Assert.NotZero(results.CreatedUnixTime.Value);
30 | Assert.NotNull(results.Created);
31 | Assert.Greater(results.Created.Value, new DateTime(2023, 1, 1));
32 | Assert.Less(results.Created.Value, DateTime.Now.AddDays(1));
33 | }
34 | else
35 | {
36 | Assert.Null(results.Created);
37 | }
38 |
39 | Assert.NotZero(results.Data.Count);
40 | Assert.AreEqual(results.Data.Count, 1);
41 | Assert.NotNull(results.Data.First().Url);
42 | Assert.That(results.Data.First().Url.Length > 0);
43 | Assert.That(results.Data.First().Url.StartsWith("https://"));
44 | }
45 |
46 | [TestCase("256x256")]
47 | [TestCase("512x512")]
48 | [TestCase("1024x1024")]
49 | public void CreateDALLE2ImageWithUrl(string size)
50 | {
51 | var api = new OpenAI_API.OpenAIAPI();
52 |
53 | Assert.IsNotNull(api.ImageGenerations);
54 |
55 | var results = api.ImageGenerations.CreateImageAsync(new ImageGenerationRequest("A cyberpunk monkey hacker dreaming of a beautiful bunch of bananas, digital art", 2, new ImageSize(size))).Result;
56 | Assert.IsNotNull(results);
57 | if (results.CreatedUnixTime.HasValue)
58 | {
59 | Assert.NotZero(results.CreatedUnixTime.Value);
60 | Assert.NotNull(results.Created);
61 | Assert.Greater(results.Created.Value, new DateTime(2023, 1, 1));
62 | Assert.Less(results.Created.Value, DateTime.Now.AddDays(1));
63 | }
64 | else
65 | {
66 | Assert.Null(results.Created);
67 | }
68 |
69 | Assert.NotZero(results.Data.Count);
70 | Assert.AreEqual(results.Data.Count, 2);
71 | Assert.NotNull(results.Data.First().Url);
72 | Assert.That(results.Data.First().Url.Length > 0);
73 | Assert.That(results.Data.First().Url.StartsWith("https://"));
74 | }
75 |
76 |
77 | [Test]
78 | public void CreateDALLE2ImageBase64Enc()
79 | {
80 | var api = new OpenAI_API.OpenAIAPI();
81 |
82 | Assert.IsNotNull(api.ImageGenerations);
83 |
84 | var results = api.ImageGenerations.CreateImageAsync(new ImageGenerationRequest("A cyberpunk monkey hacker dreaming of a beautiful bunch of bananas, digital art", 1, ImageSize._256, responseFormat: ImageResponseFormat.B64_json)).Result;
85 | Assert.IsNotNull(results);
86 | if (results.CreatedUnixTime.HasValue)
87 | {
88 | Assert.NotZero(results.CreatedUnixTime.Value);
89 | Assert.NotNull(results.Created);
90 | Assert.Greater(results.Created.Value, new DateTime(2023, 1, 1));
91 | Assert.Less(results.Created.Value, DateTime.Now.AddDays(1));
92 | }
93 | else
94 | {
95 | Assert.Null(results.Created);
96 | }
97 |
98 | Assert.NotZero(results.Data.Count);
99 | Assert.NotNull(results.Data.First().Base64Data);
100 | Assert.That(results.Data.First().Base64Data.Length > 0);
101 | }
102 |
103 | [TestCase("standard", "1024x1024")]
104 | [TestCase("hd", "1024x1024")]
105 | [TestCase("standard", "1024x1792")]
106 | [TestCase("standard", "1792x1024")]
107 | public void CreateDALLE3ImageWithUrl(string quality, string size)
108 | {
109 | var api = new OpenAI_API.OpenAIAPI();
110 |
111 | Assert.IsNotNull(api.ImageGenerations);
112 |
113 | var results = api.ImageGenerations.CreateImageAsync(new ImageGenerationRequest("A cyberpunk monkey hacker dreaming of a beautiful bunch of bananas, digital art", OpenAI_API.Models.Model.DALLE3, new ImageSize(size), quality)).Result;
114 | Assert.IsNotNull(results);
115 | if (results.CreatedUnixTime.HasValue)
116 | {
117 | Assert.NotZero(results.CreatedUnixTime.Value);
118 | Assert.NotNull(results.Created);
119 | Assert.Greater(results.Created.Value, new DateTime(2023, 1, 1));
120 | Assert.Less(results.Created.Value, DateTime.Now.AddDays(1));
121 | }
122 | else
123 | {
124 | Assert.Null(results.Created);
125 | }
126 |
127 | Assert.NotZero(results.Data.Count);
128 | Assert.NotNull(results.Data.First().Url);
129 | Assert.That(results.Data.First().Url.Length > 0);
130 | Assert.That(results.Data.First().Url.StartsWith("https://"));
131 | }
132 |
133 | [TestCase("dall-e-2", "hd", "1024x1024")]
134 | [TestCase("dall-e-2", "invalid-quality", "1024x1024")]
135 | [TestCase("dall-e-2", "standard", "1024x1792")]
136 | [TestCase("dall-e-3", "standard", "256x256")]
137 | [TestCase("dall-e-3", "invalid-quality", "256x256")]
138 | public void BadParameterCombosShouldFail(string model, string quality, string size)
139 | {
140 | var api = new OpenAI_API.OpenAIAPI();
141 |
142 | Assert.IsNotNull(api.ImageGenerations);
143 |
144 | Assert.ThrowsAsync(async () => await api.ImageGenerations.CreateImageAsync(new ImageGenerationRequest("A cyberpunk monkey hacker dreaming of a beautiful bunch of bananas, digital art", model, new ImageSize(size), quality)));
145 | }
146 |
147 | [Test]
148 | public void BadNumImagesWithDalle3ShouldFail()
149 | {
150 | var api = new OpenAI_API.OpenAIAPI();
151 |
152 | Assert.IsNotNull(api.ImageGenerations);
153 |
154 | var req = new ImageGenerationRequest("A cyberpunk monkey hacker dreaming of a beautiful bunch of bananas, digital art", OpenAI_API.Models.Model.DALLE3);
155 | req.NumOfImages = 2;
156 |
157 | Assert.ThrowsAsync(async () => await api.ImageGenerations.CreateImageAsync(req));
158 | }
159 |
160 |
161 | }
162 | }
163 |
--------------------------------------------------------------------------------
/OpenAI_API/OpenAIAPI.cs:
--------------------------------------------------------------------------------
1 | using OpenAI_API.Audio;
2 | using OpenAI_API.Chat;
3 | using OpenAI_API.Completions;
4 | using OpenAI_API.Embedding;
5 | using OpenAI_API.Files;
6 | using OpenAI_API.Images;
7 | using OpenAI_API.Models;
8 | using OpenAI_API.Moderation;
9 | using System.Net.Http;
10 |
11 | namespace OpenAI_API
12 | {
13 | ///
14 | /// Entry point to the OpenAPI API, handling auth and allowing access to the various API endpoints
15 | ///
16 | public class OpenAIAPI : IOpenAIAPI
17 | {
18 | ///
19 | /// Base url for OpenAI
20 | /// for OpenAI, should be "https://api.openai.com/{0}/{1}"
21 | /// for Azure, should be "https://(your-resource-name.openai.azure.com/openai/deployments/(deployment-id)/{1}?api-version={0}"
22 | ///
23 | public string ApiUrlFormat { get; set; } = "https://api.openai.com/{0}/{1}";
24 |
25 | ///
26 | /// Version of the Rest Api
27 | ///
28 | public string ApiVersion { get; set; } = "v1";
29 |
30 | ///
31 | /// The API authentication information to use for API calls
32 | ///
33 | public APIAuthentication Auth { get; set; }
34 |
35 | ///
36 | /// Optionally provide an IHttpClientFactory to create the client to send requests.
37 | ///
38 | public IHttpClientFactory HttpClientFactory { get; set; }
39 |
40 | ///
41 | /// Creates a new entry point to the OpenAPI API, handling auth and allowing access to the various API endpoints
42 | ///
43 | /// The API authentication information to use for API calls, or to attempt to use the , potentially loading from environment vars or from a config file.
44 | public OpenAIAPI(APIAuthentication apiKeys = null)
45 | {
46 | this.Auth = apiKeys.ThisOrDefault();
47 | Completions = new CompletionEndpoint(this);
48 | Models = new ModelsEndpoint(this);
49 | Files = new FilesEndpoint(this);
50 | Embeddings = new EmbeddingEndpoint(this);
51 | Chat = new ChatEndpoint(this);
52 | Moderation = new ModerationEndpoint(this);
53 | ImageGenerations = new ImageGenerationEndpoint(this);
54 | TextToSpeech = new TextToSpeechEndpoint(this);
55 | Transcriptions = new TranscriptionEndpoint(this, false);
56 | Translations = new TranscriptionEndpoint(this, true);
57 | }
58 |
59 | ///
60 | /// Instantiates a version of the API for connecting to the Azure OpenAI endpoint instead of the main OpenAI endpoint.
61 | ///
62 | /// The name of your Azure OpenAI Resource
63 | /// The name of your model deployment. You're required to first deploy a model before you can make calls.
64 | /// The API authentication information to use for API calls, or to attempt to use the , potentially loading from environment vars or from a config file. Currently this library only supports the api-key flow, not the AD-Flow.
65 | ///
66 | public static OpenAIAPI ForAzure(string YourResourceName, string deploymentId, APIAuthentication apiKey = null)
67 | {
68 | OpenAIAPI api = new OpenAIAPI(apiKey);
69 | api.ApiVersion = "2023-05-15";
70 | api.ApiUrlFormat = $"https://{YourResourceName}.openai.azure.com/openai/deployments/{deploymentId}/" + "{1}?api-version={0}";
71 | return api;
72 | }
73 |
74 | ///
75 | /// Text generation is the core function of the API. You give the API a prompt, and it generates a completion. The way you “program” the API to do a task is by simply describing the task in plain english or providing a few written examples. This simple approach works for a wide range of use cases, including summarization, translation, grammar correction, question answering, chatbots, composing emails, and much more (see the prompt library for inspiration).
76 | ///
77 | public ICompletionEndpoint Completions { get; }
78 |
79 | ///
80 | /// The API lets you transform text into a vector (list) of floating point numbers. The distance between two vectors measures their relatedness. Small distances suggest high relatedness and large distances suggest low relatedness.
81 | ///
82 | public IEmbeddingEndpoint Embeddings { get; }
83 |
84 | ///
85 | /// Text generation in the form of chat messages. This interacts with the ChatGPT API.
86 | ///
87 | public IChatEndpoint Chat { get; }
88 |
89 | ///
90 | /// Classify text against the OpenAI Content Policy.
91 | ///
92 | public IModerationEndpoint Moderation { get; }
93 |
94 | ///
95 | /// The API endpoint for querying available Engines/models
96 | ///
97 | public IModelsEndpoint Models { get; }
98 |
99 | ///
100 | /// The API lets you do operations with files. You can upload, delete or retrieve files. Files can be used for fine-tuning, search, etc.
101 | ///
102 | public IFilesEndpoint Files { get; }
103 |
104 | ///
105 | /// The API lets you do operations with images. Given a prompt and/or an input image, the model will generate a new image.
106 | ///
107 | public IImageGenerationEndpoint ImageGenerations { get; }
108 |
109 | ///
110 | /// The Endpoint for the Text to Speech API. This allows you to generate audio from text. See
111 | ///
112 | public ITextToSpeechEndpoint TextToSpeech { get; }
113 |
114 | ///
115 | /// The endpoint for the audio transcription API. This allows you to generate text from audio. See
116 | ///
117 | public ITranscriptionEndpoint Transcriptions { get; }
118 |
119 | ///
120 | /// The endpoint for the audio translation API. This allows you to generate English text from audio in other languages. See
121 | ///
122 | public ITranscriptionEndpoint Translations { get; }
123 | }
124 | }
125 |
--------------------------------------------------------------------------------
/OpenAI_API/Audio/TextToSpeechEndpoint.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.IO;
4 | using System.Net.Http;
5 | using System.Text;
6 | using System.Threading.Tasks;
7 | using OpenAI_API.Chat;
8 | using OpenAI_API.Models;
9 | using static System.Net.WebRequestMethods;
10 |
11 | namespace OpenAI_API.Audio
12 | {
13 | ///
14 | /// The Endpoint for the Text to Speech API. This allows you to generate audio from text. See
15 | ///
16 | public class TextToSpeechEndpoint : EndpointBase, ITextToSpeechEndpoint
17 | {
18 | ///
19 | protected override string Endpoint => "audio/speech";
20 |
21 | ///
22 | /// This allows you to set default parameters for every request, for example to set a default voice or model. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
23 | ///
24 | public TextToSpeechRequest DefaultTTSRequestArgs { get; set; } = new TextToSpeechRequest();
25 |
26 | ///
27 | /// Constructor of the api endpoint. Rather than instantiating this yourself, access it through an instance of as .
28 | ///
29 | /// Pass in the instance of the api
30 | internal TextToSpeechEndpoint(OpenAIAPI api) : base(api) { }
31 |
32 | ///
33 | /// Calls the API to create speech from text, and returns the raw stream of the audio file.
34 | ///
35 | /// The text to speech request to submit to the API
36 | /// A stream of the audio file in the requested format.
37 | public async Task GetSpeechAsStreamAsync(TextToSpeechRequest request)
38 | {
39 | return await HttpRequest(verb: HttpMethod.Post, postData: request);
40 | }
41 |
42 | ///
43 | /// Calls the API to create speech from text, and returns the raw stream of the audio file.
44 | ///
45 | /// The text to generate audio for. The maximum length is 4096 characters.
46 | /// The voice to use when generating the audio. Supported voices can be found in .
47 | /// The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.
48 | /// The default response format is "mp3", but other formats are available in . See
49 | /// TTS is an AI model that converts text to natural sounding spoken text. OpenAI offers two different model variates, is optimized for real time text to speech use cases and is optimized for quality.
50 | /// A stream of the audio file in the requested format.
51 | public async Task GetSpeechAsStreamAsync(string input, string voice = null, double? speed = null, string responseFormat = null, Model model = null)
52 | {
53 | var request = new TextToSpeechRequest()
54 | {
55 | Input = input,
56 | Voice = voice ?? DefaultTTSRequestArgs.Voice,
57 | Speed = speed ?? DefaultTTSRequestArgs.Speed,
58 | Model = model ?? DefaultTTSRequestArgs.Model,
59 | ResponseFormat = responseFormat ?? DefaultTTSRequestArgs.ResponseFormat
60 | };
61 | return await HttpRequest(verb: HttpMethod.Post, postData: request);
62 | }
63 |
64 | ///
65 | /// Calls the API to create speech from text, and saves the audio file to disk.
66 | ///
67 | /// The text to speech request to submit to the API
68 | /// The local path to save the audio file to.
69 | /// A representing the saved speech file.
70 | public async Task SaveSpeechToFileAsync(TextToSpeechRequest request, string localPath)
71 | {
72 | using (var stream = await GetSpeechAsStreamAsync(request))
73 | using (var outputFileStream = new FileStream(localPath, FileMode.Create))
74 | {
75 | await stream.CopyToAsync(outputFileStream);
76 | }
77 | return new FileInfo(localPath);
78 | }
79 |
80 | ///
81 | /// Calls the API to create speech from text, and saves the audio file to disk.
82 | ///
83 | /// The text to generate audio for. The maximum length is 4096 characters.
84 | /// The local path to save the audio file to.
85 | /// The voice to use when generating the audio. Supported voices can be found in .
86 | /// The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.
87 | /// The default response format is "mp3", but other formats are available in . See
88 | /// TTS is an AI model that converts text to natural sounding spoken text. OpenAI offers two different model variates, is optimized for real time text to speech use cases and is optimized for quality.
89 | /// A stream of the audio file in the requested format.
90 | public async Task SaveSpeechToFileAsync(string input, string localPath, string voice = null, double? speed = null, string responseFormat = null, Model model = null)
91 | {
92 | var request = new TextToSpeechRequest()
93 | {
94 | Input = input,
95 | Voice = voice ?? DefaultTTSRequestArgs.Voice,
96 | Speed = speed ?? DefaultTTSRequestArgs.Speed,
97 | Model = model ?? DefaultTTSRequestArgs.Model,
98 | ResponseFormat = responseFormat ?? DefaultTTSRequestArgs.ResponseFormat
99 | };
100 | return await SaveSpeechToFileAsync(request, localPath);
101 | }
102 |
103 |
104 |
105 | }
106 | }
107 |
--------------------------------------------------------------------------------
/OpenAI_API/Moderation/ModerationResult.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 | using System.Text;
6 |
7 | namespace OpenAI_API.Moderation
8 | {
9 | ///
10 | /// Represents a moderation result returned by the Moderations API
11 | ///
12 | public class ModerationResult : ApiResultBase
13 | {
14 | ///
15 | /// List of results returned from the Moderations API request
16 | ///
17 | [JsonProperty("results")]
18 | public List Results { get; set; }
19 |
20 | ///
21 | /// The unique identifier associated with a moderation request
22 | /// Consists of the prefix "modr-" followed by a randomly generated alphanumeric string
23 | ///
24 | [JsonProperty("id")]
25 | public string Id { get; set; }
26 |
27 | ///
28 | /// Convenience function to return the highest confidence category for which the content was flagged, or null if no content flags
29 | ///
30 | /// the highest confidence category for which the content was flagged, or null if no content flags
31 | public override string ToString()
32 | {
33 | return Results?.First()?.MainContentFlag;
34 | }
35 | }
36 |
37 | ///
38 | /// The result generated by the Moderations API request
39 | ///
40 | public class Result
41 | {
42 | ///
43 | /// A series of categories that the content could be flagged for. Values are bool's, indicating if the txt is flagged in that category
44 | ///
45 | [JsonProperty("categories")]
46 | public IDictionary Categories { get; set; }
47 |
48 | ///
49 | /// Confidence scores for the different category flags. Values are between 0 and 1, where 0 indicates low confidence
50 | ///
51 | [JsonProperty("category_scores")]
52 | public IDictionary CategoryScores { get; set; }
53 |
54 | ///
55 | /// True if the text was flagged in any of the categories
56 | ///
57 | [JsonProperty("flagged")]
58 | public bool Flagged { get; set; }
59 |
60 | ///
61 | /// Returns a list of all categories for which the content was flagged, sorted from highest confidence to lowest
62 | ///
63 | public IList FlaggedCategories
64 | {
65 | get
66 | {
67 | return Categories.Where(kv => kv.Value).OrderByDescending(kv => CategoryScores?[kv.Key]).Select(kv => kv.Key).ToList();
68 | }
69 | }
70 |
71 | ///
72 | /// Returns the highest confidence category for which the content was flagged, or null if no content flags
73 | ///
74 | public string MainContentFlag
75 | {
76 | get
77 | {
78 | return FlaggedCategories.FirstOrDefault();
79 | }
80 | }
81 |
82 | ///
83 | /// Returns the highest confidence flag score across all categories
84 | ///
85 | public double HighestFlagScore
86 | {
87 | get
88 | {
89 | return CategoryScores.OrderByDescending(kv => kv.Value).First().Value;
90 | }
91 | }
92 |
93 |
94 | }
95 |
96 | ///
97 | /// Series of boolean values indiciating what the text is flagged for
98 | ///
99 | public class Categories
100 | {
101 | ///
102 | /// If the text contains hate speech
103 | ///
104 | [JsonProperty("hate")]
105 | public bool Hate { get; set; }
106 |
107 | ///
108 | /// If the text contains hate / threatening speech
109 | ///
110 | [JsonProperty("hate/threatening")]
111 | public bool HateThreatening { get; set; }
112 | ///
113 | /// If the text contains content about self-harm
114 | ///
115 | [JsonProperty("self-harm")]
116 | public bool SelfHarm { get; set; }
117 |
118 | ///
119 | /// If the text contains sexual content
120 | ///
121 | [JsonProperty("sexual")]
122 | public bool Sexual { get; set; }
123 |
124 | ///
125 | /// If the text contains sexual content featuring minors
126 | ///
127 | [JsonProperty("sexual/minors")]
128 | public bool SexualMinors { get; set; }
129 |
130 | ///
131 | /// If the text contains violent content
132 | ///
133 | [JsonProperty("violence")]
134 | public bool Violence { get; set; }
135 |
136 | ///
137 | /// If the text contains violent and graphic content
138 | ///
139 | [JsonProperty("violence/graphic")]
140 | public bool ViolenceGraphic { get; set; }
141 | }
142 |
143 | ///
144 | /// Confidence scores for the different category flags
145 | ///
146 | public class CategoryScores
147 | {
148 | ///
149 | /// Confidence score indicating "hate" content is detected in the text
150 | /// A value between 0 and 1, where 0 indicates low confidence
151 | ///
152 | [JsonProperty("hate")]
153 | public double Hate { get; set; }
154 |
155 | ///
156 | /// Confidence score indicating "hate/threatening" content is detected in the text
157 | /// A value between 0 and 1, where 0 indicates low confidence
158 | ///
159 | [JsonProperty("hate/threatening")]
160 | public double HateThreatening { get; set; }
161 |
162 | ///
163 | /// Confidence score indicating "self-harm" content is detected in the text
164 | /// A value between 0 and 1, where 0 indicates low confidence
165 | ///
166 | [JsonProperty("self-harm")]
167 | public double SelfHarm { get; set; }
168 |
169 | ///
170 | /// Confidence score indicating "sexual" content is detected in the text
171 | /// A value between 0 and 1, where 0 indicates low confidence
172 | ///
173 | [JsonProperty("sexual")]
174 | public double Sexual { get; set; }
175 |
176 | ///
177 | /// Confidence score indicating "sexual/minors" content is detected in the text
178 | /// A value between 0 and 1, where 0 indicates low confidence
179 | ///
180 | [JsonProperty("sexual/minors")]
181 | public double SexualMinors { get; set; }
182 |
183 | ///
184 | /// Confidence score indicating "violence" content is detected in the text
185 | /// A value between 0 and 1, where 0 indicates low confidence
186 | ///
187 | [JsonProperty("violence")]
188 | public double Violence { get; set; }
189 |
190 | ///
191 | /// Confidence score indicating "violence/graphic" content is detected in the text
192 | /// A value between 0 and 1, where 0 indicates low confidence
193 | ///
194 | [JsonProperty("violence/graphic")]
195 | public double ViolenceGraphic { get; set; }
196 | }
197 | }
198 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 | ##
4 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
5 |
6 | # User-specific files
7 | *.rsuser
8 | *.suo
9 | *.user
10 | *.userosscache
11 | *.sln.docstates
12 |
13 | # User-specific files (MonoDevelop/Xamarin Studio)
14 | *.userprefs
15 |
16 | # Build results
17 | [Dd]ebug/
18 | [Dd]ebugPublic/
19 | [Rr]elease/
20 | [Rr]eleases/
21 | x64/
22 | x86/
23 | [Aa][Rr][Mm]/
24 | [Aa][Rr][Mm]64/
25 | bld/
26 | [Bb]in/
27 | [Oo]bj/
28 | [Ll]og/
29 |
30 | # Visual Studio 2015/2017 cache/options directory
31 | .vs/
32 | # Uncomment if you have tasks that create the project's static files in wwwroot
33 | #wwwroot/
34 |
35 | # Visual Studio 2017 auto generated files
36 | Generated\ Files/
37 |
38 | # MSTest test Results
39 | [Tt]est[Rr]esult*/
40 | [Bb]uild[Ll]og.*
41 |
42 | # NUNIT
43 | *.VisualState.xml
44 | TestResult.xml
45 |
46 | # Build Results of an ATL Project
47 | [Dd]ebugPS/
48 | [Rr]eleasePS/
49 | dlldata.c
50 |
51 | # Benchmark Results
52 | BenchmarkDotNet.Artifacts/
53 |
54 | # .NET Core
55 | project.lock.json
56 | project.fragment.lock.json
57 | artifacts/
58 |
59 | # StyleCop
60 | StyleCopReport.xml
61 |
62 | # Files built by Visual Studio
63 | *_i.c
64 | *_p.c
65 | *_h.h
66 | *.ilk
67 | *.meta
68 | *.obj
69 | *.iobj
70 | *.pch
71 | *.pdb
72 | *.ipdb
73 | *.pgc
74 | *.pgd
75 | *.rsp
76 | *.sbr
77 | *.tlb
78 | *.tli
79 | *.tlh
80 | *.tmp
81 | *.tmp_proj
82 | *_wpftmp.csproj
83 | *.log
84 | *.vspscc
85 | *.vssscc
86 | .builds
87 | *.pidb
88 | *.svclog
89 | *.scc
90 |
91 | # Chutzpah Test files
92 | _Chutzpah*
93 |
94 | # Visual C++ cache files
95 | ipch/
96 | *.aps
97 | *.ncb
98 | *.opendb
99 | *.opensdf
100 | *.sdf
101 | *.cachefile
102 | *.VC.db
103 | *.VC.VC.opendb
104 |
105 | # Visual Studio profiler
106 | *.psess
107 | *.vsp
108 | *.vspx
109 | *.sap
110 |
111 | # Visual Studio Trace Files
112 | *.e2e
113 |
114 | # TFS 2012 Local Workspace
115 | $tf/
116 |
117 | # Guidance Automation Toolkit
118 | *.gpState
119 |
120 | # ReSharper is a .NET coding add-in
121 | _ReSharper*/
122 | *.[Rr]e[Ss]harper
123 | *.DotSettings.user
124 |
125 | # JustCode is a .NET coding add-in
126 | .JustCode
127 |
128 | # TeamCity is a build add-in
129 | _TeamCity*
130 |
131 | # DotCover is a Code Coverage Tool
132 | *.dotCover
133 |
134 | # AxoCover is a Code Coverage Tool
135 | .axoCover/*
136 | !.axoCover/settings.json
137 |
138 | # Visual Studio code coverage results
139 | *.coverage
140 | *.coveragexml
141 |
142 | # NCrunch
143 | _NCrunch_*
144 | .*crunch*.local.xml
145 | nCrunchTemp_*
146 |
147 | # MightyMoose
148 | *.mm.*
149 | AutoTest.Net/
150 |
151 | # Web workbench (sass)
152 | .sass-cache/
153 |
154 | # Installshield output folder
155 | [Ee]xpress/
156 |
157 | # DocProject is a documentation generator add-in
158 | DocProject/buildhelp/
159 | DocProject/Help/*.HxT
160 | DocProject/Help/*.HxC
161 | DocProject/Help/*.hhc
162 | DocProject/Help/*.hhk
163 | DocProject/Help/*.hhp
164 | DocProject/Help/Html2
165 | DocProject/Help/html
166 |
167 | # Click-Once directory
168 | publish/
169 |
170 | # Publish Web Output
171 | *.[Pp]ublish.xml
172 | *.azurePubxml
173 | # Note: Comment the next line if you want to checkin your web deploy settings,
174 | # but database connection strings (with potential passwords) will be unencrypted
175 | *.pubxml
176 | *.publishproj
177 |
178 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
179 | # checkin your Azure Web App publish settings, but sensitive information contained
180 | # in these scripts will be unencrypted
181 | PublishScripts/
182 |
183 | # NuGet Packages
184 | *.nupkg
185 | # The packages folder can be ignored because of Package Restore
186 | **/[Pp]ackages/*
187 | # except build/, which is used as an MSBuild target.
188 | !**/[Pp]ackages/build/
189 | # Uncomment if necessary however generally it will be regenerated when needed
190 | #!**/[Pp]ackages/repositories.config
191 | # NuGet v3's project.json files produces more ignorable files
192 | *.nuget.props
193 | *.nuget.targets
194 |
195 | # Microsoft Azure Build Output
196 | csx/
197 | *.build.csdef
198 |
199 | # Microsoft Azure Emulator
200 | ecf/
201 | rcf/
202 |
203 | # Windows Store app package directories and files
204 | AppPackages/
205 | BundleArtifacts/
206 | Package.StoreAssociation.xml
207 | _pkginfo.txt
208 | *.appx
209 |
210 | # Visual Studio cache files
211 | # files ending in .cache can be ignored
212 | *.[Cc]ache
213 | # but keep track of directories ending in .cache
214 | !?*.[Cc]ache/
215 |
216 | # Others
217 | ClientBin/
218 | ~$*
219 | *~
220 | *.dbmdl
221 | *.dbproj.schemaview
222 | *.jfm
223 | *.pfx
224 | *.publishsettings
225 | orleans.codegen.cs
226 |
227 | # Including strong name files can present a security risk
228 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
229 | #*.snk
230 |
231 | # Since there are multiple workflows, uncomment next line to ignore bower_components
232 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
233 | #bower_components/
234 |
235 | # RIA/Silverlight projects
236 | Generated_Code/
237 |
238 | # Backup & report files from converting an old project file
239 | # to a newer Visual Studio version. Backup files are not needed,
240 | # because we have git ;-)
241 | _UpgradeReport_Files/
242 | Backup*/
243 | UpgradeLog*.XML
244 | UpgradeLog*.htm
245 | ServiceFabricBackup/
246 | *.rptproj.bak
247 |
248 | # SQL Server files
249 | *.mdf
250 | *.ldf
251 | *.ndf
252 |
253 | # Business Intelligence projects
254 | *.rdl.data
255 | *.bim.layout
256 | *.bim_*.settings
257 | *.rptproj.rsuser
258 | *- Backup*.rdl
259 |
260 | # Microsoft Fakes
261 | FakesAssemblies/
262 |
263 | # GhostDoc plugin setting file
264 | *.GhostDoc.xml
265 |
266 | # Node.js Tools for Visual Studio
267 | .ntvs_analysis.dat
268 | node_modules/
269 |
270 | # Visual Studio 6 build log
271 | *.plg
272 |
273 | # Visual Studio 6 workspace options file
274 | *.opt
275 |
276 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
277 | *.vbw
278 |
279 | # Visual Studio LightSwitch build output
280 | **/*.HTMLClient/GeneratedArtifacts
281 | **/*.DesktopClient/GeneratedArtifacts
282 | **/*.DesktopClient/ModelManifest.xml
283 | **/*.Server/GeneratedArtifacts
284 | **/*.Server/ModelManifest.xml
285 | _Pvt_Extensions
286 |
287 | # Paket dependency manager
288 | .paket/paket.exe
289 | paket-files/
290 |
291 | # FAKE - F# Make
292 | .fake/
293 |
294 | # JetBrains Rider
295 | .idea/
296 | *.sln.iml
297 |
298 | # CodeRush personal settings
299 | .cr/personal
300 |
301 | # Python Tools for Visual Studio (PTVS)
302 | __pycache__/
303 | *.pyc
304 |
305 | # Cake - Uncomment if you are using it
306 | # tools/**
307 | # !tools/packages.config
308 |
309 | # Tabs Studio
310 | *.tss
311 |
312 | # Telerik's JustMock configuration file
313 | *.jmconfig
314 |
315 | # BizTalk build output
316 | *.btp.cs
317 | *.btm.cs
318 | *.odx.cs
319 | *.xsd.cs
320 |
321 | # OpenCover UI analysis results
322 | OpenCover/
323 |
324 | # Azure Stream Analytics local run output
325 | ASALocalRun/
326 |
327 | # MSBuild Binary and Structured Log
328 | *.binlog
329 |
330 | # NVidia Nsight GPU debugger configuration file
331 | *.nvuser
332 |
333 | # MFractors (Xamarin productivity tool) working folder
334 | .mfractor/
335 |
336 | # Local History for Visual Studio
337 | .localhistory/
338 |
339 | # BeatPulse healthcheck temp database
340 | healthchecksdb
--------------------------------------------------------------------------------
/OpenAI_API/Images/ImageGenerationRequest.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using OpenAI_API.Models;
3 | using System;
4 | using System.Collections.Generic;
5 | using System.Linq;
6 | using System.Text;
7 |
8 | namespace OpenAI_API.Images
9 | {
10 | /// <summary>
11 | /// Represents a request to the Images API. Mostly matches the parameters in the OpenAI docs, although some have been renamed or expanded into single/multiple properties for ease of use.
12 | /// </summary>
13 | public class ImageGenerationRequest
14 | {
15 | 	private int? numOfImages = 1;
16 | 	private ImageSize size = ImageSize._1024;
17 | 	private string quality = "standard";
18 |
19 | 	/// <summary>
20 | 	/// A text description of the desired image(s). The maximum length is 1000 characters.
21 | 	/// </summary>
22 | 	[JsonProperty("prompt")]
23 | 	public string Prompt { get; set; }
24 |
25 | 	/// <summary>
26 | 	/// How many different choices to request for each prompt. Defaults to 1. Only for DALL-E 2. For DALL-E 3, only 1 is allowed.
27 | 	/// </summary>
28 | 	/// <exception cref="ArgumentException">Thrown on read when the model is DALL-E 3 and the value is not 1.</exception>
29 | 	[JsonProperty("n")]
30 | 	public int? NumOfImages
31 | 	{
32 | 		get
33 | 		{
34 | 			// Validated on read (rather than write) so the check still fires if Model is changed
35 | 			// after this value is set; serialization reads this getter, so invalid combinations
36 | 			// fail before being sent to the API.
37 | 			if (this.Model == OpenAI_API.Models.Model.DALLE3 && numOfImages != 1)
38 | 				throw new ArgumentException("For DALL-E 3, only 1 NumOfImages is allowed.");
39 | 			return numOfImages;
40 | 		}
41 | 		set => numOfImages = value;
42 | 	}
43 |
44 | 	/// <summary>
45 | 	/// The model to use for this request. Defaults to DALL-E 2.
46 | 	/// </summary>
47 | 	[JsonProperty("model")]
48 | 	public string Model { get; set; } = OpenAI_API.Models.Model.DALLE2;
49 |
50 | 	/// <summary>
51 | 	/// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Optional.
52 | 	/// </summary>
53 | 	[JsonProperty("user")]
54 | 	public string User { get; set; }
55 |
56 | 	/// <summary>
57 | 	/// The size of the generated images. Defaults to 1024x1024.
58 | 	/// </summary>
59 | 	/// <exception cref="ArgumentException">Thrown on read when the size is not supported by the selected model.</exception>
60 | 	[JsonProperty("size"), JsonConverter(typeof(ImageSize.ImageSizeJsonConverter))]
61 | 	public ImageSize Size
62 | 	{
63 | 		get
64 | 		{
65 | 			// Validated on read so the check also covers the case where Model is changed after Size was assigned.
66 | 			if (this.Model == OpenAI_API.Models.Model.DALLE3 && (this.size == ImageSize._256 || this.size == ImageSize._512))
67 | 				throw new ArgumentException("For DALL-E 3, only 1024x1024, 1024x1792, or 1792x1024 is allowed.");
68 | 			if (this.Model == OpenAI_API.Models.Model.DALLE2 && (this.size == ImageSize._1792x1024 || this.size == ImageSize._1024x1792))
69 | 				throw new ArgumentException("For DALL-E 2, only 256x256, 512x512, or 1024x1024 is allowed.");
70 | 			return size;
71 | 		}
72 | 		set => size = value;
73 | 	}
74 |
75 | 	/// <summary>
76 | 	/// By default, images are generated at `standard` quality, but when using DALL·E 3 you can set quality to `hd` for enhanced detail. Square, standard quality images are the fastest to generate.
77 | 	/// </summary>
78 | 	/// <exception cref="ArgumentException">Thrown on write for values other than "standard" or "hd", and on read when "hd" is used with DALL-E 2.</exception>
79 | 	/// <exception cref="ArgumentNullException">Thrown on write when the value is null.</exception>
80 | 	[JsonProperty("quality", NullValueHandling = NullValueHandling.Ignore)]
81 | 	public string Quality
82 | 	{
83 | 		get
84 | 		{
85 | 			if (this.Model == OpenAI_API.Models.Model.DALLE2 && this.quality == "hd")
86 | 				throw new ArgumentException("For DALL-E 2, hd quality is not available.");
87 | 			// Returning null for DALL-E 3 standard quality omits the field from the JSON payload
88 | 			// (see NullValueHandling.Ignore above), letting the API apply its own default.
89 | 			if (this.Model == OpenAI_API.Models.Model.DALLE3 && this.quality == "standard")
90 | 				return null;
91 | 			return quality;
92 | 		}
93 | 		set
94 | 		{
95 | 			// Guard against null before normalizing; previously a null value surfaced as an
96 | 			// unhelpful NullReferenceException from value.ToLower().
97 | 			if (value == null)
98 | 				throw new ArgumentNullException(nameof(value), "Quality must be either 'standard' or 'hd'.");
99 | 			// Invariant-culture normalization so the comparison does not depend on the thread locale.
100 | 			switch (value.Trim().ToLowerInvariant())
101 | 			{
102 | 				case "standard":
103 | 					quality = "standard";
104 | 					break;
105 | 				case "hd":
106 | 					quality = "hd";
107 | 					break;
108 | 				default:
109 | 					throw new ArgumentException("Quality must be either 'standard' or 'hd'.");
110 | 			}
111 | 		}
112 | 	}
113 |
114 | 	/// <summary>
115 | 	/// The format in which the generated images are returned. Must be one of url or b64_json. Defaults to Url.
116 | 	/// </summary>
117 | 	[JsonProperty("response_format"), JsonConverter(typeof(ImageResponseFormat.ImageResponseJsonConverter))]
118 | 	public ImageResponseFormat ResponseFormat { get; set; }
119 |
120 | 	/// <summary>
121 | 	/// Creates a new, empty <see cref="ImageGenerationRequest"/>
122 | 	/// </summary>
123 | 	public ImageGenerationRequest()
124 | 	{
125 | 	}
126 |
127 | 	/// <summary>
128 | 	/// Creates a new <see cref="ImageGenerationRequest"/> with the specified parameters
129 | 	/// </summary>
130 | 	/// <param name="prompt">A text description of the desired image(s). The maximum length is 1000 characters.</param>
131 | 	/// <param name="model">The model to use for this request. Defaults to DALL-E 2 when null.</param>
132 | 	/// <param name="size">The size of the generated images. Defaults to 1024x1024 when null.</param>
133 | 	/// <param name="quality">By default, images are generated at `standard` quality, but when using DALL·E 3 you can set quality to `hd` for enhanced detail.</param>
134 | 	/// <param name="user">A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.</param>
135 | 	/// <param name="responseFormat">The format in which the generated images are returned. Must be one of url or b64_json.</param>
136 | 	/// <exception cref="ArgumentException">Thrown when the size or quality is not compatible with the selected model.</exception>
137 | 	public ImageGenerationRequest(
138 | 		string prompt,
139 | 		Model model,
140 | 		ImageSize size = null,
141 | 		string quality = "standard",
142 | 		string user = null,
143 | 		ImageResponseFormat responseFormat = null)
144 | 	{
145 | 		this.Prompt = prompt;
146 | 		this.Model = model ?? OpenAI_API.Models.Model.DALLE2;
147 | 		this.Quality = quality ?? "standard";
148 | 		this.User = user;
149 | 		this.Size = size ?? ImageSize._1024;
150 | 		this.ResponseFormat = responseFormat ?? ImageResponseFormat.Url;
151 |
152 | 		// Check for parameter combinations the selected model does not support, so that bad
153 | 		// requests fail at construction time instead of at the API.
154 | 		if (this.Model == OpenAI_API.Models.Model.DALLE3)
155 | 		{
156 | 			if (this.Size == ImageSize._256 || this.Size == ImageSize._512)
157 | 				throw new ArgumentException("For DALL-E 3, only sizes 1024x1024, 1024x1792, or 1792x1024 are allowed.");
158 | 			if (this.quality != "standard" && this.quality != "hd")
159 | 				throw new ArgumentException("Quality must be one of 'standard' or 'hd'");
160 | 		}
161 | 		else
162 | 		{
163 | 			if (this.Size == ImageSize._1792x1024 || this.Size == ImageSize._1024x1792)
164 | 				throw new ArgumentException("For DALL-E 2, only sizes 256x256, 512x512, or 1024x1024 are allowed.");
165 | 			if (this.quality != "standard")
166 | 				throw new ArgumentException("For DALL-E 2, only 'standard' quality is available");
167 | 		}
168 | 	}
169 |
170 | 	/// <summary>
171 | 	/// Creates a new <see cref="ImageGenerationRequest"/> with the specified parameters, using the default DALL-E 2 model.
172 | 	/// </summary>
173 | 	/// <param name="prompt">A text description of the desired image(s). The maximum length is 1000 characters.</param>
174 | 	/// <param name="numOfImages">How many different choices to request for each prompt. Defaults to 1.</param>
175 | 	/// <param name="size">The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. Defaults to 1024x1024 when null.</param>
176 | 	/// <param name="user">A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.</param>
177 | 	/// <param name="responseFormat">The format in which the generated images are returned. Must be one of url or b64_json.</param>
178 | 	public ImageGenerationRequest(
179 | 		string prompt,
180 | 		int? numOfImages = 1,
181 | 		ImageSize size = null,
182 | 		string user = null,
183 | 		ImageResponseFormat responseFormat = null)
184 | 	{
185 | 		this.Prompt = prompt;
186 | 		this.NumOfImages = numOfImages;
187 | 		this.User = user;
188 | 		this.Size = size ?? ImageSize._1024;
189 | 		this.ResponseFormat = responseFormat ?? ImageResponseFormat.Url;
190 | 	}
191 | }
176 | }
177 |
--------------------------------------------------------------------------------
/OpenAI_Tests/ChatVisionTests.cs:
--------------------------------------------------------------------------------
1 | using Newtonsoft.Json;
2 | using NUnit.Framework;
3 | using OpenAI_API.Chat;
4 | using OpenAI_API.Completions;
5 | using OpenAI_API.Models;
6 | using OpenAI_API.Moderation;
7 | using System;
8 | using System.Collections.Generic;
9 | using System.IO;
10 | using System.Linq;
11 | using System.Threading;
12 | using System.Threading.Tasks;
13 | using static OpenAI_API.Chat.ChatMessage;
14 |
15 | namespace OpenAI_Tests
16 | {
17 | /// <summary>
18 | /// Integration tests exercising GPT-4 Vision chat completions with image inputs (local files and URLs).
19 | /// Tests are async Task methods so failures surface directly instead of being wrapped in
20 | /// AggregateException by blocking calls (.Result / .Wait()).
21 | /// </summary>
22 | public class ChatVisionTests
23 | {
24 | 	[SetUp]
25 | 	public void Setup()
26 | 	{
27 | 		OpenAI_API.APIAuthentication.Default = new OpenAI_API.APIAuthentication(Environment.GetEnvironmentVariable("TEST_OPENAI_SECRET_KEY"));
28 | 		OpenAI_API.Models.Model.DefaultChatModel = Model.GPT4_Vision;
29 | 	}
30 |
31 | 	[Test]
32 | 	public async Task SimpleVisionTest()
33 | 	{
34 | 		var api = new OpenAI_API.OpenAIAPI();
35 | 		var result = await api.Chat.CreateChatCompletionAsync("What is the primary non-white color in this logo's gradient? Just tell me the one main color.", ImageInput.FromFile("../../../../OpenAI_API/nuget_logo.png"));
36 | 		Assert.IsNotNull(result);
37 | 		Assert.IsNotNull(result.Choices);
38 | 		Assert.AreEqual(1, result.Choices.Count);
39 | 		Assert.That(result.Choices[0].Message.TextContent.ToLower().Contains("blue") || result.Choices[0].Message.TextContent.ToLower().Contains("purple"));
40 | 	}
41 |
42 | 	[Test]
43 | 	public async Task TestVisionFromPath()
44 | 	{
45 | 		var api = new OpenAI_API.OpenAIAPI();
46 | 		ChatRequest request = new ChatRequest()
47 | 		{
48 | 			Model = Model.GPT4_Vision,
49 | 			Temperature = 0.0,
50 | 			MaxTokens = 500,
51 | 			Messages = new ChatMessage[] {
52 | 				new ChatMessage(ChatMessageRole.System, "You are a helpful assistant"),
53 | 				new ChatMessage(ChatMessageRole.User, "What is the primary color in this logo?", ImageInput.FromFile("../../../../OpenAI_API/nuget_logo.png"))
54 | 			}
55 | 		};
56 | 		// Await instead of blocking on .Result, which can deadlock and obscures failures.
57 | 		var result = await api.Chat.CreateChatCompletionAsync(request);
58 | 		Assert.IsNotNull(result);
59 | 		Assert.IsNotNull(result.Choices);
60 | 		Assert.AreEqual(1, result.Choices.Count);
61 | 		Assert.That(result.Choices[0].Message.TextContent.ToLower().Contains("blue") || result.Choices[0].Message.TextContent.ToLower().Contains("purple") || result.Choices[0].Message.TextContent.ToLower().Contains("pink"));
62 | 	}
63 |
64 | 	[Test]
65 | 	public async Task TestVisionFromUrl()
66 | 	{
67 | 		var api = new OpenAI_API.OpenAIAPI();
68 | 		ChatRequest request = new ChatRequest()
69 | 		{
70 | 			Model = Model.GPT4_Vision,
71 | 			Temperature = 0.0,
72 | 			MaxTokens = 500,
73 | 			Messages = new ChatMessage[] {
74 | 				new ChatMessage(ChatMessageRole.System, "You are a helpful assistant"),
75 | 				new ChatMessage(ChatMessageRole.User, "This logo consists of many small shapes. What shape are they?", ImageInput.FromImageUrl("https://rogerpincombe.com/templates/rp/center-aligned-no-shadow-small.png"))
76 | 			}
77 | 		};
78 | 		var result = await api.Chat.CreateChatCompletionAsync(request);
79 | 		Assert.IsNotNull(result);
80 | 		Assert.IsNotNull(result.Choices);
81 | 		Assert.AreEqual(1, result.Choices.Count);
82 | 		Assert.That(result.Choices[0].Message.TextContent.ToLower().Contains("circle") || result.Choices[0].Message.TextContent.ToLower().Contains("spiral"));
83 | 	}
84 |
85 | 	[Test]
86 | 	public async Task TestVisionWithMultipleImages()
87 | 	{
88 | 		var api = new OpenAI_API.OpenAIAPI();
89 | 		ChatRequest request = new ChatRequest()
90 | 		{
91 | 			Model = Model.GPT4_Vision,
92 | 			Temperature = 0.0,
93 | 			MaxTokens = 500,
94 | 			Messages = new ChatMessage[] {
95 | 				new ChatMessage(ChatMessageRole.User, "Here are two logos. What is the one common color (aside from white) that is used in both logos?", ImageInput.FromFile("../../../../OpenAI_API/nuget_logo.png"), ImageInput.FromImageUrl("https://rogerpincombe.com/templates/rp/center-aligned-no-shadow-small.png"))
96 | 			}
97 | 		};
98 | 		var result = await api.Chat.CreateChatCompletionAsync(request);
99 | 		Assert.IsNotNull(result);
100 | 		Assert.IsNotNull(result.Choices);
101 | 		Assert.AreEqual(1, result.Choices.Count);
102 | 		Assert.That(result.Choices[0].Message.TextContent.ToLower().Contains("blue") || result.Choices[0].Message.TextContent.ToLower().Contains("purple") || result.Choices[0].Message.TextContent.ToLower().Contains("pink"));
103 | 	}
104 |
105 | 	[Test]
106 | 	public async Task ChatBackAndForth()
107 | 	{
108 | 		var api = new OpenAI_API.OpenAIAPI();
109 |
110 | 		var chat = api.Chat.CreateConversation();
111 | 		chat.Model = Model.GPT4_Vision;
112 | 		chat.RequestParameters.Temperature = 0;
113 |
114 | 		chat.AppendSystemMessage("You are a graphic design assistant who helps identify colors.");
115 | 		chat.AppendUserInput("What are the primary non-white colors in this logo?", ImageInput.FromFile("../../../../OpenAI_API/nuget_logo.png"));
116 | 		chat.AppendExampleChatbotOutput("Blue and purple");
117 | 		chat.AppendUserInput("What are the primary non-white colors in this logo?", ImageInput.FromImageUrl("https://rogerpincombe.com/templates/rp/center-aligned-no-shadow-small.png"));
118 | 		string res = await chat.GetResponseFromChatbotAsync();
119 | 		Assert.NotNull(res);
120 | 		Assert.IsNotEmpty(res);
121 | 		Assert.That(res.ToLower().Contains("blue"));
122 | 		Assert.That(res.ToLower().Contains("red"));
123 | 		Assert.That(res.ToLower().Contains("yellow") || res.ToLower().Contains("gold"));
124 | 		chat.AppendUserInput("What are the primary non-white colors in this logo?", ImageInput.FromImageUrl("https://www.greatstartheater.org/images/logo.png"));
125 | 		res = await chat.GetResponseFromChatbotAsync();
126 | 		Assert.NotNull(res);
127 | 		Assert.IsNotEmpty(res);
128 | 		Assert.That(res.ToLower().Contains("red"));
129 | 		Assert.That(res.ToLower().Contains("black"));
130 | 	}
131 |
132 | 	[Test]
133 | 	public async Task VisionStreaming()
134 | 	{
135 | 		var api = new OpenAI_API.OpenAIAPI();
136 | 		ChatRequest request = new ChatRequest()
137 | 		{
138 | 			Model = Model.GPT4_Vision,
139 | 			Temperature = 0.0,
140 | 			MaxTokens = 500,
141 | 			Messages = new ChatMessage[] {
142 | 				new ChatMessage(ChatMessageRole.System, "You are a helpful assistant"),
143 | 				new ChatMessage(ChatMessageRole.User, "This logo consists of many small shapes. What shape are they?", ImageInput.FromImageUrl("https://rogerpincombe.com/templates/rp/center-aligned-no-shadow-small.png"))
144 | 			}
145 | 		};
146 | 		string resultText = "";
147 | 		await api.Chat.StreamChatAsync(request, delta => resultText += delta?.ToString() ?? "");
148 | 		Assert.IsNotEmpty(resultText);
149 | 		Assert.That(resultText.ToLower().Contains("circle") || resultText.ToLower().Contains("spiral"));
150 | 	}
151 |
152 | 	[Test]
153 | 	public async Task VisionConversationStreaming()
154 | 	{
155 | 		var api = new OpenAI_API.OpenAIAPI();
156 |
157 | 		var chat = api.Chat.CreateConversation();
158 | 		chat.Model = Model.GPT4_Vision;
159 | 		chat.RequestParameters.Temperature = 0;
160 |
161 | 		chat.AppendSystemMessage("You are a graphic design assistant who helps identify colors.");
162 | 		chat.AppendUserInput("What are the primary non-white colors in this logo?", ImageInput.FromFile("../../../../OpenAI_API/nuget_logo.png"));
163 | 		chat.AppendExampleChatbotOutput("Blue and purple");
164 | 		chat.AppendUserInput("What are the primary non-white colors in this logo?", ImageInput.FromImageUrl("https://rogerpincombe.com/templates/rp/center-aligned-no-shadow-small.png"));
165 | 		string resultText = "";
166 | 		// Null-safe delta accumulation, consistent with VisionStreaming; previously delta.ToString()
167 | 		// could throw NullReferenceException on a null delta.
168 | 		await chat.StreamResponseFromChatbotAsync(delta => resultText += delta?.ToString() ?? "");
169 | 		Assert.IsNotEmpty(resultText);
170 | 		Assert.That(resultText.ToLower().Contains("blue"));
171 | 		Assert.That(resultText.ToLower().Contains("red"));
172 | 	}
173 | }
166 | }
167 |
--------------------------------------------------------------------------------
/OpenAI_API/APIAuthentication.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Diagnostics;
4 | using System.IO;
5 | using System.Threading.Tasks;
6 |
7 | namespace OpenAI_API
8 | {
9 | /// <summary>
10 | /// Represents authentication to the OpenAI API endpoint
11 | /// </summary>
12 | public class APIAuthentication
13 | {
14 | 	/// <summary>
15 | 	/// The API key, required to access the API endpoint.
16 | 	/// </summary>
17 | 	public string ApiKey { get; set; }
18 |
19 | 	/// <summary>
20 | 	/// The Organization ID to count API requests against. This can be found at https://beta.openai.com/account/org-settings.
21 | 	/// </summary>
22 | 	public string OpenAIOrganization { get; set; }
23 |
24 | 	/// <summary>
25 | 	/// Allows implicit casting from a string, so that a simple string API key can be provided in place of an instance of <see cref="APIAuthentication"/>
26 | 	/// </summary>
27 | 	/// <param name="key">The API key to convert into a <see cref="APIAuthentication"/>.</param>
28 | 	public static implicit operator APIAuthentication(string key)
29 | 	{
30 | 		return new APIAuthentication(key);
31 | 	}
32 |
33 | 	/// <summary>
34 | 	/// Instantiates a new Authentication object with the given <paramref name="apiKey"/>, which may be <see langword="null"/>.
35 | 	/// </summary>
36 | 	/// <param name="apiKey">The API key, required to access the API endpoint.</param>
37 | 	public APIAuthentication(string apiKey)
38 | 	{
39 | 		this.ApiKey = apiKey;
40 | 	}
41 |
42 | 	/// <summary>
43 | 	/// Instantiates a new Authentication object with the given <paramref name="apiKey"/>, which may be <see langword="null"/>. For users who belong to multiple organizations, you can specify which organization is used. Usage from these API requests will count against the specified organization's subscription quota.
44 | 	/// </summary>
45 | 	/// <param name="apiKey">The API key, required to access the API endpoint.</param>
46 | 	/// <param name="openAIOrganization">The Organization ID to count API requests against. This can be found at https://beta.openai.com/account/org-settings.</param>
47 | 	public APIAuthentication(string apiKey, string openAIOrganization)
48 | 	{
49 | 		this.ApiKey = apiKey;
50 | 		this.OpenAIOrganization = openAIOrganization;
51 | 	}
52 |
53 | 	private static APIAuthentication cachedDefault = null;
54 |
55 | 	/// <summary>
56 | 	/// The default authentication to use when no other auth is specified. This can be set manually, or automatically loaded via environment variables or a config file.
57 | 	/// </summary>
58 | 	public static APIAuthentication Default
59 | 	{
60 | 		get
61 | 		{
62 | 			if (cachedDefault != null)
63 | 				return cachedDefault;
64 |
65 | 			// Resolution order: environment variables, then a config file found from the current
66 | 			// directory upwards, then a config file in the user's home directory.
67 | 			APIAuthentication auth = LoadFromEnv();
68 | 			if (auth == null)
69 | 				auth = LoadFromPath();
70 | 			if (auth == null)
71 | 				auth = LoadFromPath(Environment.GetFolderPath(Environment.SpecialFolder.UserProfile));
72 |
73 | 			cachedDefault = auth;
74 | 			return auth;
75 | 		}
76 | 		set
77 | 		{
78 | 			cachedDefault = value;
79 | 		}
80 | 	}
81 |
82 | 	/// <summary>
83 | 	/// Attempts to load api key from environment variables, as "OPENAI_KEY" or "OPENAI_API_KEY". Also loads org from "OPENAI_ORGANIZATION" if present.
84 | 	/// </summary>
85 | 	/// <returns>Returns the loaded <see cref="APIAuthentication"/> if any api keys were found, or <see langword="null"/> if there were no matching environment vars.</returns>
86 | 	public static APIAuthentication LoadFromEnv()
87 | 	{
88 | 		string key = Environment.GetEnvironmentVariable("OPENAI_KEY");
89 |
90 | 		if (string.IsNullOrEmpty(key))
91 | 		{
92 | 			key = Environment.GetEnvironmentVariable("OPENAI_API_KEY");
93 |
94 | 			if (string.IsNullOrEmpty(key))
95 | 				return null;
96 | 		}
97 |
98 | 		string org = Environment.GetEnvironmentVariable("OPENAI_ORGANIZATION");
99 |
100 | 		return new APIAuthentication(key, org);
101 | 	}
102 |
103 | 	/// <summary>
104 | 	/// Attempts to load api keys from a configuration file, by default ".openai" in the current directory, optionally traversing up the directory tree
105 | 	/// </summary>
106 | 	/// <param name="directory">The directory to look in, or <see langword="null"/> for the current directory</param>
107 | 	/// <param name="filename">The filename of the config file</param>
108 | 	/// <param name="searchUp">Whether to recursively traverse up the directory tree if the <paramref name="filename"/> is not found in the <paramref name="directory"/></param>
109 | 	/// <returns>Returns the loaded <see cref="APIAuthentication"/> if any api keys were found, or <see langword="null"/> if it was not successful in finding a config (or if the config file didn't contain correctly formatted API keys)</returns>
110 | 	public static APIAuthentication LoadFromPath(string directory = null, string filename = ".openai", bool searchUp = true)
111 | 	{
112 | 		if (directory == null)
113 | 			directory = Environment.CurrentDirectory;
114 |
115 | 		string key = null;
116 | 		string org = null;
117 | 		var curDirectory = new DirectoryInfo(directory);
118 |
119 | 		// Walk up the directory tree until a key is found or we run out of directories.
120 | 		// The loop tests the directory itself (not its parent), so a config file sitting in a
121 | 		// filesystem root is no longer skipped as it was previously.
122 | 		while (key == null && curDirectory != null)
123 | 		{
124 | 			string configPath = Path.Combine(curDirectory.FullName, filename);
125 | 			if (File.Exists(configPath))
126 | 			{
127 | 				foreach (var l in File.ReadAllLines(configPath))
128 | 				{
129 | 					// Split on the first '=' or ':' only, so values containing those characters survive intact.
130 | 					var parts = l.Split(new[] { '=', ':' }, 2);
131 | 					if (parts.Length == 2)
132 | 					{
133 | 						switch (parts[0].Trim().ToUpperInvariant())
134 | 						{
135 | 							case "OPENAI_KEY":
136 | 							case "OPENAI_API_KEY":
137 | 								key = parts[1].Trim();
138 | 								break;
139 | 							case "OPENAI_ORGANIZATION":
140 | 								org = parts[1].Trim();
141 | 								break;
142 | 							default:
143 | 								break;
144 | 						}
145 | 					}
146 | 				}
147 | 			}
148 |
149 | 			// Move to the parent (null at the root, which ends the loop), or stop immediately
150 | 			// when upward search is disabled.
151 | 			curDirectory = searchUp ? curDirectory.Parent : null;
152 | 		}
153 |
154 | 		if (string.IsNullOrEmpty(key))
155 | 			return null;
156 |
157 | 		return new APIAuthentication(key, org);
158 | 	}
159 |
160 | 	/// <summary>
161 | 	/// Tests the api key against the OpenAI API, to ensure it is valid. This hits the models endpoint so should not be charged for usage.
162 | 	/// </summary>
163 | 	/// <returns><see langword="true"/> if the api key is valid, or <see langword="false"/> if empty or not accepted by the OpenAI API.</returns>
164 | 	public async Task<bool> ValidateAPIKey()
165 | 	{
166 | 		if (string.IsNullOrEmpty(ApiKey))
167 | 			return false;
168 |
169 | 		var api = new OpenAIAPI(this);
170 |
171 | 		try
172 | 		{
173 | 			// Listing models is free, so it is a safe way to verify the credentials work.
174 | 			var results = await api.Models.GetModelsAsync();
175 | 			return (results.Count > 0);
176 | 		}
177 | 		catch (Exception ex)
178 | 		{
179 | 			// Any API failure (bad key, network error) is treated as "not valid" rather than thrown.
180 | 			Debug.WriteLine(ex.ToString());
181 | 			return false;
182 | 		}
183 | 	}
184 |
185 | }
189 |
190 | internal static class AuthHelpers
191 | {
192 | 	/// <summary>
193 | 	/// A helper method to swap out <see langword="null"/> <see cref="APIAuthentication"/> objects with the <see cref="APIAuthentication.Default"/> authentication, possibly loaded from ENV or a config file.
194 | 	/// </summary>
195 | 	/// <param name="auth">The specific authentication to use if not <see langword="null"/></param>
196 | 	/// <returns>Either the provided <paramref name="auth"/> or the <see cref="APIAuthentication.Default"/></returns>
197 | 	public static APIAuthentication ThisOrDefault(this APIAuthentication auth)
198 | 	{
199 | 		// Null-coalescing expresses the original if-null fallback as a single expression.
200 | 		return auth ?? APIAuthentication.Default;
201 | 	}
202 | }
205 | }
206 |
--------------------------------------------------------------------------------
/OpenAI_API/Audio/ITranscriptionEndpoint.cs:
--------------------------------------------------------------------------------
1 | using System.IO;
2 | using System.Threading.Tasks;
3 |
4 | namespace OpenAI_API.Audio
5 | {
6 | ///
7 | /// Transcribe audio into text, with optional translation into English.
8 | ///
9 | public interface ITranscriptionEndpoint
10 | {
11 | ///
12 | /// This allows you to set default parameters for every request, for example to set a default language. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
13 | ///
14 | AudioRequest DefaultRequestArgs { get; set; }
15 |
16 | ///
17 | /// Gets the transcription of the audio stream, in the specified format
18 | ///
19 | /// The stream containing audio data, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
20 | /// The name of the audio file in the stream. This does not have to be real, but it must contain the correct file extension. For example, "file.mp3" if you are supplying an mp3 audio stream.
21 | /// The format of the response. Suggested value are or . For text and Json formats, try or instead.
22 | /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
23 | /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
24 | /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
25 | /// A string of the transcribed text
26 | Task GetAsFormatAsync(Stream audioStream, string filename,string responseFormat, string language = null, string prompt = null, double? temperature = null);
27 |
28 | ///
29 | /// Gets the transcription of the audio file, in the specified format
30 | ///
31 | /// The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
32 | /// The format of the response. Suggested value are or . For text and Json formats, try or instead.
33 | /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
34 | /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
35 | /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
36 | /// A string of the transcribed text
37 | Task GetAsFormatAsync(string audioFilePath, string responseFormat, string language = null, string prompt = null, double? temperature = null);
38 |
		/// <summary>
		/// Gets the transcription of the audio stream, with full metadata
		/// </summary>
		/// <param name="audioStream">The stream containing audio data, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.</param>
		/// <param name="filename">The name of the audio file in the stream. This does not have to be real, but it must contain the correct file extension. For example, "file.mp3" if you are supplying an mp3 audio stream.</param>
		/// <param name="language">The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.</param>
		/// <param name="prompt">An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.</param>
		/// <param name="temperature">The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.</param>
		/// <returns>The transcription result including full metadata (the original doc text "A string of the transcribed text" appears copy-pasted from the text overloads).</returns>
		// NOTE(review): the generic return type of this Task was stripped by the text dump (likely a verbose
		// transcription result type) — confirm against the implementing TranscriptionEndpoint before relying on it.
		Task GetWithDetailsAsync(Stream audioStream, string filename,string language = null, string prompt = null, double? temperature = null);
49 |
		/// <summary>
		/// Gets the transcription of the audio file, with full metadata
		/// </summary>
		/// <param name="audioFilePath">The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.</param>
		/// <param name="language">The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.</param>
		/// <param name="prompt">An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.</param>
		/// <param name="temperature">The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.</param>
		/// <returns>The transcription result including full metadata (the original doc text "A string of the transcribed text" appears copy-pasted from the text overloads).</returns>
		// NOTE(review): the generic return type of this Task was stripped by the text dump (likely a verbose
		// transcription result type) — confirm against the implementing TranscriptionEndpoint before relying on it.
		Task GetWithDetailsAsync(string audioFilePath, string language = null, string prompt = null, double? temperature = null);
59 |
60 | ///
61 | /// Gets the transcription of the audio stream as a text string
62 | ///
63 | /// The stream containing audio data, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
64 | /// The name of the audio file in the stream. This does not have to be real, but it must contain the correct file extension. For example, "file.mp3" if you are supplying an mp3 audio stream.
65 | /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
66 | /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
67 | /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
68 | /// A string of the transcribed text
69 | Task GetTextAsync(Stream audioStream, string filename, string language = null, string prompt = null, double? temperature = null);
70 |
71 | ///
72 | /// Gets the transcription of the audio file as a text string
73 | ///
74 | /// The local path to the audio file, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
75 | /// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
76 | /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
77 | /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
78 | /// A string of the transcribed text
79 | Task GetTextAsync(string audioFilePath, string language = null, string prompt = null, double? temperature = null);
80 | }
81 | }
--------------------------------------------------------------------------------
/OpenAI_API/Chat/ChatRequest.cs:
--------------------------------------------------------------------------------
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;
using OpenAI_API.Models;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.IO;
using System.Linq;
using System.Text;

namespace OpenAI_API.Chat
{
	/// <summary>
	/// A request to the Chat API. This is similar, but not exactly the same as the <see cref="Completions.CompletionRequest"/>.
	/// Based on the OpenAI API docs for chat completions.
	/// </summary>
	public class ChatRequest
	{
		/// <summary>
		/// The model to use for this request
		/// </summary>
		[JsonProperty("model")]
		public string Model { get; set; } = OpenAI_API.Models.Model.DefaultChatModel;

		/// <summary>
		/// The messages to send with this Chat Request
		/// </summary>
		// NOTE(review): generic arguments were stripped by the text dump; reconstructed as IList<ChatMessage> — confirm.
		[JsonProperty("messages")]
		public IList<ChatMessage> Messages { get; set; }

		/// <summary>
		/// What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <see cref="TopP"/> but not both.
		/// </summary>
		[JsonProperty("temperature")]
		public double? Temperature { get; set; }

		/// <summary>
		/// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <see cref="Temperature"/> but not both.
		/// </summary>
		[JsonProperty("top_p")]
		public double? TopP { get; set; }

		/// <summary>
		/// How many different choices to request for each message. Defaults to 1.
		/// </summary>
		[JsonProperty("n")]
		public int? NumChoicesPerMessage { get; set; }

		/// <summary>
		/// Specifies where the results should stream and be returned at one time. Do not set this yourself, use the appropriate methods on the chat endpoint instead.
		/// </summary>
		[JsonProperty("stream")]
		public bool Stream { get; internal set; } = false;

		/// <summary>
		/// This is only used for serializing the request into JSON, do not use it directly.
		/// Emits a single string when exactly one stop sequence is set, the full array when several are set, and nothing when none are set, since the API accepts either form.
		/// </summary>
		[JsonProperty("stop")]
		internal object CompiledStop
		{
			get
			{
				if (MultipleStopSequences?.Length == 1)
					return StopSequence;
				else if (MultipleStopSequences?.Length > 0)
					return MultipleStopSequences;
				else
					return null;
			}
		}

		/// <summary>
		/// One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
		/// </summary>
		[JsonIgnore]
		public string[] MultipleStopSequences { get; set; }

		/// <summary>
		/// The stop sequence where the API will stop generating further tokens. The returned text will not contain the stop sequence. For convenience, if you are only requesting a single stop sequence, set it here
		/// </summary>
		[JsonIgnore]
		public string StopSequence
		{
			get => MultipleStopSequences?.FirstOrDefault() ?? null;
			set
			{
				if (value != null)
					MultipleStopSequences = new string[] { value };
			}
		}

		/// <summary>
		/// How many tokens to complete to. Can return fewer if a stop sequence is hit. Defaults to 16.
		/// </summary>
		[JsonProperty("max_tokens")]
		public int? MaxTokens { get; set; }

		/// <summary>
		/// The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse. Defaults to 0.
		/// </summary>
		[JsonProperty("frequency_penalty")]
		public double? FrequencyPenalty { get; set; }

		/// <summary>
		/// The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse. Defaults to 0.
		/// </summary>
		[JsonProperty("presence_penalty")]
		public double? PresencePenalty { get; set; }

		/// <summary>
		/// Modify the likelihood of specified tokens appearing in the completion.
		/// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
		/// Mathematically, the bias is added to the logits generated by the model prior to sampling.
		/// The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
		/// </summary>
		// NOTE(review): generic arguments were stripped by the text dump; reconstructed as IReadOnlyDictionary<string, float> — confirm.
		[JsonProperty("logit_bias")]
		public IReadOnlyDictionary<string, float> LogitBias { get; set; }

		/// <summary>
		/// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
		/// </summary>
		[JsonProperty("user")]
		public string user { get; set; }

		/// <summary>
		/// An object specifying the format that the model must output. Setting to <see cref="ResponseFormats.JsonObject"/> enables JSON mode, which guarantees the message the model generates is valid JSON, assuming that the finish_reason is not "length".
		/// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
		/// </summary>
		[JsonIgnore]
		public string ResponseFormat { get; set; } = "text";

		/// <summary>
		/// This is only used for serializing the request into JSON, do not use it directly.
		/// Serializes as {"type": ...} only when a non-default format is requested.
		/// </summary>
		[JsonProperty("response_format", DefaultValueHandling = DefaultValueHandling.Ignore)]
		internal Dictionary<string, string> ResponseFormatRaw
		{
			get
			{
				if (ResponseFormat == null || ResponseFormat == ResponseFormats.Text)
					return null;
				else
					return new Dictionary<string, string>() { { "type", ResponseFormat } };
			}
		}

		/// <summary>
		/// If specified, OpenAI will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.
		/// </summary>
		[JsonProperty("seed", DefaultValueHandling = DefaultValueHandling.Ignore)]
		public int? Seed { get; set; }

		/// <summary>
		/// Creates a new, empty <see cref="ChatRequest"/>
		/// </summary>
		public ChatRequest()
		{ }

		/// <summary>
		/// Create a new chat request using the data from the input chat request.
		/// </summary>
		/// <param name="basedOn">The request to copy the parameters from. May be null, in which case all parameters keep their defaults.</param>
		public ChatRequest(ChatRequest basedOn)
		{
			if (basedOn == null)
				return;
			this.Model = basedOn.Model;
			this.Messages = basedOn.Messages;
			this.Temperature = basedOn.Temperature;
			this.TopP = basedOn.TopP;
			this.NumChoicesPerMessage = basedOn.NumChoicesPerMessage;
			this.MultipleStopSequences = basedOn.MultipleStopSequences;
			this.MaxTokens = basedOn.MaxTokens;
			this.FrequencyPenalty = basedOn.FrequencyPenalty;
			this.PresencePenalty = basedOn.PresencePenalty;
			this.LogitBias = basedOn.LogitBias;
			// Bug fix: these options were previously dropped when copying a request.
			// Stream is deliberately not copied, as it is managed by the endpoint methods.
			this.user = basedOn.user;
			this.ResponseFormat = basedOn.ResponseFormat;
			this.Seed = basedOn.Seed;
		}

		/// <summary>
		/// Options for the <see cref="ResponseFormat"/> property
		/// </summary>
		public static class ResponseFormats
		{
			/// <summary>
			/// The default response format, which may be any type of text
			/// </summary>
			public const string Text = "text";
			/// <summary>
			/// The response format is guaranteed to be valid JSON, assuming that the finish_reason is not "length"
			/// </summary>
			public const string JsonObject = "json_object";
		}
	}
}
--------------------------------------------------------------------------------
/OpenAI_API/Completions/CompletionRequest.cs:
--------------------------------------------------------------------------------
using Newtonsoft.Json;
using OpenAI_API.Models;
using System.Linq;

namespace OpenAI_API.Completions
{
	/// <summary>
	/// Represents a request to the Completions API. Mostly matches the parameters in the OpenAI docs, although some have been renamed or expanded into single/multiple properties for ease of use.
	/// </summary>
	public class CompletionRequest
	{
		/// <summary>
		/// ID of the model to use. You can use the models endpoint to see all of your available models, or use a standard model.
		/// </summary>
		[JsonProperty("model")]
		public string Model { get; set; } = OpenAI_API.Models.Model.DefaultModel;

		/// <summary>
		/// This is only used for serializing the request into JSON, do not use it directly.
		/// Emits a single string when exactly one prompt is set, or the whole array otherwise, since the API accepts either form.
		/// </summary>
		[JsonProperty("prompt")]
		public object CompiledPrompt
		{
			get
			{
				if (MultiplePrompts?.Length == 1)
					return Prompt;
				else
					return MultiplePrompts;
			}
		}

		/// <summary>
		/// If you are requesting more than one prompt, specify them as an array of strings.
		/// </summary>
		[JsonIgnore]
		public string[] MultiplePrompts { get; set; }

		/// <summary>
		/// For convenience, if you are only requesting a single prompt, set it here
		/// </summary>
		[JsonIgnore]
		public string Prompt
		{
			// Bug fix: null-conditional access so reading Prompt before any prompt is set returns
			// null instead of throwing NullReferenceException (consistent with StopSequence below).
			get => MultiplePrompts?.FirstOrDefault();
			set
			{
				MultiplePrompts = new string[] { value };
			}
		}

		/// <summary>
		/// The suffix that comes after a completion of inserted text. Defaults to null.
		/// </summary>
		[JsonProperty("suffix")]
		public string Suffix { get; set; }

		/// <summary>
		/// How many tokens to complete to. Can return fewer if a stop sequence is hit. Defaults to 16.
		/// </summary>
		[JsonProperty("max_tokens")]
		public int? MaxTokens { get; set; }

		/// <summary>
		/// What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <see cref="TopP"/> but not both.
		/// </summary>
		[JsonProperty("temperature")]
		public double? Temperature { get; set; }

		/// <summary>
		/// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <see cref="Temperature"/> but not both.
		/// </summary>
		[JsonProperty("top_p")]
		public double? TopP { get; set; }

		/// <summary>
		/// The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse. Defaults to 0.
		/// </summary>
		[JsonProperty("presence_penalty")]
		public double? PresencePenalty { get; set; }

		/// <summary>
		/// The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse. Defaults to 0.
		/// </summary>
		[JsonProperty("frequency_penalty")]
		public double? FrequencyPenalty { get; set; }

		/// <summary>
		/// How many different choices to request for each prompt. Defaults to 1.
		/// </summary>
		[JsonProperty("n")]
		public int? NumChoicesPerPrompt { get; set; }

		/// <summary>
		/// Specifies where the results should stream and be returned at one time. Do not set this yourself, use the appropriate methods on the completion endpoint instead.
		/// </summary>
		[JsonProperty("stream")]
		public bool Stream { get; internal set; } = false;

		/// <summary>
		/// Include the log probabilities on the logprobs most likely tokens, which can be found in the completion result choices. So for example, if logprobs is 5, the API will return a list of the 5 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response. The maximum value for logprobs is 5.
		/// </summary>
		[JsonProperty("logprobs")]
		public int? Logprobs { get; set; }

		/// <summary>
		/// Echo back the prompt in addition to the completion. Defaults to false.
		/// </summary>
		[JsonProperty("echo")]
		public bool? Echo { get; set; }

		/// <summary>
		/// This is only used for serializing the request into JSON, do not use it directly.
		/// Emits a single string when exactly one stop sequence is set, the full array when several are set, and nothing when none are set.
		/// </summary>
		[JsonProperty("stop")]
		public object CompiledStop
		{
			get
			{
				if (MultipleStopSequences?.Length == 1)
					return StopSequence;
				else if (MultipleStopSequences?.Length > 0)
					return MultipleStopSequences;
				else
					return null;
			}
		}

		/// <summary>
		/// One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
		/// </summary>
		[JsonIgnore]
		public string[] MultipleStopSequences { get; set; }

		/// <summary>
		/// The stop sequence where the API will stop generating further tokens. The returned text will not contain the stop sequence. For convenience, if you are only requesting a single stop sequence, set it here
		/// </summary>
		[JsonIgnore]
		public string StopSequence
		{
			get => MultipleStopSequences?.FirstOrDefault() ?? null;
			set
			{
				if (value != null)
					MultipleStopSequences = new string[] { value };
			}
		}

		/// <summary>
		/// Generates best_of completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.
		/// When used with n, best_of controls the number of candidate completions and n specifies how many to return – best_of must be greater than n.
		/// Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop.
		/// </summary>
		[JsonProperty("best_of")]
		public int? BestOf { get; set; }

		/// <summary>
		/// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
		/// </summary>
		[JsonProperty("user")]
		public string user { get; set; }

		/// <summary>
		/// Creates a new, empty <see cref="CompletionRequest"/>
		/// </summary>
		public CompletionRequest()
		{
			this.Model = OpenAI_API.Models.Model.DefaultModel;
		}

		/// <summary>
		/// Creates a new <see cref="CompletionRequest"/>, inheriting any parameters set in <paramref name="basedOn"/>.
		/// </summary>
		/// <param name="basedOn">The <see cref="CompletionRequest"/> to copy. May be null, in which case all parameters keep their defaults.</param>
		public CompletionRequest(CompletionRequest basedOn)
		{
			// Bug fix: guard against null, consistent with ChatRequest's copy constructor
			// (previously this threw NullReferenceException on a null argument).
			if (basedOn == null)
				return;
			this.Model = basedOn.Model;
			this.MultiplePrompts = basedOn.MultiplePrompts;
			this.MaxTokens = basedOn.MaxTokens;
			this.Temperature = basedOn.Temperature;
			this.TopP = basedOn.TopP;
			this.NumChoicesPerPrompt = basedOn.NumChoicesPerPrompt;
			this.PresencePenalty = basedOn.PresencePenalty;
			this.FrequencyPenalty = basedOn.FrequencyPenalty;
			this.Logprobs = basedOn.Logprobs;
			this.Echo = basedOn.Echo;
			this.MultipleStopSequences = basedOn.MultipleStopSequences;
			this.BestOf = basedOn.BestOf;
			this.user = basedOn.user;
			this.Suffix = basedOn.Suffix;
		}

		/// <summary>
		/// Creates a new <see cref="CompletionRequest"/>, using the specified prompts
		/// </summary>
		/// <param name="prompts">One or more prompts to generate from</param>
		public CompletionRequest(params string[] prompts)
		{
			this.MultiplePrompts = prompts;
		}

		/// <summary>
		/// Creates a new <see cref="CompletionRequest"/> with the specified parameters
		/// </summary>
		/// <param name="prompt">The prompt to generate from</param>
		/// <param name="model">The model to use. You can use the models endpoint to see all of your available models, or use a standard model.</param>
		/// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
		/// <param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
		/// <param name="suffix">The suffix that comes after a completion of inserted text</param>
		/// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
		/// <param name="numOutputs">How many different choices to request for each prompt.</param>
		/// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
		/// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
		/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
		/// <param name="echo">Echo back the prompt in addition to the completion.</param>
		/// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
		public CompletionRequest(
			string prompt,
			Model model = null,
			int? max_tokens = null,
			double? temperature = null,
			string suffix = null,
			double? top_p = null,
			int? numOutputs = null,
			double? presencePenalty = null,
			double? frequencyPenalty = null,
			int? logProbs = null,
			bool? echo = null,
			params string[] stopSequences)
		{
			// NOTE(review): a null model overwrites the DefaultModel initializer via the implicit
			// Model-to-string conversion; preserved as-is since serializers may rely on it — confirm intent.
			this.Model = model;
			this.Prompt = prompt;
			this.MaxTokens = max_tokens;
			this.Temperature = temperature;
			this.Suffix = suffix;
			this.TopP = top_p;
			this.NumChoicesPerPrompt = numOutputs;
			this.PresencePenalty = presencePenalty;
			this.FrequencyPenalty = frequencyPenalty;
			this.Logprobs = logProbs;
			this.Echo = echo;
			this.MultipleStopSequences = stopSequences;
		}
	}
}
--------------------------------------------------------------------------------
/OpenAI_API/Chat/ChatMessage.cs:
--------------------------------------------------------------------------------
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
using static System.Net.WebRequestMethods;

namespace OpenAI_API.Chat
{
	/// <summary>
	/// Chat message sent or received from the API. Includes who is speaking in the "role" and the message text in the "content"
	/// </summary>
	public class ChatMessage
	{
		/// <summary>
		/// Creates an empty <see cref="ChatMessage"/>, with <see cref="Role"/> defaulting to <see cref="ChatMessageRole.User"/>
		/// </summary>
		public ChatMessage()
		{
			this.Role = ChatMessageRole.User;
		}

		/// <summary>
		/// Constructor for a new Chat Message
		/// </summary>
		/// <param name="role">The role of the message, which can be "system", "assistant" or "user"</param>
		/// <param name="text">The text to send in the message</param>
		public ChatMessage(ChatMessageRole role, string text)
		{
			this.Role = role;
			this.TextContent = text;
		}

		/// <summary>
		/// Constructor for a new Chat Message with text and one or more images
		/// </summary>
		/// <param name="role">The role of the message, which can be "system", "assistant" or "user"</param>
		/// <param name="text">The text to send in the message. May be null if only sending image(s).</param>
		/// <param name="imageInputs">Optionally add one or more images to the message if using a GPT Vision model. Consider using <see cref="ImageInput.FromFile(string, string)"/> to load an image from a local file, or <see cref="ImageInput.FromImageUrl(string, string)"/> to point to an image via URL.</param>
		public ChatMessage(ChatMessageRole role, string text, params ImageInput[] imageInputs)
		{
			this.Role = role;
			this.TextContent = text;
			this.Images.AddRange(imageInputs);
		}

		// Raw string form of the role, as serialized to and from the API JSON.
		[JsonProperty("role")]
		internal string rawRole { get; set; }

		/// <summary>
		/// The role of the message, which can be "system", "assistant" or "user"
		/// </summary>
		[JsonIgnore]
		public ChatMessageRole Role
		{
			get
			{
				return ChatMessageRole.FromString(rawRole);
			}
			set
			{
				rawRole = value.ToString();
			}
		}

		/// <summary>
		/// The text content of the message.
		/// </summary>
		[JsonIgnore]
		public string TextContent { get; set; }

		/// <summary>
		/// To support multi-modal messages, this property has been renamed to <see cref="TextContent"/>. Please use that instead.
		/// </summary>
		[Obsolete("This property has been renamed to TextContent.")]
		[JsonIgnore]
		public string Content { get => TextContent; set => TextContent = value; }

		/// <summary>
		/// This is only used for serializing the request into JSON, do not use it directly.
		/// Combines <see cref="TextContent"/> and <see cref="Images"/> into the list of content items the API expects.
		/// </summary>
		// NOTE(review): generic arguments were stripped by the text dump; reconstructed as IList<ContentItem> — confirm.
		[JsonProperty("content")]
		[JsonConverter(typeof(ContentDataConverter))]
		internal IList<ContentItem> ContentItems
		{
			get
			{
				List<ContentItem> items = new List<ContentItem>();
				if (!string.IsNullOrEmpty(TextContent))
				{
					items.Add(new ContentItem(TextContent));
				}
				if (Images != null && Images.Count > 0)
				{
					foreach (var image in Images)
					{
						items.Add(new ContentItem(image));
					}
				}

				return items;
			}
			set
			{
				// Unpack deserialized content items back into TextContent / Images
				foreach (var item in value)
				{
					if (item.Type == "text")
					{
						TextContent = item.Text;
					}
					else if (item.Type == "image_url")
					{
						Images.Add(item.Image);
					}
				}
			}
		}

		/// <summary>
		/// An optional name of the user in a multi-user chat
		/// </summary>
		[JsonProperty("name")]
		public string Name { get; set; }

		/// <summary>
		/// Optionally add one or more images to the message if using a GPT Vision model.
		/// </summary>
		[JsonIgnore]
		public List<ImageInput> Images { get; set; } = new List<ImageInput>();

		/// <summary>
		/// This is a helper class to serialize the content of the message to JSON
		/// </summary>
		internal class ContentItem
		{
			// Backing fields; exactly one of them is meaningful at a time, selected by Type.
			private string text;
			private ImageInput image;

			/// <summary>
			/// The type of content to send to the API. This can be "text" or "image_url".
			/// </summary>
			[JsonProperty("type")]
			public string Type { get; set; } = "text";

			/// <summary>
			/// Sends text to the API. This is the default type. Returns null unless <see cref="Type"/> is "text".
			/// </summary>
			[JsonProperty("text")]
			public string Text
			{
				get
				{
					if (Type == "text")
						return text;
					else
						return null;
				}

				set
				{
					// Setting text switches this item into text mode and clears any image
					text = value;
					image = null;
					Type = "text";
				}
			}

			/// <summary>
			/// Send an image to GPT Vision. Returns null unless <see cref="Type"/> is "image_url".
			/// </summary>
			[JsonProperty("image_url")]
			public ImageInput Image
			{
				get
				{
					if (Type == "image_url")
						return image;
					else
						return null;
				}

				set
				{
					// Setting an image switches this item into image mode and clears any text
					image = value;
					text = null;
					Type = "image_url";
				}
			}

			/// <summary>
			/// Creates an empty <see cref="ContentItem"/>
			/// </summary>
			public ContentItem()
			{

			}

			/// <summary>
			/// Creates a new <see cref="ContentItem"/> with the given text
			/// </summary>
			/// <param name="text">The text to send to the API</param>
			public ContentItem(string text)
			{
				this.Text = text;
				this.Type = "text";
			}

			/// <summary>
			/// Creates a new <see cref="ContentItem"/> with the given image
			/// </summary>
			/// <param name="image">The image to send to the API. Consider using <see cref="ImageInput.FromFile(string, string)"/> to load an image from a local file, or <see cref="ImageInput.FromImageUrl(string, string)"/> to point to an image via URL.</param>
			public ContentItem(ImageInput image)
			{
				this.Image = image;
				this.Type = "image_url";
			}
		}

		/// <summary>
		/// Represents an image to send to the API in a chat message as part of GPT Vision.
		/// </summary>
		public class ImageInput
		{
			/// <summary>
			/// Either a URL of the image or the base64 encoded image data
			/// </summary>
			[JsonProperty("url")]
			public string Url { get; set; }

			/// <summary>
			/// By controlling the detail parameter, which has three options, low, high, or auto, you have control over how the model processes the image and generates its textual understanding.
			/// </summary>
			[JsonProperty("detail")]
			public string Detail { get; set; } = "auto";

			/// <summary>
			/// Instantiates a new ImageInput object with the given url
			/// </summary>
			/// <param name="url">A link to the image</param>
			/// <param name="detail">By controlling the detail parameter, which has three options, low, high, or auto, you have control over how the model processes the image and generates its textual understanding</param>
			public ImageInput(string url, string detail = "auto")
			{
				this.Url = url;
				this.Detail = detail;
			}

			/// <summary>
			/// Instantiates a new ImageInput object with the given image data bytes
			/// </summary>
			/// <param name="imageData">The image as bytes to be base64 encoded. OpenAI currently supports PNG (.png), JPEG (.jpeg and .jpg), WEBP (.webp), and non-animated GIF (.gif)</param>
			/// <param name="detail">By controlling the detail parameter, which has three options, low, high, or auto, you have control over how the model processes the image and generates its textual understanding</param>
			public ImageInput(byte[] imageData, string detail = "auto")
			{
				// NOTE(review): the mime type is hard-coded to image/jpeg even though other formats are
				// accepted as bytes; the API appears tolerant of this, but confirm before changing.
				this.Url = "data:image/jpeg;base64," + Convert.ToBase64String(imageData);
				this.Detail = detail;
			}

			/// <summary>
			/// Instantiates a new ImageInput object with the given image loaded from disk
			/// </summary>
			/// <param name="filePath">The local file path of the image. OpenAI currently supports PNG (.png), JPEG (.jpeg and .jpg), WEBP (.webp), and non-animated GIF (.gif)</param>
			/// <param name="detail">By controlling the detail parameter, which has three options, low, high, or auto, you have control over how the model processes the image and generates its textual understanding</param>
			/// <returns>A new <see cref="ImageInput"/> wrapping the file's bytes as a base64 data URL</returns>
			public static ImageInput FromFile(string filePath, string detail = "auto")
			{
				return new ImageInput(System.IO.File.ReadAllBytes(filePath), detail);
			}

			/// <summary>
			/// Instantiates a new ImageInput object with the given image data bytes
			/// </summary>
			/// <param name="imageData">The image as bytes to be base64 encoded</param>
			/// <param name="detail">By controlling the detail parameter, which has three options, low, high, or auto, you have control over how the model processes the image and generates its textual understanding</param>
			/// <returns>A new <see cref="ImageInput"/> wrapping the bytes as a base64 data URL</returns>
			public static ImageInput FromImageBytes(byte[] imageData, string detail = "auto")
			{
				return new ImageInput(imageData, detail);
			}

			/// <summary>
			/// Instantiates a new ImageInput object with the given url
			/// </summary>
			/// <param name="url">A link to the image</param>
			/// <param name="detail">By controlling the detail parameter, which has three options, low, high, or auto, you have control over how the model processes the image and generates its textual understanding</param>
			/// <returns>A new <see cref="ImageInput"/> pointing at the given URL</returns>
			public static ImageInput FromImageUrl(string url, string detail = "auto")
			{
				return new ImageInput(url, detail);
			}

			/// <summary>
			/// By default, the model will use the auto setting which will look at the image input size and decide if it should use the low or high setting.
			/// </summary>
			public const string DetailAuto = "auto";
			/// <summary>
			/// low will disable the “high res” model. The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail.
			/// </summary>
			public const string DetailLow = "low";
			/// <summary>
			/// high will enable “high res” mode, which first allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens.
			/// </summary>
			public const string DetailHigh = "high";
		}

		/// <summary>
		/// Converts the "content" JSON field, which the API may supply either as a plain string or as an array of content items, into a list of <see cref="ContentItem"/>.
		/// </summary>
		internal class ContentDataConverter : JsonConverter
		{
			public override bool CanConvert(Type objectType)
			{
				return true;
			}

			public override object ReadJson(JsonReader reader, Type objectType, object existingValue, JsonSerializer serializer)
			{
				JToken token = JToken.Load(reader);
				if (token.Type == JTokenType.Array)
				{
					// Bug fix: multi-part content (text and/or images) is serialized as a JSON array,
					// which previously fell through to the null branch and was silently dropped.
					return token.ToObject<List<ContentItem>>();
				}
				else if (token.Type == JTokenType.Object)
				{
					// Bug fix: a single bare content-item object cannot convert directly to a list;
					// wrap it in a one-element list instead of throwing.
					return new List<ContentItem>() { token.ToObject<ContentItem>() };
				}
				else if (token.Type == JTokenType.String)
				{
					// Plain-string content (the common case for API responses) wraps into a single text item
					List<ContentItem> content = new List<ContentItem>();
					content.Add(new ContentItem(token.ToObject<string>()));
					return content;
				}
				else
				{
					return null;
				}
			}

			public override void WriteJson(JsonWriter writer, object value, JsonSerializer serializer)
			{
				serializer.Serialize(writer, value);
			}
		}

	}
}
--------------------------------------------------------------------------------
/OpenAI_API/Chat/IChatEndpoint.cs:
--------------------------------------------------------------------------------
1 | using OpenAI_API.Models;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Threading.Tasks;
5 |
6 | namespace OpenAI_API.Chat
7 | {
8 | ///
9 | /// An interface for , the ChatGPT API endpoint. Use this endpoint to send multiple messages and carry on a conversation.
10 | ///
11 | public interface IChatEndpoint
12 | {
13 | ///
14 | /// This allows you to set default parameters for every request, for example to set a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
15 | ///
16 | ChatRequest DefaultChatRequestArgs { get; set; }
17 |
18 | ///
19 | /// Creates an ongoing chat which can easily encapsulate the conversation. This is the simplest way to use the Chat endpoint.
20 | ///
21 | /// Allows setting the parameters to use when calling the ChatGPT API. Can be useful for setting temperature, presence_penalty, and more. See OpenAI documentation for a list of possible parameters to tweak.
22 | /// A which encapsulates a back and forth chat between a user and an assistant.
23 | Conversation CreateConversation(ChatRequest defaultChatRequestArgs = null);
24 |
25 |
26 | ///
27 | /// Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in if present.
28 | ///
29 | /// The request to send to the API.
30 | /// Asynchronously returns the completion result. Look in its property for the results.
31 | Task CreateChatCompletionAsync(ChatRequest request);
32 |
33 | ///
34 | /// Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in if present.
35 | ///
36 | /// The request to send to the API.
37 | /// Overrides as a convenience.
38 | /// Asynchronously returns the completion result. Look in its property for the results.
39 | Task CreateChatCompletionAsync(ChatRequest request, int numOutputs = 5);
40 |
41 | ///
42 | /// Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in if present.
43 | ///
44 | /// The array of messages to send to the API
45 | /// The model to use. See the ChatGPT models available from
46 | /// What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or but not both.
47 | /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or but not both.
48 | /// How many different choices to request for each prompt.
49 | /// How many tokens to complete to. Can return fewer if a stop sequence is hit.
50 | /// The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.
51 | /// The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.
52 | /// Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
53 | /// One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
54 | /// Asynchronously returns the completion result. Look in its property for the results.
55 | Task CreateChatCompletionAsync(IList messages, Model model = null, double? temperature = null, double? top_p = null, int? numOutputs = null, int? max_tokens = null, double? frequencyPenalty = null, double? presencePenalty = null, IReadOnlyDictionary logitBias = null, params string[] stopSequences);
56 |
57 | ///
58 | /// Ask the API to complete the request using the specified message(s). Any parameters will fall back to default values specified in if present.
59 | ///
60 | /// The messages to use in the generation.
61 | /// The with the API response.
62 | Task CreateChatCompletionAsync(params ChatMessage[] messages);
63 |
64 | ///
65 | /// Ask the API to complete the request using the specified message(s). Any parameters will fall back to default values specified in if present.
66 | ///
67 | /// The user message or messages to use in the generation. All strings are assumed to be of Role
68 | /// The with the API response.
69 | Task CreateChatCompletionAsync(params string[] userMessages);
70 |
71 | ///
72 | /// Ask the API to complete the request using the specified message and image(s). Any parameters will fall back to default values specified in if present, except for , which will default to .
73 | ///
74 | /// The user message text to use in the generation.
75 | /// The images to use in the generation.
76 | /// The with the API response.
77 | Task CreateChatCompletionAsync(string userMessage, params ChatMessage.ImageInput[] images);
78 |
79 |
80 | ///
81 | /// Ask the API to complete the message(s) using the specified request, and stream the results to the as they come in.
82 | /// If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of instead.
83 | ///
84 | /// The request to send to the API. This does not fall back to default values specified in .
85 | /// An action to be called as each new result arrives, which includes the index of the result in the overall result set.
86 | Task StreamChatAsync(ChatRequest request, Action resultHandler);
87 |
88 | ///
89 | /// Ask the API to complete the message(s) using the specified request, and stream the results as they come in.
90 | /// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use instead.
91 | ///
92 | /// The request to send to the API. This does not fall back to default values specified in .
93 | /// An async enumerable with each of the results as they come in. See for more details on how to consume an async enumerable.
94 | IAsyncEnumerable StreamChatEnumerableAsync(ChatRequest request);
95 |
96 | ///
97 | /// Ask the API to complete the message(s) using the specified request, and stream the results as they come in.
98 | /// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use instead.
99 | ///
100 | /// The array of messages to send to the API
101 | /// The model to use. See the ChatGPT models available from
102 | /// What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or but not both.
103 | /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or but not both.
104 | /// How many different choices to request for each prompt.
105 | /// How many tokens to complete to. Can return fewer if a stop sequence is hit.
106 | /// The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.
107 | /// The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.
108 | /// Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
109 | /// One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
110 | /// An async enumerable with each of the results as they come in. See the C# docs for more details on how to consume an async enumerable.
111 | IAsyncEnumerable StreamChatEnumerableAsync(IList messages, Model model = null, double? temperature = null, double? top_p = null, int? numOutputs = null, int? max_tokens = null, double? frequencyPenalty = null, double? presencePenalty = null, IReadOnlyDictionary logitBias = null, params string[] stopSequences);
112 |
113 | ///
114 | /// Ask the API to complete the message(s) using the specified request, and stream the results to the as they come in.
115 | /// If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of instead.
116 | ///
117 | /// The request to send to the API. This does not fall back to default values specified in .
118 | /// An action to be called as each new result arrives, which includes the index of the result in the overall result set.
119 | Task StreamCompletionAsync(ChatRequest request, Action resultHandler);
120 | }
121 | }
--------------------------------------------------------------------------------