├── .github ├── FUNDING.yml └── workflows │ └── dart.yml ├── .gitignore ├── .vscode ├── launch.json └── settings.json ├── CHANGELOG.md ├── LICENSE ├── README.md ├── analysis_options.yaml ├── example ├── .env.example ├── .gitignore ├── CHANGELOG.md ├── README.md ├── analysis_options.yaml ├── lib │ ├── chat_completion_example.dart │ ├── chat_completion_stream_example.dart │ ├── chat_completion_with_log_probs_example.dart │ ├── chat_example_with_tools.dart │ ├── chat_stream_example.dart │ ├── chat_stream_example_with_function_call.dart │ ├── completion_example.dart │ ├── completion_stream.dart │ ├── create_audio_speech.dart │ ├── create_audio_transcription.dart │ ├── create_audio_translation.dart │ ├── edit_example.dart │ ├── embeddings_example.dart │ ├── env │ │ └── env.dart │ ├── example_for_testing_stream_error.dart │ ├── fine_tune._stream.dart │ ├── image_edit_example.dart │ ├── image_variation_example.dart │ ├── islamic_chat.dart │ ├── list_all_files.dart │ ├── log_response_bodies.dart │ ├── main.dart │ ├── moderation_example.dart │ ├── one_chat_message_example.dart │ ├── one_word_completion.dart │ ├── request_with_custom_client.dart │ ├── sentence_completion.dart │ ├── setting_for_web_env.dart │ ├── simple_tools_demo.dart │ ├── stream_request_with_custom_client.dart │ └── testing_file_ignore.dart ├── pubspec.lock └── pubspec.yaml ├── lib ├── dart_openai.dart └── src │ ├── core │ ├── base │ │ ├── audio │ │ │ ├── audio.dart │ │ │ └── interfaces.dart │ │ ├── chat │ │ │ ├── chat.dart │ │ │ └── interfaces │ │ │ │ └── create.dart │ │ ├── completion.dart │ │ ├── edits │ │ │ ├── edits.dart │ │ │ └── interfaces │ │ │ │ └── create.dart │ │ ├── embeddings │ │ │ ├── base.dart │ │ │ └── interfaces │ │ │ │ └── create.dart │ │ ├── entity │ │ │ ├── base.dart │ │ │ └── interfaces │ │ │ │ └── enpoint.dart │ │ ├── files │ │ │ ├── base.dart │ │ │ └── interfaces │ │ │ │ ├── delete.dart │ │ │ │ ├── list.dart │ │ │ │ ├── retrieve_content.dart │ │ │ │ ├── retrive.dart │ │ │ │ └── 
upload.dart │ │ ├── fine_tunes │ │ │ ├── base.dart │ │ │ └── interfaces │ │ │ │ ├── cancel.dart │ │ │ │ ├── create.dart │ │ │ │ ├── delete.dart │ │ │ │ ├── events.dart │ │ │ │ ├── list.dart │ │ │ │ ├── retrieve.dart │ │ │ │ └── stream_events.dart │ │ ├── images │ │ │ ├── base.dart │ │ │ └── interfaces │ │ │ │ ├── create.dart │ │ │ │ ├── edit.dart │ │ │ │ └── variations.dart │ │ ├── interfaces │ │ │ └── create.dart │ │ ├── model │ │ │ ├── base.dart │ │ │ └── interfaces │ │ │ │ ├── delete.dart │ │ │ │ ├── list.dart │ │ │ │ └── one.dart │ │ ├── moderations │ │ │ ├── base.dart │ │ │ └── interfaces │ │ │ │ └── create.dart │ │ └── openai_client │ │ │ └── base.dart │ ├── builder │ │ ├── base_api_url.dart │ │ └── headers.dart │ ├── constants │ │ ├── config.dart │ │ ├── endpoints.dart │ │ └── strings.dart │ ├── enum.dart │ ├── exceptions │ │ ├── api_key_not_set.dart │ │ ├── export.dart │ │ ├── request_failure.dart │ │ └── unexpected.dart │ ├── models │ │ ├── audio │ │ │ └── audio.dart │ │ ├── chat │ │ │ ├── chat.dart │ │ │ ├── etc │ │ │ │ └── message_adapter.dart │ │ │ ├── stream │ │ │ │ ├── chat.dart │ │ │ │ └── sub_models │ │ │ │ │ ├── choices │ │ │ │ │ ├── choices.dart │ │ │ │ │ └── sub_models │ │ │ │ │ │ └── delta.dart │ │ │ │ │ └── usage.dart │ │ │ └── sub_models │ │ │ │ ├── choices │ │ │ │ ├── choices.dart │ │ │ │ └── sub_models │ │ │ │ │ ├── log_probs │ │ │ │ │ ├── log_probs.dart │ │ │ │ │ └── sub_models │ │ │ │ │ │ ├── content.dart │ │ │ │ │ │ └── top_prob.dart │ │ │ │ │ ├── message.dart │ │ │ │ │ └── sub_models │ │ │ │ │ ├── content.dart │ │ │ │ │ ├── sub_models │ │ │ │ │ └── response_function_call.dart │ │ │ │ │ └── tool_call.dart │ │ │ │ └── usage.dart │ │ ├── completion │ │ │ ├── completion.dart │ │ │ ├── stream │ │ │ │ ├── completion.dart │ │ │ │ └── sub_models │ │ │ │ │ └── choices.dart │ │ │ └── sub_models │ │ │ │ ├── choice.dart │ │ │ │ └── usage.dart │ │ ├── edit │ │ │ ├── edit.dart │ │ │ └── sub_models │ │ │ │ ├── choice.dart │ │ │ │ └── usage.dart │ │ ├── 
embedding │ │ │ ├── embedding.dart │ │ │ └── sub-models │ │ │ │ ├── data.dart │ │ │ │ └── usage.dart │ │ ├── export.dart │ │ ├── file │ │ │ └── file.dart │ │ ├── fine_tune │ │ │ ├── fine_tune.dart │ │ │ ├── stream │ │ │ │ └── fine_tun_event.dart │ │ │ └── sub_models │ │ │ │ ├── event.dart │ │ │ │ ├── hyper_params.dart │ │ │ │ └── training_files.dart │ │ ├── image │ │ │ └── image │ │ │ │ ├── image.dart │ │ │ │ └── sub_models │ │ │ │ └── data.dart │ │ ├── model │ │ │ ├── model.dart │ │ │ └── sub_models │ │ │ │ └── permission.dart │ │ ├── moderation │ │ │ ├── moderation.dart │ │ │ └── sub_models │ │ │ │ ├── catgeories.dart │ │ │ │ ├── catgeories_scores.dart │ │ │ │ └── result.dart │ │ └── tool │ │ │ ├── function │ │ │ ├── function.dart │ │ │ ├── function_call.dart │ │ │ ├── function_call_response.dart │ │ │ ├── property.dart │ │ │ └── stream_function_call_response.dart │ │ │ └── tool.dart │ ├── networking │ │ └── client.dart │ └── utils │ │ ├── extensions.dart │ │ ├── logger.dart │ │ ├── streaming_http_client_default.dart │ │ ├── streaming_http_client_io.dart │ │ └── streaming_http_client_web.dart │ └── instance │ ├── audio │ └── audio.dart │ ├── chat │ └── chat.dart │ ├── completion │ └── completion.dart │ ├── edits │ └── edits.dart │ ├── embedding │ └── embedding.dart │ ├── files │ └── files.dart │ ├── fine_tunes │ └── fine_tunes.dart │ ├── images │ └── images.dart │ ├── model │ └── model.dart │ ├── moderations │ └── moderations.dart │ └── openai.dart ├── pubspec.lock ├── pubspec.yaml └── test └── openai_test.dart /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [anasfik] 4 | patreon: gwhyyy 5 | custom: ['paypal.me/guessWhyyy'] 6 | -------------------------------------------------------------------------------- /.github/workflows/dart.yml: -------------------------------------------------------------------------------- 1 | name: Dart 2 | 3 | on: 4 
| push: 5 | branches: ["main"] 6 | pull_request: 7 | branches: ["main"] 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v3 15 | 16 | # Note: This workflow uses the latest stable version of the Dart SDK. 17 | # You can specify other versions if desired, see documentation here: 18 | # https://github.com/dart-lang/setup-dart/blob/main/README.md 19 | # - uses: dart-lang/setup-dart@v1 20 | - uses: dart-lang/setup-dart@9a04e6d73cca37bd455e0608d7e5092f881fd603 21 | 22 | - name: Install dependencies 23 | env: 24 | OPEN_AI_API_KEY: ${{ secrets.OPEN_AI_API_KEY }} 25 | run: | 26 | dart pub get 27 | sed -i -E "s/OpenAI.apiKey = \".*\";/OpenAI.apiKey = \"$OPEN_AI_API_KEY\";/g" test/openai_test.dart 28 | cd example/ 29 | dart pub get 30 | dart run build_runner build 31 | cd .. 32 | 33 | # Uncomment this step to verify the use of 'dart format' on each commit. 34 | # - name: Verify formatting 35 | # run: dart format --output=none --set-exit-if-changed . 36 | 37 | 38 | 39 | # Your project will need to have tests in test/ and a dependency on 40 | # package:test for this step to succeed. Note that Flutter projects will 41 | # want to change this to 'flutter test'. 42 | - name: Run tests 43 | run: dart test 44 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Files and directories created by pub. 2 | .dart_tool/ 3 | .packages 4 | 5 | build/ 6 | example/.env 7 | 8 | *.jsonl 9 | env.g.dart 10 | .env 11 | *.mp3 12 | *.png 13 | .idea/ 14 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "openai", 9 | "request": "launch", 10 | "type": "dart" 11 | } 12 | ] 13 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "cSpell.words": ["Epoches", "openai", "Probs"], 3 | "editor.acceptSuggestionOnEnter": "off" 4 | } 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Gwhyyy 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /analysis_options.yaml: -------------------------------------------------------------------------------- 1 | 2 | linter: 3 | rules: 4 | 5 | analyzer: 6 | plugins: 7 | - dart_code_metrics 8 | 9 | dart_code_metrics: 10 | extends: 11 | - ... # configures the list of preset configurations 12 | metrics: 13 | cyclomatic-complexity: 20 14 | lines-of-code: 100 15 | maximum-nesting-level: 4 16 | number-of-parameters: 4 17 | 18 | rules: 19 | - avoid-dynamic 20 | - avoid-redundant-async 21 | - avoid-passing-async-when-sync-expected 22 | - avoid-redundant-async 23 | - avoid-unnecessary-type-assertions 24 | - avoid-unnecessary-type-casts 25 | - avoid-unrelated-type-assertions 26 | - avoid-unused-parameters 27 | - avoid-nested-conditional-expressions 28 | - newline-before-return 29 | - no-boolean-literal-compare 30 | - no-empty-block 31 | - prefer-trailing-comma 32 | - prefer-conditional-expressions 33 | - no-equal-then-else 34 | - prefer-moving-to-variable 35 | - avoid-duplicate-exports 36 | - avoid-dynamic 37 | - avoid-late-keyword 38 | - avoid-nested-conditional-expressions 39 | - avoid-unnecessary-type-assertions 40 | - avoid-unnecessary-type-casts 41 | - member-ordering 42 | - no-magic-number 43 | - prefer-trailing-comma 44 | - always-remove-listener 45 | - avoid-border-all 46 | - avoid-expanded-as-spacer 47 | - avoid-wrapping-in-padding 48 | - prefer-const-border-radius 49 | - prefer-correct-edge-insets-constructor 50 | - prefer-single-widget-per-file 51 | 52 | # - arguments-ordering: 53 | # child-last: true 54 | # - prefer-match-file-name rules-exclude: 55 | # - ... # configures the list of files that should be ignored by rules 56 | # anti-patterns: 57 | # - ... 
# configures the list of anti-patterns 58 | 59 | exclude: 60 | - "example/**" 61 | - "build/**" 62 | - "**/*.g.dart" 63 | - "**/*.freezed.dart" -------------------------------------------------------------------------------- /example/.env.example: -------------------------------------------------------------------------------- 1 | OPEN_AI_API_KEY= -------------------------------------------------------------------------------- /example/.gitignore: -------------------------------------------------------------------------------- 1 | # Files and directories created by pub. 2 | .dart_tool/ 3 | .packages 4 | 5 | .env 6 | env.g.dart 7 | 8 | # Conventional directory for build output. 9 | build/ 10 | *.mp3 11 | 12 | *.png 13 | 14 | speechOutput/ -------------------------------------------------------------------------------- /example/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 1.0.0 2 | 3 | - Initial version. 4 | -------------------------------------------------------------------------------- /example/README.md: -------------------------------------------------------------------------------- 1 | ## Setup 2 | 3 | 1. Set your API Key inside the `.env.example` file : 4 | 5 | ``` 6 | OPEN_AI_API_KEY= 7 | ``` 8 | 9 | 2. if you're willing to change the environment variable name, then you should change it as well inside the `lib/env/env.dart`, otherwise if you're not changing it, pass directly to the next step. 10 | 3. Change `.env.example` file name to `.env`. 11 | 4. Run `dart pub get`. 12 | 5. Run `dart run build_runner build`. 13 | 6. Run the `lib/main.dart` from your IDE, alternatively, Run `dart run lib/main.dart` from your command line. 14 | 7. That's it, now run your main.dart and other dart files. 
15 | -------------------------------------------------------------------------------- /example/analysis_options.yaml: -------------------------------------------------------------------------------- 1 | # This file configures the static analysis results for your project (errors, 2 | # warnings, and lints). 3 | # 4 | # This enables the 'recommended' set of lints from `package:lints`. 5 | # This set helps identify many issues that may lead to problems when running 6 | # or consuming Dart code, and enforces writing Dart using a single, idiomatic 7 | # style and format. 8 | # 9 | # If you want a smaller set of lints you can change this to specify 10 | # 'package:lints/core.yaml'. These are just the most critical lints 11 | # (the recommended set includes the core lints). 12 | # The core lints are also what is used by pub.dev for scoring packages. 13 | 14 | include: package:lints/recommended.yaml 15 | 16 | # Uncomment the following section to specify additional rules. 17 | 18 | # linter: 19 | # rules: 20 | # - camel_case_types 21 | 22 | # analyzer: 23 | # exclude: 24 | # - path/to/excluded/files/** 25 | 26 | # For more information about the core and recommended set of lints, see 27 | # https://dart.dev/go/core-lints 28 | 29 | # For additional information about configuring this file, see 30 | # https://dart.dev/guides/language/analysis-options 31 | -------------------------------------------------------------------------------- /example/lib/chat_completion_example.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | void main() async { 6 | // Set the OpenAI API key from the .env file. 
7 | OpenAI.apiKey = Env.apiKey; 8 | 9 | final systemMessage = OpenAIChatCompletionChoiceMessageModel( 10 | content: [ 11 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 12 | "return any message you are given as JSON.", 13 | ), 14 | ], 15 | role: OpenAIChatMessageRole.assistant, 16 | ); 17 | 18 | final userMessage = OpenAIChatCompletionChoiceMessageModel( 19 | content: [ 20 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 21 | "Hello, I am a chatbot created by OpenAI. How are you today?", 22 | ), 23 | 24 | //! image url contents are allowed only for models with image support 25 | // OpenAIChatCompletionChoiceMessageContentItemModel.imageUrl( 26 | // "https://placehold.co/600x400", 27 | // ), 28 | ], 29 | role: OpenAIChatMessageRole.user, 30 | name: "anas", 31 | ); 32 | 33 | final requestMessages = [ 34 | systemMessage, 35 | userMessage, 36 | ]; 37 | 38 | OpenAIChatCompletionModel chatCompletion = await OpenAI.instance.chat.create( 39 | model: "gpt-3.5-turbo-1106", 40 | responseFormat: {"type": "json_object"}, 41 | seed: 6, 42 | messages: requestMessages, 43 | temperature: 0.2, 44 | maxTokens: 500, 45 | 46 | // uncomment and set your own properties if you want to use tool choices feature.. 47 | 48 | // toolChoice: "auto", 49 | // tools: [], 50 | ); 51 | 52 | print(chatCompletion.choices.first.message); // 53 | print(chatCompletion.systemFingerprint); // 54 | print(chatCompletion.usage.promptTokens); // 55 | print(chatCompletion.id); // 56 | } 57 | -------------------------------------------------------------------------------- /example/lib/chat_completion_stream_example.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | void main() { 6 | // Set the OpenAI API key from the .env file. 
7 | OpenAI.apiKey = Env.apiKey; 8 | 9 | final userMessage = OpenAIChatCompletionChoiceMessageModel( 10 | content: [ 11 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 12 | "Hello my friend!", 13 | ), 14 | ], 15 | role: OpenAIChatMessageRole.user, 16 | ); 17 | 18 | // Creates A Stream Of Chat Completions. 19 | final chatStream = OpenAI.instance.chat.createStream( 20 | model: "gpt-3.5-turbo", 21 | messages: [ 22 | userMessage, 23 | ], 24 | toolChoice: "none", 25 | seed: 423, 26 | n: 2, 27 | ); 28 | 29 | // Listen to the stream. 30 | chatStream.listen( 31 | (streamChatCompletion) { 32 | final content = streamChatCompletion.choices.first.delta.content; 33 | print(content); 34 | }, 35 | onError: (error) { 36 | print(error); 37 | }, 38 | cancelOnError: false, 39 | onDone: () { 40 | print("Done"); 41 | }, 42 | ); 43 | } 44 | -------------------------------------------------------------------------------- /example/lib/chat_completion_with_log_probs_example.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | void main() async { 6 | // Set the OpenAI API key from the .env file. 7 | OpenAI.apiKey = Env.apiKey; 8 | 9 | final systemMessage = OpenAIChatCompletionChoiceMessageModel( 10 | content: [ 11 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 12 | "return any message you are given as JSON.", 13 | ), 14 | ], 15 | role: OpenAIChatMessageRole.assistant, 16 | ); 17 | 18 | final userMessage = OpenAIChatCompletionChoiceMessageModel( 19 | content: [ 20 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 21 | "Hello, I am a chatbot created by OpenAI. 
How are you today?", 22 | ), 23 | ], 24 | role: OpenAIChatMessageRole.user, 25 | name: "anas", 26 | ); 27 | 28 | final requestMessages = [ 29 | systemMessage, 30 | userMessage, 31 | ]; 32 | 33 | OpenAIChatCompletionModel chatCompletion = await OpenAI.instance.chat.create( 34 | model: "gpt-3.5-turbo-1106", 35 | responseFormat: {"type": "json_object"}, 36 | seed: 6, 37 | messages: requestMessages, 38 | temperature: 0.2, 39 | maxTokens: 500, 40 | logprobs: true, 41 | topLogprobs: 2, 42 | ); 43 | 44 | print(chatCompletion.choices.first.logprobs?.content.first.bytes); // 45 | } 46 | -------------------------------------------------------------------------------- /example/lib/chat_example_with_tools.dart: -------------------------------------------------------------------------------- 1 | import 'dart:convert'; 2 | 3 | import 'package:dart_openai/dart_openai.dart'; 4 | 5 | import 'env/env.dart'; 6 | 7 | Future main() async { 8 | OpenAI.apiKey = Env.apiKey; 9 | OpenAI.showResponsesLogs = true; 10 | 11 | final function = OpenAIFunctionModel.withParameters( 12 | name: "getCurrentWeather", 13 | description: "Get the current weather in a given location", 14 | parameters: [ 15 | OpenAIFunctionProperty.string( 16 | name: "location", 17 | description: 'The city and state, e.g. 
San Francisco, CA', 18 | isRequired: true, 19 | ), 20 | OpenAIFunctionProperty.string( 21 | name: "unit", 22 | description: 'The unit of temperature to return', 23 | enumValues: ["celsius", "fahrenheit"], 24 | ), 25 | ], 26 | ); 27 | 28 | final userMsg = OpenAIChatCompletionChoiceMessageModel( 29 | content: [ 30 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 31 | "What’s the weather like in Boston right now?", 32 | ), 33 | ], 34 | role: OpenAIChatMessageRole.user, 35 | ); 36 | 37 | final chatRes1 = await OpenAI.instance.chat.create( 38 | model: "gpt-3.5-turbo-0613", 39 | messages: [userMsg], 40 | tools: [ 41 | OpenAIToolModel(type: "function", function: function), 42 | ], 43 | ); 44 | 45 | final assistantMsg1 = chatRes1.choices.first.message; 46 | final toolCalls = assistantMsg1.toolCalls; 47 | 48 | if (toolCalls == null || 49 | toolCalls.isEmpty || 50 | toolCalls.first.function.name != "getCurrentWeather") { 51 | print(assistantMsg1.content); 52 | return; 53 | } 54 | 55 | final funcCall = toolCalls.first.function; 56 | 57 | final weather = getCurrentWeather( 58 | location: jsonDecode(funcCall.arguments)?["location"], 59 | unit: jsonDecode(funcCall.arguments)?["unit"], 60 | ); 61 | 62 | final toolMsg = OpenAIChatCompletionChoiceMessageModel( 63 | toolCalls: [toolCalls.first], 64 | content: [ 65 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 66 | weather.toMap().toString(), 67 | ), 68 | ], 69 | role: OpenAIChatMessageRole.tool, 70 | ); 71 | 72 | final chatRes2 = await OpenAI.instance.chat.create( 73 | model: "gpt-3.5-turbo-0613", 74 | messages: [ 75 | userMsg, 76 | assistantMsg1, 77 | toolMsg.asRequestFunctionMessage(toolCallId: toolCalls.first.id!), 78 | ], 79 | tools: [ 80 | OpenAIToolModel(type: "function", function: function), 81 | ], 82 | ); 83 | 84 | final assistantMsg2 = chatRes2.choices.first.message; 85 | 86 | print(assistantMsg2.content); 87 | // The weather in Boston right now is sunny with a temperature of 22 degrees Celsius. 
88 | } 89 | 90 | Weather getCurrentWeather({ 91 | required String location, 92 | String? unit = "celsius", 93 | }) { 94 | return Weather( 95 | temperature: 22, 96 | unit: unit ?? "celsius", 97 | description: "Sunny", 98 | ); 99 | } 100 | 101 | class Weather { 102 | final int temperature; 103 | final String unit; 104 | final String description; 105 | 106 | const Weather({ 107 | required this.temperature, 108 | required this.unit, 109 | required this.description, 110 | }); 111 | 112 | Map toMap() { 113 | return { 114 | 'temperature': temperature, 115 | 'unit': unit, 116 | 'description': description, 117 | }; 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /example/lib/chat_stream_example.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | void main() async { 6 | OpenAI.apiKey = Env.apiKey; 7 | 8 | final chatStream = OpenAI.instance.chat.createStream( 9 | model: "gpt-3.5-turbfqfqo", 10 | messages: [ 11 | OpenAIChatCompletionChoiceMessageModel( 12 | content: [ 13 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 14 | "Hello, can you say: 'You are Anas'", 15 | ), 16 | ], 17 | role: OpenAIChatMessageRole.user, 18 | ), 19 | OpenAIChatCompletionChoiceMessageModel( 20 | content: [ 21 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 22 | "You are Anas", 23 | ), 24 | ], 25 | role: OpenAIChatMessageRole.assistant, 26 | ), 27 | OpenAIChatCompletionChoiceMessageModel( 28 | content: [ 29 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 30 | "Now I want you to repeat it, but change the word 'Anas' to 'Mohamed'", 31 | ), 32 | ], 33 | role: OpenAIChatMessageRole.user, 34 | ), 35 | ], 36 | ); 37 | 38 | chatStream.listen( 39 | (event) { 40 | print(event.choices.first.delta.content); 41 | }, 42 | onError: (e) { 43 | print("Error, $e"); 44 | }, 45 | onDone: () { 46 | print("Done"); 47 
| }, 48 | ); 49 | } 50 | -------------------------------------------------------------------------------- /example/lib/chat_stream_example_with_function_call.dart: -------------------------------------------------------------------------------- 1 | import 'dart:convert'; 2 | 3 | import 'package:dart_openai/dart_openai.dart'; 4 | 5 | import 'env/env.dart'; 6 | 7 | void main() async { 8 | OpenAI.apiKey = Env.apiKey; 9 | 10 | final chatStream = OpenAI.instance.chat.createStream( 11 | model: "gpt-3.5-turbo", 12 | messages: [ 13 | OpenAIChatCompletionChoiceMessageModel( 14 | content: [ 15 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 16 | "What is the fastest car in the world as of 2023?", 17 | ), 18 | ], 19 | role: OpenAIChatMessageRole.user, 20 | ), 21 | ], 22 | tools: [ 23 | OpenAIToolModel( 24 | type: "function", 25 | function: OpenAIFunctionModel.withParameters( 26 | name: "fastestCarInTheWorldInTheYear", 27 | parameters: [ 28 | OpenAIFunctionProperty.integer( 29 | name: "year", 30 | description: "The year to get the fastest car in the world for.", 31 | ), 32 | ], 33 | ), 34 | ), 35 | ], 36 | ); 37 | 38 | final functionNameMapper = {}; 39 | 40 | final stringBuf = StringBuffer(); 41 | 42 | chatStream.listen((event) { 43 | final function = event.choices.first.delta.toolCalls?.first.function; 44 | 45 | final name = function?.name; 46 | functionNameMapper[name ?? ""] = name; 47 | final args = function?.arguments; 48 | 49 | if (args != null) { 50 | stringBuf.write(args); 51 | } 52 | }, onDone: () { 53 | if (functionNameMapper.containsKey("fastestCarInTheWorldInTheYear")) { 54 | final fullResponse = stringBuf.toString(); 55 | 56 | print(fullResponse); 57 | 58 | if (isJSONDecoded(fullResponse)) { 59 | final decode = jsonDecode(fullResponse) as Map; 60 | final yearParam = decode['year'] as int; 61 | 62 | fastestCarInTheWorldInTheYear(yearParam); 63 | } else { 64 | // just saying, in case you need to handl normal responses. 
65 | print("Response can not be decoded, it is not valid JSON"); 66 | } 67 | } else { 68 | print("there is functioning calling but not ours"); 69 | } 70 | }); 71 | } 72 | 73 | bool isJSONDecoded(String source) { 74 | try { 75 | jsonDecode(source); 76 | return true; 77 | } catch (e) { 78 | return false; 79 | } 80 | } 81 | 82 | void fastestCarInTheWorldInTheYear(int year) { 83 | print( 84 | "[Mock Handling]: The fastest car in the world in $year is the Bugatti Chiron Super Sport 300+", 85 | ); 86 | } 87 | -------------------------------------------------------------------------------- /example/lib/completion_example.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | import 'package:example/env/env.dart'; 3 | 4 | Future main() async { 5 | // Set the OpenAI API key from the .env file. 6 | OpenAI.apiKey = Env.apiKey; 7 | 8 | final completion = await OpenAI.instance.completion.create( 9 | model: "text-davinci-003", 10 | prompt: "Dart is", 11 | ); 12 | 13 | print(completion.choices.first.text); 14 | 15 | // // Creates The Completion 16 | // OpenAICompletionModel completion = await OpenAI.instance.completion.create( 17 | // model: "text-davinci-003", 18 | // prompt: 'Flutter is ', 19 | // maxTokens: 100, 20 | // temperature: 0.8, 21 | // ); 22 | 23 | // Prints the completion text. 24 | } 25 | -------------------------------------------------------------------------------- /example/lib/completion_stream.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | void main() { 6 | // Set the OpenAI API key from the .env file. 7 | OpenAI.apiKey = Env.apiKey; 8 | 9 | // Creates A Stream Of Completions text. 
10 | Stream stream = 11 | OpenAI.instance.completion.createStream( 12 | model: "text-davinci-003", 13 | n: 1, 14 | prompt: ["hello"], 15 | ); 16 | 17 | // listen to the stream and print the text. 18 | stream.listen((event) { 19 | print(event.choices.first.text); 20 | }); 21 | } 22 | -------------------------------------------------------------------------------- /example/lib/create_audio_speech.dart: -------------------------------------------------------------------------------- 1 | import 'dart:io'; 2 | 3 | import 'package:dart_openai/dart_openai.dart'; 4 | import 'package:example/env/env.dart'; 5 | 6 | void main() async { 7 | OpenAI.apiKey = Env.apiKey; 8 | 9 | // The speech request. 10 | File speechFile = await OpenAI.instance.audio.createSpeech( 11 | model: "tts-1", 12 | input: "it is what it is.", 13 | voice: "nova", 14 | responseFormat: OpenAIAudioSpeechResponseFormat.opus, 15 | outputDirectory: await Directory("speechOutput").create(), 16 | outputFileName: DateTime.now().microsecondsSinceEpoch.toString(), 17 | ); 18 | 19 | // The file result. 20 | print(speechFile.path); 21 | } 22 | -------------------------------------------------------------------------------- /example/lib/create_audio_transcription.dart: -------------------------------------------------------------------------------- 1 | import 'dart:io'; 2 | 3 | import 'package:dart_openai/dart_openai.dart'; 4 | 5 | import 'env/env.dart'; 6 | 7 | // ignore: depend_on_referenced_packages 8 | import 'package:http/http.dart' as http; 9 | 10 | Future main() async { 11 | // Set the OpenAI API key from the .env file. 12 | OpenAI.apiKey = Env.apiKey; 13 | 14 | // create the audio transcription. 
15 | final transcription = await OpenAI.instance.audio.createTranscription( 16 | file: await getFileFromUrl( 17 | 'https://www.cbvoiceovers.com/wp-content/uploads/2017/05/Commercial-showreel.mp3', 18 | ), 19 | model: "whisper-1", 20 | responseFormat: OpenAIAudioResponseFormat.verbose_json, 21 | timestamp_granularities: [OpenAIAudioTimestampGranularity.segment], 22 | ); 23 | 24 | // print the transcription. 25 | print(transcription.text); 26 | print(transcription.segments?.map((e) => e.end)); 27 | } 28 | 29 | Future getFileFromUrl(String networkUrl) async { 30 | final response = await http.get(Uri.parse(networkUrl)); 31 | final uniqueImageName = DateTime.now().microsecondsSinceEpoch; 32 | final file = File("$uniqueImageName.mp3"); 33 | await file.writeAsBytes(response.bodyBytes); 34 | return file; 35 | } 36 | -------------------------------------------------------------------------------- /example/lib/create_audio_translation.dart: -------------------------------------------------------------------------------- 1 | import 'dart:io'; 2 | 3 | import 'package:dart_openai/dart_openai.dart'; 4 | 5 | import 'env/env.dart'; 6 | 7 | // ignore: depend_on_referenced_packages 8 | import 'package:http/http.dart' as http; 9 | 10 | Future main() async { 11 | // Set the OpenAI API key from the .env file. 12 | OpenAI.apiKey = Env.apiKey; 13 | 14 | // create the audio transcription. 15 | final translation = await OpenAI.instance.audio.createTranslation( 16 | file: await getFileFromUrl( 17 | 'https://www.cbvoiceovers.com/wp-content/uploads/2017/05/Commercial-showreel.mp3', 18 | fileExtension: "mp3"), 19 | model: "whisper-1", 20 | responseFormat: OpenAIAudioResponseFormat.json, 21 | ); 22 | 23 | // print the translation. 
24 | print(translation.text); 25 | } 26 | 27 | Future getFileFromUrl( 28 | String networkUrl, { 29 | String fileExtension = 'png', 30 | }) async { 31 | final response = await http.get(Uri.parse(networkUrl)); 32 | final uniqueImageName = DateTime.now().microsecondsSinceEpoch; 33 | final file = File("$uniqueImageName.$fileExtension"); 34 | await file.writeAsBytes(response.bodyBytes); 35 | return file; 36 | } 37 | -------------------------------------------------------------------------------- /example/lib/edit_example.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | Future main() async { 6 | // Set the OpenAI API key from the .env file. 7 | OpenAI.apiKey = Env.apiKey; 8 | 9 | // Creates the Edit 10 | OpenAIEditModel edit = await OpenAI.instance.edit.create( 11 | model: "text-davinci-edit-001", 12 | input: 13 | "Flutter is a cross-platform UI toolkit that is designed to allow code reuse across operating systems such as iOS and Android, while also allowing applications to interface directly with underlying platform services.", 14 | instruction: "summarize the input to 50 tokens at maximum", 15 | temperature: 0.8, 16 | n: 4, 17 | ); 18 | 19 | // Prints the choices. 
20 | for (int index = 0; index < edit.choices.length; index++) { 21 | print(edit.choices[index].text); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /example/lib/embeddings_example.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | import 'package:example/env/env.dart'; 3 | 4 | void main() async { 5 | OpenAI.apiKey = Env.apiKey; 6 | 7 | final embedding = await OpenAI.instance.embedding.create( 8 | model: "text-embedding-ada-002", 9 | input: "This is a sample text", 10 | ); 11 | 12 | for (int index = 0; index < embedding.data.length; index++) { 13 | final currentItem = embedding.data[index]; 14 | print(currentItem); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /example/lib/env/env.dart: -------------------------------------------------------------------------------- 1 | // lib/env/env.dart 2 | import 'package:envied/envied.dart'; 3 | 4 | part 'env.g.dart'; 5 | 6 | @Envied(path: ".env") 7 | abstract class Env { 8 | @EnviedField(varName: 'OPEN_AI_API_KEY') 9 | static const String apiKey = _Env.apiKey; 10 | } 11 | -------------------------------------------------------------------------------- /example/lib/example_for_testing_stream_error.dart: -------------------------------------------------------------------------------- 1 | import 'dart:async'; 2 | 3 | import 'package:dart_openai/dart_openai.dart'; 4 | import 'package:example/env/env.dart'; 5 | 6 | // This example is for testing the stream error, as example, cut the internet connection and run this example. 7 | 8 | void main() { 9 | // we set the api key 10 | OpenAI.apiKey = Env.apiKey; 11 | 12 | // we create the custom stream. 
13 | final stream = itemBodyCompletionStream( 14 | "What is the best makeup brand?", 15 | "Name 10 mackeup products", 16 | "You're a makeup artist", 17 | ); 18 | 19 | // we listen 20 | stream.listen((event) { 21 | print(event.body); 22 | }, onError: (e) { 23 | // if there is some error, it will be catched here. 24 | print(e); 25 | }, onDone: () { 26 | print("done"); 27 | }); 28 | } 29 | 30 | Stream itemBodyCompletionStream( 31 | String system, 32 | String user, 33 | String assistant, 34 | ) { 35 | final bodyCompletion = OpenAI.instance.chat.createStream( 36 | model: "gpt-3.5-turbo", 37 | messages: [ 38 | OpenAIChatCompletionChoiceMessageModel( 39 | role: OpenAIChatMessageRole.system, 40 | content: [ 41 | OpenAIChatCompletionChoiceMessageContentItemModel.text(system), 42 | ], 43 | ), 44 | OpenAIChatCompletionChoiceMessageModel( 45 | role: OpenAIChatMessageRole.user, 46 | content: [ 47 | OpenAIChatCompletionChoiceMessageContentItemModel.text(user), 48 | ], 49 | ), 50 | OpenAIChatCompletionChoiceMessageModel( 51 | role: OpenAIChatMessageRole.assistant, 52 | content: [ 53 | OpenAIChatCompletionChoiceMessageContentItemModel.text(assistant), 54 | ], 55 | ), 56 | ], 57 | ); 58 | final stream = StreamController(); 59 | 60 | bodyCompletion.listen((event) { 61 | final content = event.choices[0].delta.content; 62 | 63 | return content != null && content.first?.text != null 64 | ? 
stream.add(ItemBodyCompletion(body: content.first!.text!)) 65 | : () {}; 66 | }, onDone: () { 67 | stream.close(); 68 | }, onError: (e) { 69 | stream.addError(e); 70 | }); 71 | return stream.stream; 72 | } 73 | 74 | class ItemBodyCompletion { 75 | final String body; 76 | 77 | ItemBodyCompletion({required this.body}); 78 | } 79 | -------------------------------------------------------------------------------- /example/lib/fine_tune._stream.dart: -------------------------------------------------------------------------------- 1 | import 'dart:convert'; 2 | import 'dart:io'; 3 | 4 | import 'package:dart_openai/dart_openai.dart'; 5 | 6 | import 'env/env.dart'; 7 | 8 | void main() async { 9 | // Set the OpenAI API key from the .env file. 10 | OpenAI.apiKey = Env.apiKey; 11 | 12 | // Upload an example File 13 | OpenAIFileModel file = await OpenAI.instance.file.upload( 14 | purpose: 'fine-tune', 15 | file: jsonLFileExample(), 16 | ); 17 | 18 | // Creating a fine-tune job. 19 | OpenAIFineTuneModel fineTuneModel = await OpenAI.instance.fineTune.create( 20 | model: "ada", 21 | trainingFile: file.id, 22 | ); 23 | 24 | // Get a stream of events for a fine-tune job. 25 | Stream fineTuneEventStream = 26 | OpenAI.instance.fineTune.listEventsStream(fineTuneModel.id); 27 | 28 | // Listen to the stream. 29 | fineTuneEventStream.listen((event) { 30 | print(event); 31 | }); 32 | 33 | // Wait for 5 seconds. 34 | await Future.delayed(Duration(seconds: 5)); 35 | 36 | // cancel the fine-tune job. 37 | final cancelledFineTune = 38 | await OpenAI.instance.fineTune.cancel(fineTuneModel.id); 39 | 40 | // print the cancelled fine-tune job. 
41 | print("Cancelled fine-tune job: ${cancelledFineTune.id}"); 42 | } 43 | 44 | File jsonLFileExample() { 45 | final file = File("example.jsonl"); 46 | file.writeAsStringSync( 47 | jsonEncode( 48 | {"prompt": "", "completion": ""}), 49 | ); 50 | return file; 51 | } 52 | -------------------------------------------------------------------------------- /example/lib/image_edit_example.dart: -------------------------------------------------------------------------------- 1 | import 'dart:io'; 2 | 3 | import 'package:dart_openai/dart_openai.dart'; 4 | import 'package:example/env/env.dart'; 5 | 6 | void main() async { 7 | OpenAI.apiKey = Env.apiKey; 8 | 9 | OpenAIImageModel imageEdits = await OpenAI.instance.image.edit( 10 | prompt: 'mask the image with color red', 11 | image: File("IMAGE PATH HERE"), 12 | mask: File("MASK PATH HERE"), 13 | n: 1, 14 | size: OpenAIImageSize.size1024, 15 | responseFormat: OpenAIImageResponseFormat.b64Json, 16 | ); 17 | 18 | for (int index = 0; index < imageEdits.data.length; index++) { 19 | final currentItem = imageEdits.data[index]; 20 | print(currentItem.b64Json); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /example/lib/image_variation_example.dart: -------------------------------------------------------------------------------- 1 | import 'dart:async'; 2 | import 'dart:io'; 3 | 4 | import 'package:dart_openai/dart_openai.dart'; 5 | 6 | import 'env/env.dart'; 7 | 8 | Future main() async { 9 | // Set the OpenAI API key from the .env file. 
10 | OpenAI.apiKey = Env.apiKey; 11 | 12 | // Creates the Image Variation 13 | final imageVariations = await OpenAI.instance.image.variation( 14 | model: "dall-e-2", 15 | image: File("dart.png"), 16 | n: 4, 17 | size: OpenAIImageSize.size512, 18 | responseFormat: OpenAIImageResponseFormat.url, 19 | ); 20 | 21 | for (var index = 0; index < imageVariations.data.length; index++) { 22 | final currentItem = imageVariations.data[index]; 23 | print(currentItem.url); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /example/lib/islamic_chat.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | Future main() async { 6 | OpenAI.apiKey = Env.apiKey; 7 | final chatStream = OpenAI.instance.chat.createStream( 8 | model: "gpt-3.5-turbo", 9 | messages: [ 10 | OpenAIChatCompletionChoiceMessageModel( 11 | role: OpenAIChatMessageRole.user, 12 | content: [ 13 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 14 | "Say hello!", 15 | ), 16 | ], 17 | ), 18 | ], 19 | ); 20 | 21 | chatStream.listen((event) { 22 | print(event.choices.first.delta.content?.map((e) => e?.toMap())); 23 | }); 24 | } 25 | -------------------------------------------------------------------------------- /example/lib/list_all_files.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | Future main() async { 6 | // Set the OpenAI API key from the .env file. 7 | OpenAI.apiKey = Env.apiKey; 8 | 9 | // List all files. 10 | List files = await OpenAI.instance.file.list(); 11 | 12 | // Print the files. 
13 | print(files); 14 | } 15 | -------------------------------------------------------------------------------- /example/lib/log_response_bodies.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | import 'package:example/env/env.dart'; 3 | 4 | void main() { 5 | OpenAI.showLogs = true; 6 | OpenAI.showResponsesLogs = true; 7 | OpenAI.apiKey = Env.apiKey; 8 | 9 | final models = OpenAI.instance.model.list(); 10 | 11 | print(models); 12 | } 13 | -------------------------------------------------------------------------------- /example/lib/main.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | Future main() async { 6 | // Set the OpenAI API key from the .env file. 7 | OpenAI.apiKey = Env.apiKey; 8 | 9 | // Start using! 10 | final completion = await OpenAI.instance.completion.create( 11 | model: "text-davinci-003", 12 | prompt: "Dart is", 13 | ); 14 | 15 | // Printing the output to the console 16 | print(completion.choices[0].text); 17 | 18 | // Generate an image from a prompt. 19 | final image = await OpenAI.instance.image.create( 20 | prompt: "dog", 21 | n: 1, 22 | ); 23 | 24 | // Printing the output to the console. 
25 | for (int index = 0; index < image.data.length; index++) { 26 | final currentItem = image.data[index]; 27 | print(currentItem.url); 28 | } 29 | 30 | // create a moderation 31 | final moderation = await OpenAI.instance.moderation.create( 32 | input: "I will cut your head off", 33 | ); 34 | 35 | // Printing moderation 36 | print(moderation.results.first.categories.violence); 37 | } 38 | -------------------------------------------------------------------------------- /example/lib/moderation_example.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | Future main() async { 6 | // Set the OpenAI API key from the .env file. 7 | OpenAI.apiKey = Env.apiKey; 8 | 9 | // Creates the moderation. 10 | OpenAIModerationModel moderation = await OpenAI.instance.moderation.create( 11 | input: 'I hate you, I will kill you', 12 | ); 13 | 14 | // prints the result 15 | print(moderation.results.first.categories); 16 | } 17 | -------------------------------------------------------------------------------- /example/lib/one_chat_message_example.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | void main() async { 6 | OpenAI.apiKey = Env.apiKey; 7 | 8 | final chatStream = OpenAI.instance.chat.createStream( 9 | model: "gpt-3.5-turbo", 10 | messages: [ 11 | OpenAIChatCompletionChoiceMessageModel( 12 | content: [ 13 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 14 | "You are a Moroccon guy who lives in Morocco and knows how to speak in Darija", 15 | ), 16 | ], 17 | role: OpenAIChatMessageRole.system, 18 | ), 19 | OpenAIChatCompletionChoiceMessageModel( 20 | content: [ 21 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 22 | "salam", 23 | ), 24 | ], 25 | role: OpenAIChatMessageRole.user, 26 | ), 27 | ], 28 | ); 29 | 30 | 
chatStream.listen((event) { 31 | print(event.choices.first.delta.content); 32 | }); 33 | } 34 | -------------------------------------------------------------------------------- /example/lib/one_word_completion.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | import 'package:example/env/env.dart'; 3 | 4 | Future main() async { 5 | OpenAI.apiKey = Env.apiKey; 6 | OpenAI.showLogs = false; 7 | 8 | Stream wordsOfSentenceToComplete = 9 | Stream.fromIterable(["Hi ", "Edward ", ", ", "I "]); 10 | 11 | String accumulativeSentence = ""; 12 | 13 | await for (var word in wordsOfSentenceToComplete) { 14 | accumulativeSentence += word; 15 | 16 | final completion = await OpenAI.instance.completion.create( 17 | model: "text-davinci-003", 18 | prompt: accumulativeSentence, 19 | maxTokens: 5, 20 | ); 21 | 22 | print( 23 | "completion for '$accumulativeSentence': ${completion.choices.first.text}", 24 | ); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /example/lib/request_with_custom_client.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | import 'package:http/http.dart' as http; 6 | 7 | Future main() async { 8 | /// Set the API key to be used in requests. 9 | OpenAI.apiKey = Env.apiKey; 10 | 11 | /// THe custom client that will be used in the next request. 12 | final httpClient = http.Client(); 13 | 14 | /// We sent the request to get the list of all models. 15 | final modelsUsingCUstomClient = await OpenAI.instance.model.list( 16 | client: httpClient, 17 | ); 18 | 19 | /// printing the IDs of all models retrieved form the request. 
20 | print( 21 | modelsUsingCUstomClient 22 | .map((modelItem) => modelItem.id) 23 | .toList() 24 | .join("\n"), 25 | ); 26 | } 27 | -------------------------------------------------------------------------------- /example/lib/sentence_completion.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | import 'package:example/env/env.dart'; 3 | 4 | Future main() async { 5 | OpenAI.apiKey = Env.apiKey; 6 | OpenAI.showLogs = false; 7 | 8 | Stream wordsOfSentenceToComplete = 9 | Stream.fromIterable(["Hi ", "Edward", ", ", "I "]); 10 | 11 | String accumulativeSentence = ""; 12 | 13 | await for (var word in wordsOfSentenceToComplete) { 14 | accumulativeSentence += word; 15 | 16 | final completion = await OpenAI.instance.completion.create( 17 | model: "text-davinci-003", 18 | prompt: accumulativeSentence, 19 | maxTokens: 20, 20 | ); 21 | 22 | print( 23 | "completion for '$accumulativeSentence': ${completion.choices.first.text}"); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /example/lib/setting_for_web_env.dart: -------------------------------------------------------------------------------- 1 | // import 'package:dart_openai/dart_openai.dart'; 2 | 3 | // void main() { 4 | // OpenAI.isWeb = true; 5 | 6 | // print(OpenAI.isWeb); 7 | // } 8 | -------------------------------------------------------------------------------- /example/lib/simple_tools_demo.dart: -------------------------------------------------------------------------------- 1 | import 'dart:convert'; 2 | 3 | import 'package:dart_openai/dart_openai.dart'; 4 | import 'package:example/env/env.dart'; 5 | 6 | void main() async { 7 | OpenAI.apiKey = Env.apiKey; 8 | 9 | // The function to be called by the tool. 10 | void sumNumbers(int number1, int number2) { 11 | print("Your sum answer is ${number1 + number2}"); 12 | } 13 | 14 | // The tool object that wilm be sent to the API. 
15 | final sumNumbersTool = OpenAIToolModel( 16 | type: "function", 17 | function: OpenAIFunctionModel.withParameters( 18 | name: "sumOfTwoNumbers", 19 | parameters: [ 20 | OpenAIFunctionProperty.integer( 21 | name: "number1", 22 | description: "The first number to add", 23 | ), 24 | OpenAIFunctionProperty.integer( 25 | name: "number2", 26 | description: "The second number to add", 27 | ), 28 | ], 29 | ), 30 | ); 31 | 32 | // The user text message that will be sent to the API. 33 | final userMessage = OpenAIChatCompletionChoiceMessageModel( 34 | content: [ 35 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 36 | "What is the sum of 9996 and 3?", 37 | ), 38 | ], 39 | role: OpenAIChatMessageRole.user, 40 | ); 41 | 42 | // The actual call. 43 | final chat = await OpenAI.instance.chat.create( 44 | model: "gpt-3.5-turbo", 45 | messages: [userMessage], 46 | tools: [sumNumbersTool], 47 | ); 48 | 49 | // ! This handling is only for showcase and not completed as edge cases will not be handled that you should handle in your app. 50 | 51 | final message = chat.choices.first.message; 52 | 53 | // Wether the message has a tool call. 54 | if (message.haveToolCalls) { 55 | final call = message.toolCalls!.first; 56 | 57 | // Wether the tool call is the one we sent. 58 | if (call.function.name == "sumOfTwoNumbers") { 59 | // decode the arguments from the tool call. 60 | final decodedArgs = jsonDecode(call.function.arguments); 61 | 62 | final number1 = decodedArgs["number1"]; 63 | final number2 = decodedArgs["number2"]; 64 | 65 | // Call the function with the arguments. 
66 | sumNumbers(number1, number2); 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /example/lib/stream_request_with_custom_client.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | import 'env/env.dart'; 4 | 5 | import 'package:http/http.dart' as http; 6 | 7 | Future main() async { 8 | // Set the OpenAI API key from the .env file. 9 | OpenAI.apiKey = Env.apiKey; 10 | 11 | // Start using! 12 | final chatStream = OpenAI.instance.chat.createStream( 13 | model: "gpt-3.5-turbo", 14 | messages: [ 15 | OpenAIChatCompletionChoiceMessageModel( 16 | content: [ 17 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 18 | "Hello, can you say: 'You are Anas'", 19 | ), 20 | ], 21 | role: OpenAIChatMessageRole.user, 22 | ), 23 | ], 24 | client: http.Client(), 25 | ); 26 | chatStream.listen((event) { 27 | print(event.choices.first.delta.content); 28 | }); 29 | } 30 | -------------------------------------------------------------------------------- /example/lib/testing_file_ignore.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/dart_openai.dart'; 2 | 3 | void main() async { 4 | Stream completionStream = 5 | OpenAI.instance.completion.createStream( 6 | model: "text-davinci-003", 7 | prompt: "Github is ", 8 | maxTokens: 100, 9 | temperature: 0.5, 10 | topP: 1, 11 | seed: 42, 12 | stop: '###', 13 | n: 2, 14 | ); 15 | 16 | completionStream.listen((event) { 17 | final firstCompletionChoice = event.choices.first; 18 | print(firstCompletionChoice.index); // ... 19 | print(firstCompletionChoice.text); // ... 
20 | }); 21 | } 22 | -------------------------------------------------------------------------------- /example/pubspec.yaml: -------------------------------------------------------------------------------- 1 | name: example 2 | description: A sample command-line application. 3 | version: 1.0.0 4 | publish_to: none 5 | 6 | environment: 7 | sdk: ">=3.0.0 <4.0.0" 8 | 9 | dependencies: 10 | path: ^1.8.0 11 | dart_openai: 12 | path: ../ 13 | envied: ^0.5.1 14 | http: ^1.1.0 15 | 16 | dev_dependencies: 17 | build_runner: ^2.4.6 18 | envied_generator: ^0.5.1 19 | lints: ^3.0.0 20 | test: ^1.24.9 21 | -------------------------------------------------------------------------------- /lib/dart_openai.dart: -------------------------------------------------------------------------------- 1 | export 'src/instance/openai.dart'; 2 | export 'src/core/exceptions/export.dart'; 3 | export 'src/core/models/export.dart'; 4 | -------------------------------------------------------------------------------- /lib/src/core/base/audio/audio.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/src/core/base/entity/interfaces/enpoint.dart'; 2 | 3 | import 'interfaces.dart'; 4 | 5 | abstract class OpenAIAudioBase implements CreateInterface, EndpointInterface {} 6 | -------------------------------------------------------------------------------- /lib/src/core/base/audio/interfaces.dart: -------------------------------------------------------------------------------- 1 | import 'dart:io'; 2 | 3 | import '../../../../dart_openai.dart'; 4 | 5 | abstract class CreateInterface { 6 | Future createSpeech({ 7 | required String model, 8 | required String input, 9 | required String voice, 10 | OpenAIAudioSpeechResponseFormat? responseFormat, 11 | double? speed, 12 | String outputFileName = "output", 13 | Directory? 
outputDirectory, 14 | }); 15 | 16 | Future createTranscription({ 17 | required File file, 18 | required String model, 19 | String? prompt, 20 | OpenAIAudioResponseFormat? responseFormat, 21 | double? temperature, 22 | String? language, 23 | List? timestamp_granularities, 24 | }); 25 | 26 | Future createTranslation({ 27 | required File file, 28 | required String model, 29 | String? prompt, 30 | OpenAIAudioResponseFormat? responseFormat, 31 | double? temperature, 32 | }); 33 | } 34 | -------------------------------------------------------------------------------- /lib/src/core/base/chat/chat.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/src/core/base/entity/interfaces/enpoint.dart'; 2 | 3 | import 'interfaces/create.dart'; 4 | 5 | abstract class OpenAIChatBase implements CreateInterface, EndpointInterface {} 6 | -------------------------------------------------------------------------------- /lib/src/core/base/chat/interfaces/create.dart: -------------------------------------------------------------------------------- 1 | import 'package:http/http.dart' as http; 2 | 3 | import '../../../models/chat/chat.dart'; 4 | import '../../../models/tool/tool.dart'; 5 | 6 | abstract class CreateInterface { 7 | Future create({ 8 | required String model, 9 | required List messages, 10 | List? tools, 11 | toolChoice, 12 | double? temperature, 13 | double? topP, 14 | int? n, 15 | stop, 16 | int? maxTokens, 17 | double? presencePenalty, 18 | double? frequencyPenalty, 19 | Map? logitBias, 20 | String? user, 21 | http.Client? client, 22 | Map? responseFormat, 23 | int? seed, 24 | }); 25 | 26 | Stream createStream({ 27 | required String model, 28 | required List messages, 29 | List? tools, 30 | toolChoice, 31 | double? temperature, 32 | double? topP, 33 | int? n, 34 | stop, 35 | int? maxTokens, 36 | double? presencePenalty, 37 | double? frequencyPenalty, 38 | Map? logitBias, 39 | Map? responseFormat, 40 | String? 
user, 41 | http.Client? client, 42 | int? seed, 43 | }); 44 | 45 | Stream createRemoteFunctionStream({ 46 | required String model, 47 | required List messages, 48 | List? tools, 49 | toolChoice, 50 | double? temperature, 51 | double? topP, 52 | int? n, 53 | stop, 54 | int? maxTokens, 55 | double? presencePenalty, 56 | double? frequencyPenalty, 57 | Map? logitBias, 58 | String? user, 59 | http.Client? client, 60 | Map? responseFormat, 61 | int? seed, 62 | }); 63 | } 64 | -------------------------------------------------------------------------------- /lib/src/core/base/completion.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/src/core/base/entity/interfaces/enpoint.dart'; 2 | 3 | import 'interfaces/create.dart'; 4 | 5 | abstract class OpenAICompletionBase 6 | implements CreateInterface, EndpointInterface {} 7 | -------------------------------------------------------------------------------- /lib/src/core/base/edits/edits.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/src/core/base/entity/interfaces/enpoint.dart'; 2 | 3 | import 'interfaces/create.dart'; 4 | 5 | abstract class OpenAIEditsBase implements CreateInterface, EndpointInterface {} 6 | -------------------------------------------------------------------------------- /lib/src/core/base/edits/interfaces/create.dart: -------------------------------------------------------------------------------- 1 | import '../../../models/edit/edit.dart'; 2 | 3 | import 'package:http/http.dart' as http; 4 | 5 | abstract class CreateInterface { 6 | Future create({ 7 | required String model, 8 | String? input, 9 | required String? instruction, 10 | int? n, 11 | double? temperature, 12 | double? topP, 13 | http.Client? 
client, 14 | }); 15 | } 16 | -------------------------------------------------------------------------------- /lib/src/core/base/embeddings/base.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/src/core/base/entity/interfaces/enpoint.dart'; 2 | 3 | import 'interfaces/create.dart'; 4 | 5 | abstract class OpenAIEmbeddingBase 6 | implements CreateInterface, EndpointInterface {} 7 | -------------------------------------------------------------------------------- /lib/src/core/base/embeddings/interfaces/create.dart: -------------------------------------------------------------------------------- 1 | import '../../../models/embedding/embedding.dart'; 2 | 3 | import 'package:http/http.dart' as http; 4 | 5 | abstract class CreateInterface { 6 | Future create({ 7 | required String model, 8 | required input, 9 | String? user, 10 | http.Client? client, 11 | }); 12 | } 13 | -------------------------------------------------------------------------------- /lib/src/core/base/entity/base.dart: -------------------------------------------------------------------------------- 1 | import 'interfaces/enpoint.dart'; 2 | 3 | abstract class OpenAIEntityBase implements EndpointInterface {} 4 | -------------------------------------------------------------------------------- /lib/src/core/base/entity/interfaces/enpoint.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | @internal 4 | abstract class EndpointInterface { 5 | /// This getter returns the endpoint of the entity. 
6 | String get endpoint; 7 | } 8 | -------------------------------------------------------------------------------- /lib/src/core/base/files/base.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/src/core/base/entity/interfaces/enpoint.dart'; 2 | 3 | import 'interfaces/delete.dart'; 4 | import 'interfaces/list.dart'; 5 | import 'interfaces/retrieve_content.dart'; 6 | import 'interfaces/retrive.dart'; 7 | import 'interfaces/upload.dart'; 8 | 9 | abstract class OpenAIFilesBase 10 | implements 11 | EndpointInterface, 12 | UploadInterface, 13 | ListInterface, 14 | DeleteInterface, 15 | RetrieveInterface, 16 | RetrieveContentInterface {} 17 | -------------------------------------------------------------------------------- /lib/src/core/base/files/interfaces/delete.dart: -------------------------------------------------------------------------------- 1 | import 'package:http/http.dart' as http; 2 | 3 | abstract class DeleteInterface { 4 | Future delete( 5 | String fileId, { 6 | http.Client? client, 7 | }); 8 | } 9 | -------------------------------------------------------------------------------- /lib/src/core/base/files/interfaces/list.dart: -------------------------------------------------------------------------------- 1 | import '../../../models/file/file.dart'; 2 | 3 | import 'package:http/http.dart' as http; 4 | 5 | abstract class ListInterface { 6 | Future> list({ 7 | http.Client? client, 8 | }); 9 | } 10 | -------------------------------------------------------------------------------- /lib/src/core/base/files/interfaces/retrieve_content.dart: -------------------------------------------------------------------------------- 1 | import 'package:http/http.dart' as http; 2 | 3 | abstract class RetrieveContentInterface { 4 | Future retrieveContent( 5 | String fileId, { 6 | http.Client? 
client, 7 | }); 8 | } 9 | -------------------------------------------------------------------------------- /lib/src/core/base/files/interfaces/retrive.dart: -------------------------------------------------------------------------------- 1 | import '../../../models/file/file.dart'; 2 | import 'package:http/http.dart' as http; 3 | 4 | abstract class RetrieveInterface { 5 | Future retrieve( 6 | String fileId, { 7 | http.Client? client, 8 | }); 9 | } 10 | -------------------------------------------------------------------------------- /lib/src/core/base/files/interfaces/upload.dart: -------------------------------------------------------------------------------- 1 | import 'dart:io'; 2 | 3 | import '../../../models/file/file.dart'; 4 | 5 | abstract class UploadInterface { 6 | Future upload({ 7 | required File file, 8 | required String purpose, 9 | }); 10 | } 11 | -------------------------------------------------------------------------------- /lib/src/core/base/fine_tunes/base.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/src/core/base/entity/interfaces/enpoint.dart'; 2 | 3 | import 'interfaces/cancel.dart'; 4 | import 'interfaces/create.dart'; 5 | import 'interfaces/delete.dart'; 6 | import 'interfaces/events.dart'; 7 | import 'interfaces/list.dart'; 8 | import 'interfaces/retrieve.dart'; 9 | import 'interfaces/stream_events.dart'; 10 | 11 | abstract class OpenAIFineTunesBase 12 | implements 13 | EndpointInterface, 14 | CreateInterface, 15 | ListInterface, 16 | RetrieveInterface, 17 | CancelInterface, 18 | EventsInterface, 19 | DeleteInterface, 20 | StreamListInterface {} 21 | -------------------------------------------------------------------------------- /lib/src/core/base/fine_tunes/interfaces/cancel.dart: -------------------------------------------------------------------------------- 1 | import '../../../models/fine_tune/fine_tune.dart'; 2 | 3 | import 'package:http/http.dart' as http; 4 | 
5 | abstract class CancelInterface { 6 | Future cancel( 7 | String fineTuneId, { 8 | http.Client? client, 9 | }); 10 | } 11 | -------------------------------------------------------------------------------- /lib/src/core/base/fine_tunes/interfaces/create.dart: -------------------------------------------------------------------------------- 1 | import '../../../models/fine_tune/fine_tune.dart'; 2 | 3 | import 'package:http/http.dart' as http; 4 | 5 | abstract class CreateInterface { 6 | Future create({ 7 | required String trainingFile, 8 | String? validationFile, 9 | String? model, 10 | int? nEpoches, 11 | int? batchSize, 12 | double? learningRateMultiplier, 13 | double? promptLossWeight, 14 | bool? computeClassificationMetrics, 15 | int? classificationNClass, 16 | int? classificationPositiveClass, 17 | int? classificationBetas, 18 | String? suffix, 19 | http.Client? client, 20 | }); 21 | } 22 | -------------------------------------------------------------------------------- /lib/src/core/base/fine_tunes/interfaces/delete.dart: -------------------------------------------------------------------------------- 1 | import 'package:http/http.dart' as http; 2 | 3 | abstract class DeleteInterface { 4 | Future delete( 5 | String fineTuneId, { 6 | http.Client? client, 7 | }); 8 | } 9 | -------------------------------------------------------------------------------- /lib/src/core/base/fine_tunes/interfaces/events.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/src/core/models/fine_tune/sub_models/event.dart'; 2 | 3 | import 'package:http/http.dart' as http; 4 | 5 | abstract class EventsInterface { 6 | Future> listEvents( 7 | String fineTuneId, { 8 | http.Client? 
client, 9 | }); 10 | } 11 | -------------------------------------------------------------------------------- /lib/src/core/base/fine_tunes/interfaces/list.dart: -------------------------------------------------------------------------------- 1 | import 'package:http/http.dart' as http; 2 | 3 | abstract class ListInterface { 4 | Future list({ 5 | http.Client? client, 6 | }); 7 | } 8 | -------------------------------------------------------------------------------- /lib/src/core/base/fine_tunes/interfaces/retrieve.dart: -------------------------------------------------------------------------------- 1 | import 'package:http/http.dart' as http; 2 | 3 | abstract class RetrieveInterface { 4 | Future retrieve( 5 | String fineTuneId, { 6 | http.Client? client, 7 | }); 8 | } 9 | -------------------------------------------------------------------------------- /lib/src/core/base/fine_tunes/interfaces/stream_events.dart: -------------------------------------------------------------------------------- 1 | import '../../../models/export.dart'; 2 | 3 | import 'package:http/http.dart' as http; 4 | 5 | abstract class StreamListInterface { 6 | Stream listEventsStream( 7 | String fineTuneId, { 8 | http.Client? 
client, 9 | }); 10 | } 11 | -------------------------------------------------------------------------------- /lib/src/core/base/images/base.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/src/core/base/entity/interfaces/enpoint.dart'; 2 | 3 | import '../../enum.dart'; 4 | import 'interfaces/create.dart'; 5 | import 'interfaces/edit.dart'; 6 | import 'interfaces/variations.dart'; 7 | 8 | abstract class OpenAIImagesBase 9 | implements 10 | EndpointInterface, 11 | CreateInterface, 12 | EditInterface, 13 | VariationInterface {} 14 | 15 | extension SizeToStingExtension on OpenAIImageSize { 16 | String get value { 17 | switch (this) { 18 | case OpenAIImageSize.size256: 19 | return "256x256"; 20 | case OpenAIImageSize.size512: 21 | return "512x512"; 22 | case OpenAIImageSize.size1024: 23 | return "1024x1024"; 24 | case OpenAIImageSize.size1792Horizontal: 25 | return "1792x1024"; 26 | case OpenAIImageSize.size1792Vertical: 27 | return "1024x1792"; 28 | } 29 | } 30 | } 31 | 32 | extension StyleToStingExtension on OpenAIImageStyle { 33 | String get value { 34 | return name; 35 | 36 | // ! pretty sure this will be needed in the future in case of adding more styles that can't be got from the `name` field. 37 | // switch (this) { 38 | // case OpenAIImageStyle.vivid: 39 | // return "vivid"; 40 | // case OpenAIImageStyle.natural: 41 | // return "natural"; 42 | // } 43 | } 44 | } 45 | 46 | extension QualityToStingExtension on OpenAIImageQuality { 47 | String get value { 48 | return name; 49 | 50 | // ! pretty sure this will be needed in the future in case of adding more qualities that can't be got from the `name` field. 
51 | // switch (this) { 52 | // case OpenAIImageQuality.hd: 53 | // return "hd"; 54 | // } 55 | } 56 | } 57 | 58 | extension ResponseFormatToStingExtension on OpenAIImageResponseFormat { 59 | String get value { 60 | switch (this) { 61 | case OpenAIImageResponseFormat.url: 62 | return "url"; 63 | case OpenAIImageResponseFormat.b64Json: 64 | return "b64_json"; 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /lib/src/core/base/images/interfaces/create.dart: -------------------------------------------------------------------------------- 1 | import '../../../enum.dart'; 2 | import '../../../models/image/image/image.dart'; 3 | 4 | import 'package:http/http.dart' as http; 5 | 6 | abstract class CreateInterface { 7 | Future create({ 8 | required String prompt, 9 | int? n, 10 | OpenAIImageSize? size, 11 | OpenAIImageResponseFormat? responseFormat, 12 | String? user, 13 | http.Client? client, 14 | }); 15 | } 16 | -------------------------------------------------------------------------------- /lib/src/core/base/images/interfaces/edit.dart: -------------------------------------------------------------------------------- 1 | import 'dart:io'; 2 | 3 | import '../../../enum.dart'; 4 | import '../../../models/image/image/image.dart'; 5 | 6 | abstract class EditInterface { 7 | Future edit({ 8 | required File image, 9 | File? mask, 10 | required String prompt, 11 | int? n, 12 | OpenAIImageSize? size, 13 | OpenAIImageResponseFormat? responseFormat, 14 | String? user, 15 | }); 16 | } 17 | -------------------------------------------------------------------------------- /lib/src/core/base/images/interfaces/variations.dart: -------------------------------------------------------------------------------- 1 | import 'dart:io'; 2 | 3 | import '../../../../../dart_openai.dart'; 4 | 5 | abstract class VariationInterface { 6 | Future variation({ 7 | required File image, 8 | int? n, 9 | OpenAIImageSize? 
size, 10 | OpenAIImageResponseFormat? responseFormat, 11 | String? user, 12 | }); 13 | } 14 | -------------------------------------------------------------------------------- /lib/src/core/base/interfaces/create.dart: -------------------------------------------------------------------------------- 1 | import '../../models/completion/completion.dart'; 2 | 3 | import 'package:http/http.dart' as http; 4 | 5 | abstract class CreateInterface { 6 | Future create({ 7 | required String model, 8 | prompt, 9 | String? suffix, 10 | int? maxTokens, 11 | double? temperature, 12 | double? topP, 13 | int? n, 14 | int? logprobs, 15 | bool? echo, 16 | stop, 17 | double? presencePenalty, 18 | double? frequencyPenalty, 19 | int? bestOf, 20 | Map? logitBias, 21 | String? user, 22 | http.Client? client, 23 | }); 24 | 25 | Stream createStream({ 26 | required String model, 27 | prompt, 28 | String? suffix, 29 | int? maxTokens, 30 | double? temperature, 31 | double? topP, 32 | int? n, 33 | int? logprobs, 34 | bool? echo, 35 | stop, 36 | double? presencePenalty, 37 | double? frequencyPenalty, 38 | int? bestOf, 39 | Map? logitBias, 40 | String? user, 41 | http.Client? client, 42 | }); 43 | 44 | Stream createStreamText({ 45 | required String model, 46 | prompt, 47 | String? suffix, 48 | int? maxTokens, 49 | double? temperature, 50 | double? topP, 51 | int? n, 52 | int? logprobs, 53 | bool? echo, 54 | stop, 55 | double? presencePenalty, 56 | double? frequencyPenalty, 57 | int? bestOf, 58 | Map? logitBias, 59 | String? user, 60 | http.Client? 
client, 61 | }); 62 | } 63 | -------------------------------------------------------------------------------- /lib/src/core/base/model/base.dart: -------------------------------------------------------------------------------- 1 | import '../entity/base.dart'; 2 | import 'interfaces/delete.dart'; 3 | import 'interfaces/list.dart'; 4 | import 'interfaces/one.dart'; 5 | 6 | abstract class OpenAIModelBase 7 | implements 8 | OpenAIEntityBase, 9 | ListInterface, 10 | RetrieveInterface, 11 | DeleteInterface {} 12 | -------------------------------------------------------------------------------- /lib/src/core/base/model/interfaces/delete.dart: -------------------------------------------------------------------------------- 1 | import 'package:http/http.dart' as http; 2 | 3 | abstract class DeleteInterface { 4 | Future delete( 5 | String fineTuneId, { 6 | http.Client? client, 7 | }); 8 | } 9 | -------------------------------------------------------------------------------- /lib/src/core/base/model/interfaces/list.dart: -------------------------------------------------------------------------------- 1 | import '../../../models/model/model.dart'; 2 | 3 | import "package:http/http.dart" as http; 4 | 5 | abstract class ListInterface { 6 | Future> list({ 7 | http.Client? client, 8 | }); 9 | } 10 | -------------------------------------------------------------------------------- /lib/src/core/base/model/interfaces/one.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/src/core/models/model/model.dart'; 2 | 3 | import 'package:http/http.dart' as http; 4 | 5 | abstract class RetrieveInterface { 6 | Future retrieve( 7 | String modelId, { 8 | http.Client? 
client, 9 | }); 10 | } 11 | -------------------------------------------------------------------------------- /lib/src/core/base/moderations/base.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/src/core/base/entity/interfaces/enpoint.dart'; 2 | 3 | import 'interfaces/create.dart'; 4 | 5 | abstract class OpenAIModerationBase 6 | implements CreateInterface, EndpointInterface {} 7 | -------------------------------------------------------------------------------- /lib/src/core/base/moderations/interfaces/create.dart: -------------------------------------------------------------------------------- 1 | import '../../../models/moderation/moderation.dart'; 2 | 3 | import 'package:http/http.dart' as http; 4 | 5 | abstract class CreateInterface { 6 | Future create({ 7 | required String input, 8 | String? model, 9 | http.Client? client, 10 | }); 11 | } 12 | -------------------------------------------------------------------------------- /lib/src/core/base/openai_client/base.dart: -------------------------------------------------------------------------------- 1 | abstract class OpenAIClientBase {} 2 | -------------------------------------------------------------------------------- /lib/src/core/builder/base_api_url.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | import '../constants/config.dart'; 4 | 5 | /// This class is responsible for building the API url for all the requests endpoints 6 | @immutable 7 | @internal 8 | abstract class BaseApiUrlBuilder { 9 | /// This is used to build the API url for all the requests, it will return a [String]. 10 | /// 11 | /// 12 | /// [endpoint] is the endpoint of the request. 13 | /// if an [id] is pr =ovided, it will be added to the url as well. 14 | /// if a [query] is provided, it will be added to the url as well. 15 | @internal 16 | static String build(String endpoint, [String? 
id, String? query]) { 17 | final baseUrl = OpenAIConfig.baseUrl; 18 | final version = OpenAIConfig.version; 19 | final usedEndpoint = _handleEndpointsStarting(endpoint); 20 | 21 | String apiLink = "$baseUrl"; 22 | apiLink += "/$version"; 23 | apiLink += "$usedEndpoint"; 24 | 25 | if (id != null) { 26 | apiLink += "/$id"; 27 | } else if (query != null) { 28 | apiLink += "?$query"; 29 | } 30 | 31 | return apiLink; 32 | } 33 | 34 | // This is used to handle the endpoints that don't start with a slash. 35 | static String _handleEndpointsStarting(String endpoint) { 36 | return endpoint.startsWith("/") ? endpoint : "/$endpoint"; 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /lib/src/core/builder/headers.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | import 'package:dart_openai/src/core/utils/logger.dart'; 3 | 4 | /// {@template headers_builder} 5 | /// This class is responsible for building the headers for all the requests. 6 | /// {@endtemplate} 7 | @immutable 8 | @internal 9 | abstract class HeadersBuilder { 10 | /// {@template headers_builder_api_key} 11 | /// This is used to store the API key if it is set. 12 | /// {@endtemplate} 13 | static String? _apiKey; 14 | 15 | /// {@template headers_builder_organization} 16 | /// This is used to store the organization id if it is set. 17 | /// {@endtemplate} 18 | static String? _organization; 19 | 20 | /// This represents additional hezders to be added in all requests made by the package/ 21 | static Map _additionalHeadersToRequests = {}; 22 | 23 | /// {@macro headers_builder_organization} 24 | @internal 25 | static String? get organization => _organization; 26 | 27 | /// This is used to check if the organization id is set or not. 28 | static bool get isOrganizationSet => organization != null; 29 | 30 | /// {@macro headers_builder_api_key} 31 | @internal 32 | static String? 
get apiKey => _apiKey; 33 | 34 | @internal 35 | static set organization(String? organizationId) { 36 | _organization = organizationId; 37 | OpenAILogger.logOrganization(_organization); 38 | } 39 | 40 | @internal 41 | static set apiKey(String? apiKey) { 42 | _apiKey = apiKey; 43 | OpenAILogger.logAPIKey(_apiKey); 44 | } 45 | 46 | /// {@macro headers_builder} 47 | /// 48 | /// it will return a [Map]. 49 | /// 50 | /// if the [organization] is set, it will be added to the headers as well. 51 | /// If in anyhow the API key is not set, it will throw an [AssertionError] while debugging. 52 | @internal 53 | static Map build() { 54 | Map headers = { 55 | 'Content-Type': 'application/json', 56 | }; 57 | 58 | assert( 59 | apiKey != null, 60 | """ 61 | You must set the API key before making building any headers for a request.""", 62 | ); 63 | headers = { 64 | ...headers, 65 | ..._additionalHeadersToRequests, 66 | if (isOrganizationSet) 'OpenAI-Organization': organization!, 67 | "Authorization": "Bearer $apiKey", 68 | }; 69 | 70 | return headers; 71 | } 72 | 73 | /// Will save the given [headers] to the [_additionalHeadersToRequests] map. so it will be used in all requests. 74 | @internal 75 | static void includeHeaders(Map headers) { 76 | _additionalHeadersToRequests = { 77 | ..._additionalHeadersToRequests, 78 | ...headers, 79 | }; 80 | 81 | OpenAILogger.logIncludedHeaders(_additionalHeadersToRequests); 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /lib/src/core/constants/config.dart: -------------------------------------------------------------------------------- 1 | import 'package:dart_openai/src/core/constants/strings.dart'; 2 | import 'package:dart_openai/src/core/utils/logger.dart'; 3 | import 'package:meta/meta.dart'; 4 | 5 | /// {@template openai_config} 6 | /// This class is responsible about general configs for the SDK such as the base url.. 
7 | /// {@endtemplate} 8 | @immutable 9 | @internal 10 | abstract class OpenAIConfig { 11 | /// {@template openai_config_default_requests_timeOut} 12 | /// The default maximum duration a request can take, this will be applied to all requests, defaults to 30 seconds. 13 | /// {@endtemplate} 14 | static final defaultRequestsTimeOut = Duration(seconds: 30); 15 | 16 | /// {@template openai_config_requests_timeOut} 17 | /// The maximum duration a request can take, this will be applied to all requests, defaults to 30 seconds. 18 | /// if you need custom timeout for each method individulaly, consider using the `client` field in each method and pass a custom HTTP client to it. 19 | /// {@endtemplate} 20 | static Duration requestsTimeOut = defaultRequestsTimeOut; 21 | 22 | /// {@template openai_config_base_url} 23 | /// This is base API url for all the requests. 24 | /// {@endtemplate} 25 | static String? _baseUrl; 26 | 27 | /// {@template openai_config_version} 28 | /// This is the version of the API. 29 | /// {@endtemplate} 30 | static String get version => OpenAIStrings.version; 31 | 32 | /// {@macro openai_config_base_url} 33 | @internal 34 | static String get baseUrl { 35 | return _baseUrl ?? OpenAIStrings.defaultBaseUrl; 36 | } 37 | 38 | @internal 39 | static set baseUrl(String? baseUrl) { 40 | _baseUrl = baseUrl; 41 | OpenAILogger.logBaseUrl(_baseUrl); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /lib/src/core/constants/endpoints.dart: -------------------------------------------------------------------------------- 1 | /// {@template openai_endpoints} 2 | /// The class holding all endpoints for the API that are used. 3 | /// {@endtemplate} 4 | class OpenAIApisEndpoints { 5 | /// none. 6 | final completion = "/completions"; 7 | 8 | /// none. 9 | final audio = "/audio"; 10 | 11 | /// none. 12 | final chat = "/chat/completions"; 13 | 14 | /// none. 15 | final edits = "/edits"; 16 | 17 | /// none. 
18 | final embeddings = "/embeddings"; 19 | 20 | /// none. 21 | final files = "/files"; 22 | 23 | /// none. 24 | final fineTunes = "/fine-tunes"; 25 | 26 | /// none. 27 | final images = "/images"; 28 | 29 | /// none. 30 | final models = "/models"; 31 | 32 | /// none. 33 | final moderation = "/moderations"; 34 | 35 | /// {@macro openai_endpoints} 36 | static const OpenAIApisEndpoints _instance = OpenAIApisEndpoints._(); 37 | 38 | /// {@macro openai_endpoints} 39 | static OpenAIApisEndpoints get instance => _instance; 40 | 41 | /// {@macro openai_endpoints} 42 | const OpenAIApisEndpoints._(); 43 | } 44 | -------------------------------------------------------------------------------- /lib/src/core/constants/strings.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | import 'endpoints.dart'; 4 | 5 | /// {@template openai_strings} 6 | /// This class is responsible for storing general [String] constants. 7 | /// {@endtemplate} 8 | @internal 9 | @immutable 10 | abstract class OpenAIStrings { 11 | /// This is the capitalized version of "openai" name used in the SDK, like the logger name. 12 | static const openai = 'OpenAI'; 13 | 14 | /// This is the version of the API, in case it changes, it will be updated here. 15 | static const version = 'v1'; 16 | 17 | /// This is the default base url for all the requests. 18 | static const defaultBaseUrl = 'https://api.openai.com'; 19 | 20 | /// The verb name for the [GET] method. 21 | static const getMethod = 'GET'; 22 | 23 | /// The verb name for the [POST] method. 24 | static const postMethod = 'POST'; 25 | 26 | /// The identifier and initial value to exclude for stream responses (SSE). 27 | static const streamResponseStart = "data: "; 28 | 29 | /// The identifier and final value to exclude for stream responses (SSE). 30 | static const streamResponseEnd = "[DONE]"; 31 | 32 | /// The name of the error field a failed response will have. 
33 | static const errorFieldKey = 'error'; 34 | 35 | /// The name of the message field a failed response will have. 36 | static const messageFieldKey = 'message'; 37 | 38 | /// {@macro openai_endpoints} 39 | static final endpoints = OpenAIApisEndpoints.instance; 40 | } 41 | -------------------------------------------------------------------------------- /lib/src/core/enum.dart: -------------------------------------------------------------------------------- 1 | enum OpenAIImageSize { 2 | size256, 3 | size512, 4 | size1024, 5 | size1792Horizontal, 6 | size1792Vertical 7 | } 8 | 9 | enum OpenAIImageStyle { vivid, natural } 10 | 11 | enum OpenAIImageQuality { hd } 12 | 13 | enum OpenAIImageResponseFormat { url, b64Json } 14 | 15 | enum OpenAIAudioTimestampGranularity { word, segment } 16 | 17 | enum OpenAIAudioResponseFormat { json, text, srt, verbose_json, vtt } 18 | 19 | enum OpenAIAudioSpeechResponseFormat { mp3, opus, aac, flac } 20 | 21 | enum OpenAIChatMessageRole { system, user, assistant, function, tool } 22 | -------------------------------------------------------------------------------- /lib/src/core/exceptions/api_key_not_set.dart: -------------------------------------------------------------------------------- 1 | /// {@template api_key_not_set_exception} 2 | /// This exception is thrown when the API key is not set and the user tries to make a request. 3 | /// {@endtemplate} 4 | class MissingApiKeyException implements Exception { 5 | /// The message to be displayed when the exception is thrown. 
6 | final String message; 7 | 8 | /// {@macro api_key_not_set_exception} 9 | MissingApiKeyException(this.message); 10 | 11 | @override 12 | String toString() { 13 | return message; 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /lib/src/core/exceptions/export.dart: -------------------------------------------------------------------------------- 1 | export './api_key_not_set.dart'; 2 | export './request_failure.dart'; 3 | export './unexpected.dart'; 4 | -------------------------------------------------------------------------------- /lib/src/core/exceptions/request_failure.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template http_request_failure_exception} 4 | /// This exception is thrown when a request fails, from the API. 5 | /// {@endtemplate} 6 | @immutable 7 | class RequestFailedException implements Exception { 8 | /// The error message of the request that failed, if any. 9 | final String message; 10 | 11 | /// The status code of the request that failed, if any. 12 | final int statusCode; 13 | 14 | /// {@macro http_request_failure_exception} 15 | RequestFailedException(this.message, this.statusCode); 16 | 17 | @override 18 | String toString() { 19 | return 'RequestFailedException(message: $message, statusCode: $statusCode)'; 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /lib/src/core/exceptions/unexpected.dart: -------------------------------------------------------------------------------- 1 | /// {@template unexpected_response_exception} 2 | /// This exception is thrown when an unexpected non-error response is got, this means that the response from the request does not have an error sign (the error field), but it is not no the success response as well. 
3 | /// {@endtemplate} 4 | class OpenAIUnexpectedException implements Exception { 5 | /// The message to be displayed when the exception is thrown. 6 | final String message; 7 | 8 | final String? rawResponseBody; 9 | 10 | /// {@macro unexpected_response_exception} 11 | OpenAIUnexpectedException(this.message, [this.rawResponseBody]); 12 | 13 | @override 14 | String toString() { 15 | return message; 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /lib/src/core/models/audio/audio.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_audio_model} 4 | /// This class represents the audio model of the OpenAI API, which is used and get returned while using the [OpenAIAudio] methods. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIAudioModel { 8 | /// The text response from the audio requests. 9 | /// This is the only field that is returned from the API. 10 | final String text; 11 | final String? task; 12 | final String? language; 13 | final double? duration; 14 | 15 | final List? words; 16 | final List? segments; 17 | 18 | @override 19 | int get hashCode => text.hashCode; 20 | 21 | /// {@macro openai_audio_model} 22 | const OpenAIAudioModel({ 23 | required this.text, 24 | this.task, 25 | this.language, 26 | this.duration, 27 | this.words, 28 | this.segments, 29 | }); 30 | 31 | /// This is used to convert a [Map] object to a [OpenAIAudioModel] object. 32 | factory OpenAIAudioModel.fromMap(Map json) { 33 | return OpenAIAudioModel( 34 | text: json['text'], 35 | task: json['task'], 36 | language: json['language'], 37 | duration: json['duration'], 38 | words: json['words'] != null 39 | ? List.from(json['words'].map((x) => Word.fromMap(x))) 40 | : null, 41 | segments: json['segments'] != null 42 | ? 
List.from(json['segments'].map((x) => Segment.fromMap(x))) 43 | : null, 44 | ); 45 | } 46 | 47 | /// This method used to convert the [OpenAIAudioModel] to a [Map] object. 48 | /// 49 | /// could be useful if you want to save an audio response to a database. 50 | Map toMap() { 51 | return { 52 | 'text': text, 53 | if (task != null) 'task': task, 54 | if (language != null) 'language': language, 55 | if (duration != null) 'duration': duration, 56 | if (words != null) 'words': words, 57 | if (segments != null) 'segments': segments, 58 | }; 59 | } 60 | 61 | @override 62 | String toString() { 63 | return 'OpenAIAudioModel(text: $text, task: $task, language: $language, duration: $duration, words: $words, segments: $segments)'; 64 | } 65 | 66 | @override 67 | bool operator ==(Object other) { 68 | if (identical(this, other)) return true; 69 | 70 | return other is OpenAIAudioModel && 71 | other.text == text && 72 | other.task == task && 73 | other.language == language && 74 | other.duration == duration && 75 | other.words == words && 76 | other.segments == segments; 77 | } 78 | } 79 | 80 | final class Word { 81 | final String word; 82 | final double start; 83 | final double end; 84 | 85 | const Word({ 86 | required this.word, 87 | required this.start, 88 | required this.end, 89 | }); 90 | 91 | factory Word.fromMap(Map json) { 92 | return Word( 93 | word: json['word'], 94 | start: json['start'], 95 | end: json['end'], 96 | ); 97 | } 98 | 99 | Map toMap() { 100 | return { 101 | 'word': word, 102 | 'start': start, 103 | 'end': end, 104 | }; 105 | } 106 | 107 | @override 108 | String toString() => 'Word(word: $word, start: $start, end: $end)'; 109 | 110 | @override 111 | bool operator ==(Object other) { 112 | if (identical(this, other)) return true; 113 | 114 | return other is Word && 115 | other.word == word && 116 | other.start == start && 117 | other.end == end; 118 | } 119 | } 120 | 121 | final class Segment { 122 | final int id; 123 | final int seek; 124 | final double 
start; 125 | final double end; 126 | final String text; 127 | final List tokens; 128 | final double temperature; 129 | final double avg_logprob; 130 | final double compression_ratio; 131 | final double no_speech_prob; 132 | 133 | const Segment({ 134 | required this.id, 135 | required this.seek, 136 | required this.start, 137 | required this.end, 138 | required this.text, 139 | required this.tokens, 140 | required this.temperature, 141 | required this.avg_logprob, 142 | required this.compression_ratio, 143 | required this.no_speech_prob, 144 | }); 145 | 146 | factory Segment.fromMap(Map json) { 147 | return Segment( 148 | id: json['id'], 149 | seek: json['seek'], 150 | start: json['start'], 151 | end: json['end'], 152 | text: json['text'], 153 | tokens: List.from(json['tokens']), 154 | temperature: json['temperature'], 155 | avg_logprob: json['avg_logprob'], 156 | compression_ratio: json['compression_ratio'], 157 | no_speech_prob: json['no_speech_prob'], 158 | ); 159 | } 160 | 161 | Map toMap() { 162 | return { 163 | 'id': id, 164 | 'seek': seek, 165 | 'start': start, 166 | 'end': end, 167 | 'text': text, 168 | 'tokens': tokens, 169 | 'temperature': temperature, 170 | 'avg_logprob': avg_logprob, 171 | 'compression_ratio': compression_ratio, 172 | 'no_speech_prob': no_speech_prob, 173 | }; 174 | } 175 | 176 | @override 177 | String toString() => 178 | 'Segment(id: $id, seek: $seek, start: $start, end: $end, text: $text, tokens: $tokens, temperature: $temperature, avg_logprob: $avg_logprob, compression_ratio: $compression_ratio, no_speech_prob: $no_speech_prob)'; 179 | 180 | @override 181 | bool operator ==(Object other) { 182 | if (identical(this, other)) return true; 183 | 184 | return other is Segment && 185 | other.id == id && 186 | other.seek == seek && 187 | other.start == start && 188 | other.end == end && 189 | other.text == text && 190 | other.tokens == tokens && 191 | other.temperature == temperature && 192 | other.avg_logprob == avg_logprob && 193 | 
other.compression_ratio == compression_ratio && 194 | other.no_speech_prob == no_speech_prob; 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /lib/src/core/models/chat/chat.dart: -------------------------------------------------------------------------------- 1 | import 'package:collection/collection.dart'; 2 | import 'package:meta/meta.dart'; 3 | 4 | import 'sub_models/choices/choices.dart'; 5 | import 'sub_models/usage.dart'; 6 | 7 | export 'sub_models/usage.dart'; 8 | export 'sub_models/choices/choices.dart'; 9 | export 'stream/chat.dart'; 10 | 11 | /// {@template openai_chat_completion_model} 12 | /// This class represents the chat completion response model of the OpenAI API, which is used and get returned while using the [OpenAIChat] methods. 13 | /// {@endtemplate} 14 | @immutable 15 | final class OpenAIChatCompletionModel { 16 | /// The [id]entifier of the chat completion. 17 | final String id; 18 | 19 | /// The date and time when the chat completion was [created]. 20 | final DateTime created; 21 | 22 | /// The [choices] of the chat completion. 23 | final List choices; 24 | 25 | /// The [usage] of the chat completion. 26 | final OpenAIChatCompletionUsageModel usage; 27 | 28 | /// This fingerprint represents the backend configuration that the model runs with. 29 | final String? systemFingerprint; 30 | 31 | /// Weither the chat completion have at least one choice in [choices]. 32 | bool get haveChoices => choices.isNotEmpty; 33 | 34 | /// Weither the chat completion have system fingerprint. 
35 | bool get haveSystemFingerprint => systemFingerprint != null; 36 | 37 | @override 38 | int get hashCode { 39 | return id.hashCode ^ 40 | created.hashCode ^ 41 | choices.hashCode ^ 42 | usage.hashCode ^ 43 | systemFingerprint.hashCode; 44 | } 45 | 46 | /// {@macro openai_chat_completion_model} 47 | const OpenAIChatCompletionModel({ 48 | required this.id, 49 | required this.created, 50 | required this.choices, 51 | required this.usage, 52 | required this.systemFingerprint, 53 | }); 54 | 55 | /// This is used to convert a [Map] object to a [OpenAIChatCompletionModel] object. 56 | factory OpenAIChatCompletionModel.fromMap(Map json) { 57 | return OpenAIChatCompletionModel( 58 | id: json['id'], 59 | created: DateTime.fromMillisecondsSinceEpoch(json['created'] * 1000), 60 | choices: (json['choices'] as List) 61 | .map((choice) => OpenAIChatCompletionChoiceModel.fromMap(choice)) 62 | .toList(), 63 | usage: OpenAIChatCompletionUsageModel.fromMap(json['usage']), 64 | systemFingerprint: json['system_fingerprint'], 65 | ); 66 | } 67 | 68 | /// This is used to convert a [OpenAIChatCompletionModel] object to a [Map] object. 
69 | Map toMap() { 70 | return { 71 | "id": id, 72 | "created": created.millisecondsSinceEpoch, 73 | "choices": choices.map((e) => e.toMap()).toList(), 74 | "usage": usage.toMap(), 75 | "system_fingerprint": systemFingerprint, 76 | }; 77 | } 78 | 79 | @override 80 | String toString() { 81 | return 'OpenAIChatCompletionModel(id: $id, created: $created, choices: $choices, usage: $usage, systemFingerprint: $systemFingerprint)'; 82 | } 83 | 84 | @override 85 | bool operator ==(Object other) { 86 | const ListEquality listEquals = ListEquality(); 87 | if (identical(this, other)) return true; 88 | 89 | return other is OpenAIChatCompletionModel && 90 | other.id == id && 91 | other.created == created && 92 | listEquals.equals(other.choices, choices) && 93 | other.usage == usage && 94 | other.systemFingerprint == systemFingerprint; 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /lib/src/core/models/chat/etc/message_adapter.dart: -------------------------------------------------------------------------------- 1 | import '../chat.dart'; 2 | 3 | /// This is a mixin class that contains helper function(s) to adapt old text-based content to the new implementation of the content that accepts a list of content types like images. 4 | mixin class OpenAIMessageDynamicContentFromFieldAdapter { 5 | /// This is a helper function to adapt old text-based content to the new implementation of the content that accepts a list of content types like images.. 
6 | static List 7 | dynamicContentFromField( 8 | fieldData, 9 | ) { 10 | if (fieldData is String) { 11 | return _singleItemListFrom(fieldData); 12 | } else if (fieldData is List) { 13 | return _listOfContentItemsFrom(fieldData); 14 | } else { 15 | throw Exception( 16 | 'Invalid content type, nor text or list, please report this issue.', 17 | ); 18 | } 19 | } 20 | 21 | static List 22 | _singleItemListFrom(String directTextContent) { 23 | return [ 24 | OpenAIChatCompletionChoiceMessageContentItemModel.text( 25 | directTextContent, 26 | ), 27 | ]; 28 | } 29 | 30 | static List 31 | _listOfContentItemsFrom(List listOfContentsItems) { 32 | return (listOfContentsItems).map( 33 | (item) { 34 | if (item is! Map) { 35 | throw Exception('Invalid content item, please report this issue.'); 36 | } else { 37 | final asMap = item as Map; 38 | 39 | return OpenAIChatCompletionChoiceMessageContentItemModel.fromMap( 40 | asMap, 41 | ); 42 | } 43 | }, 44 | ).toList(); 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /lib/src/core/models/chat/stream/chat.dart: -------------------------------------------------------------------------------- 1 | import 'package:collection/collection.dart'; 2 | 3 | import 'sub_models/choices/choices.dart'; 4 | 5 | export 'sub_models/choices/choices.dart'; 6 | export 'sub_models/usage.dart'; 7 | 8 | /// {@template openai_stream_chat_completion} 9 | /// The [OpenAIStreamChatCompletionModel] class represents the chat completion response model of the OpenAI API, which is used and get returned while using the chat methods that leverages [Stream] functionality. 10 | /// {@endtemplate} 11 | final class OpenAIStreamChatCompletionModel { 12 | /// The [id] of the chat completion. 13 | final String id; 14 | 15 | /// The date and time when the chat completion is [created]. 16 | final DateTime created; 17 | 18 | /// The [choices] of the chat completion. 
  final List choices;

  /// This fingerprint represents the backend configuration that the model runs with.
  final String? systemFingerprint;

  /// Whether the chat completion has at least one choice in [choices].
  bool get haveChoices => choices.isNotEmpty;

  /// Whether the chat completion has a [systemFingerprint] or not.
  bool get haveSystemFingerprint => systemFingerprint != null;

  @override
  int get hashCode {
    return id.hashCode ^
        created.hashCode ^
        choices.hashCode ^
        systemFingerprint.hashCode;
  }

  /// {@macro openai_stream_chat_completion}
  const OpenAIStreamChatCompletionModel({
    required this.id,
    required this.created,
    required this.choices,
    required this.systemFingerprint,
  });

  /// {@macro openai_stream_chat_completion}
  /// This is used to convert a [Map] object to a [OpenAIStreamChatCompletionModel] object.
  factory OpenAIStreamChatCompletionModel.fromMap(Map json) {
    return OpenAIStreamChatCompletionModel(
      id: json['id'],
      // The `created` field arrives as a Unix timestamp in seconds.
      created: DateTime.fromMillisecondsSinceEpoch(json['created'] * 1000),
      choices: (json['choices'] as List)
          .map(
            (choice) => OpenAIStreamChatCompletionChoiceModel.fromMap(choice),
          )
          .toList(),
      systemFingerprint: json['system_fingerprint'],
    );
  }

  //! This don't need toMap()?

  @override
  String toString() {
    return 'OpenAIStreamChatCompletionModel(id: $id, created: $created, choices: $choices, systemFingerprint: $systemFingerprint)';
  }

  @override
  bool operator ==(Object other) {
    // Element-wise comparison for [choices].
    const ListEquality listEquals = ListEquality();
    if (identical(this, other)) return true;

    return other is OpenAIStreamChatCompletionModel &&
        other.id == id &&
        other.created == created &&
        listEquals.equals(other.choices, choices) &&
        other.systemFingerprint == systemFingerprint;
  }
}
--------------------------------------------------------------------------------
/lib/src/core/models/chat/stream/sub_models/choices/choices.dart:
--------------------------------------------------------------------------------
import 'sub_models/delta.dart';
export "sub_models/delta.dart";

/// {@template openai_stream_chat_completion_choice}
/// The [OpenAIStreamChatCompletionChoiceModel] class represents the chat completion choice response model of the OpenAI API, which is used and get returned while using the chat methods that leverages [Stream] functionality.
/// {@endtemplate}
final class OpenAIStreamChatCompletionChoiceModel {
  /// The [index] of the choice.
  final int index;

  /// The [delta] of the choice.
  final OpenAIStreamChatCompletionChoiceDeltaModel delta;

  /// The [finishReason] of the choice.
  final String? finishReason;

  /// Whether the choice has a finish reason or not.
  bool get hasFinishReason => finishReason != null;

  @override
  int get hashCode {
    return index.hashCode ^ delta.hashCode ^ finishReason.hashCode;
  }

  /// {@macro openai_stream_chat_completion_choice}
  const OpenAIStreamChatCompletionChoiceModel({
    required this.index,
    required this.delta,
    required this.finishReason,
  });

  /// {@macro openai_stream_chat_completion_choice}
  /// This is used to convert a [Map] object to a [OpenAIStreamChatCompletionChoiceModel] object.
  factory OpenAIStreamChatCompletionChoiceModel.fromMap(
    Map json,
  ) {
    return OpenAIStreamChatCompletionChoiceModel(
      index: json['index'],
      delta: OpenAIStreamChatCompletionChoiceDeltaModel.fromMap(json['delta']),
      finishReason: json['finish_reason'],
    );
  }

  @override
  String toString() {
    return 'OpenAIStreamChatCompletionChoiceModel(index: $index, delta: $delta, finishReason: $finishReason)';
  }

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;

    return other is OpenAIStreamChatCompletionChoiceModel &&
        other.index == index &&
        other.delta == delta &&
        other.finishReason == finishReason;
  }
}
--------------------------------------------------------------------------------
/lib/src/core/models/chat/stream/sub_models/choices/sub_models/delta.dart:
--------------------------------------------------------------------------------
import 'package:dart_openai/dart_openai.dart';

import '../../../../etc/message_adapter.dart';

/// {@template openai_stream_chat_completion_choice_delta_model}
/// This contains the [role] and [content] of the choice of the chat response.
/// {@endtemplate}
final class OpenAIStreamChatCompletionChoiceDeltaModel {
  /// The [role] of the message.
  final OpenAIChatMessageRole? role;

  /// The [content] of the message.
  final List? content;

  /// The [toolCalls] of the message.
  final List? toolCalls;

  /// Whether the message has tool calls or not.
  bool get haveToolCalls => toolCalls != null;

  /// Whether the message has a role or not.
  bool get haveRole => role != null;

  /// Whether the message has a content or not.
  bool get haveContent => content != null;

  @override
  int get hashCode {
    return role.hashCode ^ content.hashCode ^ toolCalls.hashCode;
  }

  /// {@macro openai_stream_chat_completion_choice_delta_model}
  const OpenAIStreamChatCompletionChoiceDeltaModel({
    required this.role,
    required this.content,
    this.toolCalls,
  });

  /// This is used to convert a [Map] object to a [OpenAIStreamChatCompletionChoiceDeltaModel] object.
  factory OpenAIStreamChatCompletionChoiceDeltaModel.fromMap(
    Map json,
  ) {
    return OpenAIStreamChatCompletionChoiceDeltaModel(
      role: json['role'] != null
          ? OpenAIChatMessageRole.values
              .firstWhere((role) => role.name == json['role'])
          : null,
      content: json['content'] != null
          ? OpenAIMessageDynamicContentFromFieldAdapter.dynamicContentFromField(
              json['content'],
            )
          : null,
      toolCalls: json['tool_calls'] != null
          ? (json['tool_calls'] as List)
              .map((toolCall) => OpenAIStreamResponseToolCall.fromMap(toolCall))
              .toList()
          : null,
    );
  }

  /// This method used to convert the [OpenAIStreamChatCompletionChoiceDeltaModel] to a [Map] object.
  Map toMap() {
    return {
      "role": role?.name,
      "content": content,
      "tool_calls": toolCalls?.map((toolCall) => toolCall.toMap()).toList(),
    };
  }

  @override
  String toString() {
    // NOTE(review): the string below reports the wrong class name
    // ('OpenAIChatCompletionChoiceMessageModel' instead of
    // 'OpenAIStreamChatCompletionChoiceDeltaModel'); left unchanged in this
    // documentation-only pass.
    String str = 'OpenAIChatCompletionChoiceMessageModel('
        'role: $role, '
        'content: $content, ';
    if (toolCalls != null) {
      str += 'toolCalls: $toolCalls, ';
    }

    str += ')';

    return str;
  }

  @override
  bool operator ==(Object other) {
    // NOTE(review): [content] and [toolCalls] are lists compared by identity
    // here, unlike sibling models that use ListEquality — confirm intended.
    if (identical(this, other)) return true;

    return other is OpenAIStreamChatCompletionChoiceDeltaModel &&
        other.role == role &&
        other.content == content &&
        other.toolCalls == toolCalls;
  }
}
--------------------------------------------------------------------------------
/lib/src/core/models/chat/stream/sub_models/usage.dart:
--------------------------------------------------------------------------------
export "choices/choices.dart";

/// {@template openai_stream_chat_completion_usage}
/// The [OpenAIStreamChatCompletionUsageModel] class represents the usage model of the OpenAI API, which is used and get returned while using the chat methods that leverages [Stream] functionality.
/// {@endtemplate}
final class OpenAIStreamChatCompletionUsageModel {
  /// The number of tokens used for the prompt(s).
  final int promptTokens;

  /// The number of tokens used for the chat completion(s).
  final int completionTokens;

  /// The total number of tokens used for the chat completion(s).
  /// This is the sum of [promptTokens] and [completionTokens].
  final int totalTokens;

  @override
  int get hashCode {
    return promptTokens.hashCode ^
        completionTokens.hashCode ^
        totalTokens.hashCode;
  }

  /// {@macro openai_stream_chat_completion_usage}
  const OpenAIStreamChatCompletionUsageModel({
    required this.promptTokens,
    required this.completionTokens,
    required this.totalTokens,
  });

  /// {@macro openai_stream_chat_completion_usage}
  /// This is used to convert a [Map] object to a [OpenAIStreamChatCompletionUsageModel] object.
  factory OpenAIStreamChatCompletionUsageModel.fromMap(
    Map json,
  ) {
    return OpenAIStreamChatCompletionUsageModel(
      promptTokens: json['prompt_tokens'],
      completionTokens: json['completion_tokens'],
      totalTokens: json['total_tokens'],
    );
  }

  @override
  String toString() {
    // NOTE(review): the string below reports the non-stream class name
    // ('OpenAIChatCompletionUsageModel'); left unchanged in this
    // documentation-only pass.
    return 'OpenAIChatCompletionUsageModel(promptTokens: $promptTokens, completionTokens: $completionTokens, totalTokens: $totalTokens)';
  }

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;

    return other is OpenAIStreamChatCompletionUsageModel &&
        other.promptTokens == promptTokens &&
        other.completionTokens == completionTokens &&
        other.totalTokens == totalTokens;
  }
}
--------------------------------------------------------------------------------
/lib/src/core/models/chat/sub_models/choices/choices.dart:
--------------------------------------------------------------------------------
import 'sub_models/log_probs/log_probs.dart';
import 'sub_models/message.dart';

/// {@template openai_chat_completion_choice}
/// This class represents a choice of the [OpenAIChatCompletionModel] model of the OpenAI API, which is used and get returned while using the [OpenAIChat] methods.
/// {@endtemplate}
final class OpenAIChatCompletionChoiceModel {
  /// The [index] of the choice.

  //! This is dynamic because the API sometimes returns a [String] and sometimes an [int].
  final index;

  /// The [message] of the choice.
  final OpenAIChatCompletionChoiceMessageModel message;

  /// The [finishReason] of the choice.
  final String? finishReason;

  /// The log probability of the choice.
  final OpenAIChatCompletionChoiceLogProbsModel? logprobs;

  /// Whether the choice has a finish reason.
  bool get haveFinishReason => finishReason != null;

  @override
  int get hashCode {
    // [logprobs] is not folded into the hash, matching the equality operator
    // below which also ignores it.
    return index.hashCode ^ message.hashCode ^ finishReason.hashCode;
  }

  /// {@macro openai_chat_completion_choice}
  const OpenAIChatCompletionChoiceModel({
    required this.index,
    required this.message,
    required this.finishReason,
    required this.logprobs,
  });

  /// This is used to convert a [Map] object to a [OpenAIChatCompletionChoiceModel] object.
  factory OpenAIChatCompletionChoiceModel.fromMap(Map json) {
    return OpenAIChatCompletionChoiceModel(
      //! Here we use the [int.tryParse] method to convert the [String] to an [int] if it's possible, otherwise we use the [String] value.
      index: json['index'] is int
          ? json['index']
          : int.tryParse(json['index'].toString()) ?? json['index'],
      message: OpenAIChatCompletionChoiceMessageModel.fromMap(json['message']),
      finishReason: json['finish_reason'],
      logprobs: json['logprobs'] != null
          ? OpenAIChatCompletionChoiceLogProbsModel.fromMap(json['logprobs'])
          : null,
    );
  }

  /// This method used to convert the [OpenAIChatCompletionChoiceModel] to a [Map] object.
54 | Map toMap() { 55 | return { 56 | "index": index, 57 | "message": message.toMap(), 58 | "finish_reason": finishReason, 59 | "logprobs": logprobs?.toMap(), 60 | }; 61 | } 62 | 63 | @override 64 | String toString() { 65 | return 'OpenAIChatCompletionChoiceModel(index: $index, message: $message, finishReason: $finishReason)'; 66 | } 67 | 68 | @override 69 | bool operator ==(Object other) { 70 | if (identical(this, other)) return true; 71 | 72 | return other is OpenAIChatCompletionChoiceModel && 73 | other.index == index && 74 | other.message == message && 75 | other.finishReason == finishReason; 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /lib/src/core/models/chat/sub_models/choices/sub_models/log_probs/log_probs.dart: -------------------------------------------------------------------------------- 1 | // ignore_for_file: public_member_api_docs, sort_constructors_first 2 | import 'sub_models/content.dart'; 3 | 4 | class OpenAIChatCompletionChoiceLogProbsModel { 5 | OpenAIChatCompletionChoiceLogProbsModel({ 6 | required this.content, 7 | }); 8 | 9 | final List content; 10 | 11 | factory OpenAIChatCompletionChoiceLogProbsModel.fromMap( 12 | Map json, 13 | ) { 14 | return OpenAIChatCompletionChoiceLogProbsModel( 15 | content: json["content"] != null 16 | ? List.from( 17 | json["content"].map( 18 | (x) => 19 | OpenAIChatCompletionChoiceLogProbsContentModel.fromMap(x), 20 | ), 21 | ) 22 | : [], 23 | ); 24 | } 25 | 26 | Map toMap() { 27 | return { 28 | "content": content.map((x) => x.toMap()).toList(), 29 | }; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /lib/src/core/models/chat/sub_models/choices/sub_models/log_probs/sub_models/content.dart: -------------------------------------------------------------------------------- 1 | import 'top_prob.dart'; 2 | 3 | class OpenAIChatCompletionChoiceLogProbsContentModel { 4 | final String? token; 5 | 6 | final double? 
logprob; 7 | 8 | final List? bytes; 9 | 10 | final List? topLogprobs; 11 | 12 | OpenAIChatCompletionChoiceLogProbsContentModel({ 13 | this.token, 14 | this.logprob, 15 | this.bytes, 16 | this.topLogprobs, 17 | }); 18 | 19 | factory OpenAIChatCompletionChoiceLogProbsContentModel.fromMap( 20 | Map map, 21 | ) { 22 | return OpenAIChatCompletionChoiceLogProbsContentModel( 23 | token: map['token'], 24 | logprob: map['logprob'], 25 | bytes: List.from(map['bytes']), 26 | topLogprobs: List.from( 27 | map['top_logprobs']?.map( 28 | (x) => OpenAIChatCompletionChoiceTopLogProbsContentModel.fromMap(x), 29 | ), 30 | ), 31 | ); 32 | } 33 | 34 | Map toMap() { 35 | return { 36 | 'token': token, 37 | 'logprob': logprob, 38 | 'bytes': bytes, 39 | }; 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /lib/src/core/models/chat/sub_models/choices/sub_models/log_probs/sub_models/top_prob.dart: -------------------------------------------------------------------------------- 1 | import 'content.dart'; 2 | 3 | class OpenAIChatCompletionChoiceTopLogProbsContentModel 4 | extends OpenAIChatCompletionChoiceLogProbsContentModel { 5 | OpenAIChatCompletionChoiceTopLogProbsContentModel({ 6 | super.token, 7 | super.logprob, 8 | super.bytes, 9 | }); 10 | 11 | factory OpenAIChatCompletionChoiceTopLogProbsContentModel.fromMap( 12 | Map map, 13 | ) { 14 | return OpenAIChatCompletionChoiceTopLogProbsContentModel( 15 | token: map['token'], 16 | logprob: map['logprob'], 17 | bytes: List.from(map['bytes']), 18 | ); 19 | } 20 | 21 | Map toMap() { 22 | return { 23 | 'token': token, 24 | 'logprob': logprob, 25 | 'bytes': bytes, 26 | }; 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /lib/src/core/models/chat/sub_models/choices/sub_models/message.dart: -------------------------------------------------------------------------------- 1 | // ignore_for_file: public_member_api_docs, sort_constructors_first 2 | 
import '../../../../../enum.dart';
import '../../../etc/message_adapter.dart';
import 'sub_models/content.dart';
import 'sub_models/tool_call.dart';
export 'sub_models/content.dart';
export 'sub_models/tool_call.dart';

/// {@template openai_chat_completion_choice_message_model}
/// This represents the message of the [OpenAIChatCompletionChoiceModel] model of the OpenAI API, which is used and get returned while using the [OpenAIChat] methods.
/// {@endtemplate}
final class OpenAIChatCompletionChoiceMessageModel {
  /// The [role] of the message.
  final OpenAIChatMessageRole role;

  /// The [content] of the message.
  final List? content;

  /// The function that the model is requesting to call.
  final List? toolCalls;

  /// The message participant name.
  final String? name;

  /// Whether the message has tool calls.
  bool get haveToolCalls => toolCalls != null;

  /// Whether the message has content.
  bool get haveContent => content != null && content!.isNotEmpty;

  @override
  int get hashCode {
    return role.hashCode ^ content.hashCode ^ toolCalls.hashCode;
  }

  /// {@macro openai_chat_completion_choice_message_model}
  const OpenAIChatCompletionChoiceMessageModel({
    required this.role,
    required this.content,
    this.toolCalls,
    this.name,
  });

  /// This is used to convert a [Map] object to a [OpenAIChatCompletionChoiceMessageModel] object.
  factory OpenAIChatCompletionChoiceMessageModel.fromMap(
    Map json,
  ) {
    return OpenAIChatCompletionChoiceMessageModel(
      name: json['name'],
      role: OpenAIChatMessageRole.values
          .firstWhere((role) => role.name == json['role']),
      content: json['content'] != null
          ? OpenAIMessageDynamicContentFromFieldAdapter.dynamicContentFromField(
              json['content'],
            )
          : null,
      toolCalls: json['tool_calls'] != null
          ? (json['tool_calls'] as List)
              .map((toolCall) => OpenAIResponseToolCall.fromMap(toolCall))
              .toList()
          : null,
    );
  }

  /// This method used to convert the [OpenAIChatCompletionChoiceMessageModel] to a [Map] object.
  Map toMap() {
    return {
      "role": role.name,
      "content": content?.map((contentItem) => contentItem.toMap()).toList(),
      // Tool calls are only serialized for assistant-role messages (see the
      // condition below).
      if (toolCalls != null && role == OpenAIChatMessageRole.assistant)
        "tool_calls": toolCalls!.map((toolCall) => toolCall.toMap()).toList(),
      if (name != null) "name": name,
    };
  }

  @override
  String toString() {
    String str = 'OpenAIChatCompletionChoiceMessageModel('
        'role: $role, '
        'content: $content, ';

    if (toolCalls != null) {
      str += 'toolCalls: $toolCalls, ';
    }
    str += ')';

    return str;
  }

  @override
  bool operator ==(Object other) {
    // NOTE(review): [content] and [toolCalls] are lists compared by identity
    // here, and [name] is not compared at all — confirm intended.
    if (identical(this, other)) return true;

    return other is OpenAIChatCompletionChoiceMessageModel &&
        other.role == role &&
        other.content == content &&
        other.toolCalls == toolCalls;
  }

  /// Converts a response function message to a request function message, so that it can be used in the next request.
  ///
  /// You should pass the response function message's [toolCallId] to this method, since it is required when requesting it.
  RequestFunctionMessage asRequestFunctionMessage({
    required String toolCallId,
  }) {
    return RequestFunctionMessage(
      content: this.content,
      role: this.role,
      toolCallId: toolCallId,
    );
  }
}

/// {@template openai_chat_completion_function_choice_message_model}
/// This represents the message of the [RequestFunctionMessage] model of the OpenAI API, which is used while using the [OpenAIChat] methods, precisely to send a response function message as a request function message for next requests.
/// {@endtemplate}
base class RequestFunctionMessage
    extends OpenAIChatCompletionChoiceMessageModel {
  /// The [toolCallId] of the message.
  final String toolCallId;

  /// {@macro openai_chat_completion_function_choice_message_model}
  RequestFunctionMessage({
    required super.role,
    required super.content,
    required this.toolCallId,
  });

  @override
  Map toMap() {
    return {
      "role": role.name,
      "content": content?.map((toolCall) => toolCall.toMap()).toList(),
      "tool_call_id": toolCallId,
    };
  }

  //! Does this needs fromMap method?
}
--------------------------------------------------------------------------------
/lib/src/core/models/chat/sub_models/choices/sub_models/sub_models/content.dart:
--------------------------------------------------------------------------------
// ignore_for_file: public_member_api_docs, sort_constructors_first
/// {@template openai_chat_completion_choice_message_content_item_model}
/// This represents the content item of the [OpenAIChatCompletionChoiceMessageModel] model of the OpenAI API, which is used in the [OpenAIChat] methods.
/// {@endtemplate}
class OpenAIChatCompletionChoiceMessageContentItemModel {
  /// The type of the content item.
  final String type;

  /// The text content of the item.
  final String? text;

  /// The image url object.
  final Map? imageUrl;

  // The base64-encoded image payload, set only by the imageBase64 factory.
  final String? imageBase64;

  @override
  int get hashCode => type.hashCode ^ text.hashCode ^ imageUrl.hashCode;

  /// {@macro openai_chat_completion_choice_message_content_item_model}
  OpenAIChatCompletionChoiceMessageContentItemModel._({
    required this.type,
    this.text,
    this.imageUrl,
    this.imageBase64,
  });

  /// This is used to convert a [Map] object to a [OpenAIChatCompletionChoiceMessageContentItemModel] object.
  factory OpenAIChatCompletionChoiceMessageContentItemModel.fromMap(
    Map asMap,
  ) {
    return OpenAIChatCompletionChoiceMessageContentItemModel._(
      type: asMap['type'],
      text: asMap['text'],
      imageUrl: asMap['image_url'],
      imageBase64: asMap['imageBase64'],
    );
  }

  /// Represents a text content item factory, which is used to create a text [OpenAIChatCompletionChoiceMessageContentItemModel].
  factory OpenAIChatCompletionChoiceMessageContentItemModel.text(String text) {
    return OpenAIChatCompletionChoiceMessageContentItemModel._(
      type: 'text',
      text: text,
    );
  }

  /// Represents an image content item factory, which is used to create an image [OpenAIChatCompletionChoiceMessageContentItemModel].
  factory OpenAIChatCompletionChoiceMessageContentItemModel.imageUrl(
    String imageUrl,
  ) {
    return OpenAIChatCompletionChoiceMessageContentItemModel._(
      type: 'image_url',
      imageUrl: {'url': imageUrl},
    );
  }

  /// Builds a base64 image content item; it is serialized as an `image_url`
  /// data URI by [toMap].
  factory OpenAIChatCompletionChoiceMessageContentItemModel.imageBase64(
    String imageBase64,
  ) {
    return OpenAIChatCompletionChoiceMessageContentItemModel._(
      type: 'image_base64',
      imageBase64: imageBase64,
    );
  }

  /// This method used to convert the [OpenAIChatCompletionChoiceMessageContentItemModel] to a [Map] object.
  Map toMap() {
    return {
      "type": type,
      if (text != null) "text": text,
      if (imageUrl != null) "image_url": imageUrl,
      // NOTE(review): when [imageBase64] is set, this emits an "image_url"
      // entry (a jpeg data URI) while "type" stays 'image_base64', and when
      // [imageUrl] is also set the key is written twice — verify against the
      // API schema.
      if (imageBase64 != null)
        "image_url": {"url": "data:image/jpeg;base64,${imageBase64}"}
    };
  }

  @override
  bool operator ==(
    covariant OpenAIChatCompletionChoiceMessageContentItemModel other,
  ) {
    if (identical(this, other)) return true;

    return other.type == type &&
        other.text == text &&
        other.imageUrl == imageUrl &&
        other.imageBase64 == imageBase64;
  }

  @override
  String toString() => switch (type) {
        'text' =>
          'OpenAIChatCompletionChoiceMessageContentItemModel(type: $type, text: $text)',
        'image' =>
          'OpenAIChatCompletionChoiceMessageContentItemModel(type: $type, imageUrl: $imageUrl)',
        'image_base64' =>
          'OpenAIChatCompletionChoiceMessageContentItemModel(type: $type, imageBase64: $imageBase64)',
        _ => 'OpenAIChatCompletionChoiceMessageContentItemModel(type: $type)',
      };
}
--------------------------------------------------------------------------------
/lib/src/core/models/chat/sub_models/choices/sub_models/sub_models/sub_models/response_function_call.dart:
--------------------------------------------------------------------------------
// ignore_for_file: public_member_api_docs, sort_constructors_first
/// {@template openai_chat_completion_response_function_model}
/// This represents the response function of the [OpenAIChatCompletionChoiceMessageModel] model of the OpenAI API, which is used in the [OpenAIChat] methods.
/// {@endtemplate}
class OpenAIResponseFunction {
  /// The name of the function.
  final String? name;

  /// The arguments of the function.
  final arguments;

  //! Not sure if the arguments will always be a Map, if you do confirm it from OpenAI docs please open an issue.

  /// Whether the function has a name or not.
15 | bool get hasName => name != null; 16 | 17 | /// Weither the function have arguments or not. 18 | bool get hasArguments => arguments != null; 19 | 20 | @override 21 | int get hashCode => name.hashCode ^ arguments.hashCode; 22 | 23 | /// {@macro openai_chat_completion_response_function_model} 24 | OpenAIResponseFunction({ 25 | required this.name, 26 | required this.arguments, 27 | }); 28 | 29 | /// This method used to convert a [Map] object to a [OpenAIResponseFunction] object. 30 | factory OpenAIResponseFunction.fromMap(Map map) { 31 | return OpenAIResponseFunction( 32 | name: map['name'], 33 | arguments: map['arguments'], 34 | ); 35 | } 36 | 37 | /// This method used to convert the [OpenAIResponseFunction] to a [Map] object. 38 | Map toMap() { 39 | return { 40 | "name": name, 41 | "arguments": arguments, 42 | }; 43 | } 44 | 45 | @override 46 | String toString() => 47 | 'OpenAIResponseFunction(name: $name, arguments: $arguments)'; 48 | 49 | @override 50 | bool operator ==(covariant OpenAIResponseFunction other) { 51 | if (identical(this, other)) return true; 52 | 53 | return other.name == name && other.arguments == arguments; 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /lib/src/core/models/chat/sub_models/choices/sub_models/sub_models/tool_call.dart: -------------------------------------------------------------------------------- 1 | // ignore_for_file: public_member_api_docs, sort_constructors_first 2 | 3 | import 'sub_models/response_function_call.dart'; 4 | 5 | /// {@template openai_chat_completion_response_tool_call_model} 6 | /// This represents the tool call of the [OpenAIChatCompletionChoiceMessageModel] model of the OpenAI API, which is used and get returned while using the [OpenAIChat] methods. 7 | /// {@endtemplate} 8 | class OpenAIResponseToolCall { 9 | /// The id of the tool call. 10 | final String? id; 11 | 12 | /// The type of the tool call. 13 | final String? 
type; 14 | 15 | /// The function of the tool call. 16 | final OpenAIResponseFunction function; 17 | 18 | /// Weither the tool call have an id. 19 | bool get haveId => id != null; 20 | 21 | /// Weither the tool call have a type. 22 | bool get haveType => type != null; 23 | 24 | @override 25 | int get hashCode => id.hashCode ^ type.hashCode ^ function.hashCode; 26 | 27 | /// {@macro openai_chat_completion_response_tool_call_model} 28 | OpenAIResponseToolCall({ 29 | required this.id, 30 | required this.type, 31 | required this.function, 32 | }); 33 | 34 | /// This is used to convert a [Map] object to a [OpenAIResponseToolCall] object. 35 | factory OpenAIResponseToolCall.fromMap(Map map) { 36 | return OpenAIResponseToolCall( 37 | id: map['id'], 38 | type: map['type'], 39 | function: OpenAIResponseFunction.fromMap(map['function']), 40 | ); 41 | } 42 | 43 | /// This method used to convert the [OpenAIResponseToolCall] to a [Map] object. 44 | Map toMap() { 45 | return { 46 | "id": id, 47 | "type": type, 48 | "function": function.toMap(), 49 | }; 50 | } 51 | 52 | @override 53 | String toString() { 54 | return "OpenAIResponseToolCall(id: $id,type: $type,function: $function)"; 55 | } 56 | 57 | @override 58 | bool operator ==(covariant OpenAIResponseToolCall other) { 59 | if (identical(this, other)) return true; 60 | 61 | return other.id == id && other.type == type && other.function == function; 62 | } 63 | } 64 | 65 | /// {@template openai_chat_completion_response_stream_tool_call_model} 66 | /// This represents the stream tool call of the [OpenAIChatCompletionChoiceMessageModel] model of the OpenAI API, which is used and get returned while using the [OpenAIChat] methods. 67 | /// {@endtemplate} 68 | class OpenAIStreamResponseToolCall extends OpenAIResponseToolCall { 69 | /// The index of the tool call. 70 | //! please fill an issue if it happen that the index is not an int in some cases. 
71 | final int index; 72 | 73 | @override 74 | int get hashCode => super.hashCode ^ index.hashCode; 75 | 76 | /// {@macro openai_chat_completion_response_stream_tool_call_model} 77 | OpenAIStreamResponseToolCall({ 78 | required super.id, 79 | required super.type, 80 | required super.function, 81 | required this.index, 82 | }); 83 | 84 | /// This is used to convert a [Map] object to a [OpenAIStreamResponseToolCall] object. 85 | factory OpenAIStreamResponseToolCall.fromMap(Map map) { 86 | return OpenAIStreamResponseToolCall( 87 | id: map['id'], 88 | type: map['type'], 89 | function: OpenAIResponseFunction.fromMap(map['function']), 90 | index: map['index'], 91 | ); 92 | } 93 | 94 | /// This method used to convert the [OpenAIStreamResponseToolCall] to a [Map] object. 95 | Map toMap() { 96 | return { 97 | "id": id, 98 | "type": type, 99 | "function": function.toMap(), 100 | "index": index, 101 | }; 102 | } 103 | 104 | @override 105 | bool operator ==(covariant OpenAIStreamResponseToolCall other) { 106 | if (identical(this, other)) return true; 107 | 108 | return other.index == index; 109 | } 110 | 111 | @override 112 | String toString() => 'OpenAIStreamResponseToolCall(index: $index})'; 113 | } 114 | -------------------------------------------------------------------------------- /lib/src/core/models/chat/sub_models/usage.dart: -------------------------------------------------------------------------------- 1 | export 'choices/sub_models/message.dart'; 2 | 3 | /// {@template openai_chat_completion_usage_model} 4 | /// This class represents the chat completion usage model of the OpenAI API, which is used and get returned while using the [OpenAIChat] methods. 5 | /// {@endtemplate} 6 | final class OpenAIChatCompletionUsageModel { 7 | /// The number of tokens used for the prompt(s). 8 | final int promptTokens; 9 | 10 | /// The number of tokens used for the chat completion(s). 
  final int completionTokens;

  /// The total number of tokens used for the chat completion(s).
  /// This is the sum of [promptTokens] and [completionTokens].
  final int totalTokens;

  @override
  int get hashCode {
    return promptTokens.hashCode ^
        completionTokens.hashCode ^
        totalTokens.hashCode;
  }

  /// {@macro openai_chat_completion_usage_model}
  const OpenAIChatCompletionUsageModel({
    required this.promptTokens,
    required this.completionTokens,
    required this.totalTokens,
  });

  /// This is used to convert a [Map] object to a [OpenAIChatCompletionUsageModel] object.
  factory OpenAIChatCompletionUsageModel.fromMap(Map json) {
    return OpenAIChatCompletionUsageModel(
      promptTokens: json['prompt_tokens'],
      completionTokens: json['completion_tokens'],
      totalTokens: json['total_tokens'],
    );
  }

  /// This is used to convert a [OpenAIChatCompletionUsageModel] object to a [Map] object.
  Map toMap() {
    return {
      "prompt_tokens": promptTokens,
      "completion_tokens": completionTokens,
      "total_tokens": totalTokens,
    };
  }

  @override
  String toString() {
    return 'OpenAIChatCompletionUsageModel(promptTokens: $promptTokens, completionTokens: $completionTokens, totalTokens: $totalTokens)';
  }

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;

    return other is OpenAIChatCompletionUsageModel &&
        other.promptTokens == promptTokens &&
        other.completionTokens == completionTokens &&
        other.totalTokens == totalTokens;
  }
}
--------------------------------------------------------------------------------
/lib/src/core/models/completion/completion.dart:
--------------------------------------------------------------------------------
import 'package:collection/collection.dart';
import "package:meta/meta.dart";

import 'sub_models/choice.dart';
import 'sub_models/usage.dart';

export 'sub_models/choice.dart';
export 'sub_models/usage.dart';
export 'stream/completion.dart';

/// {@template openai_completion_model}
/// This represents the response from a completion request by the [OpenAICompletion] methods.
/// {@endtemplate}
@immutable
final class OpenAICompletionModel {
  /// The [id]entifier of the completion.
  final String id;

  /// The date the completion was [created].
  final DateTime created;

  /// The [model] used to generate the completion.
  final String model;

  /// The [choices] generated by the completion.
  final List choices;

  /// The [usage] of the completion, if any.
  final OpenAICompletionModelUsage? usage;

  /// The [systemFingerprint] of the completion, if any.
  final String? systemFingerprint;

  /// Whether the completion has at least one choice in [choices].
  bool get haveChoices => choices.isNotEmpty;

  /// Whether the completion has a system fingerprint.
  bool get haveSystemFingerprint => systemFingerprint != null;

  /// Whether the completion has usage information.
  bool get haveUsage => usage != null;

  @override
  int get hashCode {
    return id.hashCode ^
        created.hashCode ^
        model.hashCode ^
        choices.hashCode ^
        usage.hashCode ^
        systemFingerprint.hashCode;
  }

  /// {@macro openai_completion_model}
  const OpenAICompletionModel({
    required this.id,
    required this.created,
    required this.model,
    required this.choices,
    required this.usage,
    required this.systemFingerprint,
  });

  /// {@macro openai_completion_model}
  /// This method is used to convert a [Map] object to a [OpenAICompletionModel] object.
  factory OpenAICompletionModel.fromMap(Map json) {
    return OpenAICompletionModel(
      id: json['id'],
      // The `created` field arrives as a Unix timestamp in seconds.
      created: DateTime.fromMillisecondsSinceEpoch(json['created'] * 1000),
      model: json['model'],
      choices: (json['choices'] as List)
          .map((i) => OpenAICompletionModelChoice.fromMap(i))
          .toList(),
      usage: json['usage'] != null
          ? OpenAICompletionModelUsage.fromMap(json['usage'])
          : null,
      systemFingerprint: json['system_fingerprint'],
    );
  }

  @override
  String toString() {
    return 'OpenAICompletionModel(id: $id, created: $created, model: $model, choices: $choices, usage: $usage, systemFingerprint: $systemFingerprint)';
  }

  @override
  bool operator ==(covariant OpenAICompletionModel other) {
    if (identical(this, other)) return true;
    // Deep comparison for [choices].
    final listEquals = const DeepCollectionEquality().equals;

    return other.id == id &&
        other.created == created &&
        other.model == model &&
        listEquals(other.choices, choices) &&
        other.usage == usage &&
        other.systemFingerprint == systemFingerprint;
  }
}
--------------------------------------------------------------------------------
/lib/src/core/models/completion/stream/completion.dart:
--------------------------------------------------------------------------------
import 'package:collection/collection.dart';
import 'package:meta/meta.dart';

import 'sub_models/choices.dart';

export 'sub_models/choices.dart';

/// {@template openai_stream_completion_model}
/// This class is used to represent an OpenAI stream completion.
/// {@endtemplate}
@immutable
final class OpenAIStreamCompletionModel {
  /// The [id]entifier of the completion.
  final String id;

  /// The date the completion was [created].
  final DateTime created;

  /// The [choices] generated by the completion.
  final List choices;

  /// The [model] used to generate the completion.
  final String model;

  /// The system fingerprint of the completion.
  final String? systemFingerprint;

  /// Whether the completion has at least one choice in [choices].
  bool get haveChoices => choices.isNotEmpty;

  /// Whether the completion has a system fingerprint.
32 | bool get haveSystemFingerprint => systemFingerprint != null; 33 | 34 | @override 35 | int get hashCode { 36 | return id.hashCode ^ created.hashCode ^ choices.hashCode ^ model.hashCode; 37 | } 38 | 39 | /// {@macro openai_stream_completion_model} 40 | const OpenAIStreamCompletionModel({ 41 | required this.id, 42 | required this.created, 43 | required this.choices, 44 | required this.model, 45 | required this.systemFingerprint, 46 | }); 47 | 48 | /// {@macro openai_stream_completion_model} 49 | /// This method is used to convert a [Map] object to a [OpenAIStreamCompletionModel] object. 50 | factory OpenAIStreamCompletionModel.fromMap(Map json) { 51 | return OpenAIStreamCompletionModel( 52 | id: json['id'], 53 | created: DateTime.fromMillisecondsSinceEpoch(json['created'] * 1000), 54 | choices: (json['choices'] as List) 55 | .map((e) => OpenAIStreamCompletionModelChoice.fromMap(e)) 56 | .toList(), 57 | model: json['model'], 58 | systemFingerprint: json['system_fingerprint'], 59 | ); 60 | } 61 | 62 | @override 63 | bool operator ==(covariant OpenAIStreamCompletionModel other) { 64 | if (identical(this, other)) return true; 65 | final listEquals = const DeepCollectionEquality().equals; 66 | 67 | return other.id == id && 68 | other.created == created && 69 | listEquals(other.choices, choices) && 70 | other.model == model && 71 | other.systemFingerprint == systemFingerprint; 72 | } 73 | 74 | @override 75 | String toString() { 76 | return 'OpenAIStreamCompletionModel(id: $id, created: $created, choices: $choices, model: $model, systemFingerprint: $systemFingerprint)'; 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /lib/src/core/models/completion/stream/sub_models/choices.dart: -------------------------------------------------------------------------------- 1 | import "package:meta/meta.dart"; 2 | 3 | /// {@template openai_stream_completion_model_choice} 4 | /// This class is used to represent a choice generated by 
an OpenAI stream completion. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIStreamCompletionModelChoice { 8 | /// The text generated by the completion. 9 | final String text; 10 | 11 | /// The index of the choice. 12 | final int index; 13 | 14 | /// The log probabilities of the tokens in the completion. 15 | final int? logprobs; 16 | 17 | /// The reason the completion finished. 18 | final String? finishReason; 19 | 20 | /// Weither the choice have log probabilities. 21 | bool get haveLogprobs => logprobs != null; 22 | 23 | /// Weither the choice have a finish reason. 24 | bool get haveFinishReason => finishReason != null; 25 | 26 | @override 27 | int get hashCode { 28 | return text.hashCode ^ 29 | index.hashCode ^ 30 | logprobs.hashCode ^ 31 | finishReason.hashCode; 32 | } 33 | 34 | /// {@macro openai_stream_completion_model_choice} 35 | const OpenAIStreamCompletionModelChoice({ 36 | required this.text, 37 | required this.index, 38 | required this.logprobs, 39 | required this.finishReason, 40 | }); 41 | 42 | /// {@macro openai_stream_completion_model_choice} 43 | /// This method is used to convert a [Map] object to a [OpenAIStreamCompletionModelChoice] object. 
44 | factory OpenAIStreamCompletionModelChoice.fromMap( 45 | Map json, 46 | ) { 47 | return OpenAIStreamCompletionModelChoice( 48 | text: json['text'], 49 | index: json['index'], 50 | logprobs: json['logprobs'], 51 | finishReason: json['finish_reason'], // API returns snake_case "finish_reason"; the camelCase key was never present, so this field was always null 52 | ); 53 | } 54 | 55 | @override 56 | String toString() { 57 | return 'OpenAIStreamCompletionModelChoice(text: $text, index: $index, logprobs: $logprobs, finishReason: $finishReason)'; 58 | } 59 | 60 | @override 61 | bool operator ==(covariant OpenAIStreamCompletionModelChoice other) { 62 | if (identical(this, other)) return true; 63 | 64 | return other.text == text && 65 | other.index == index && 66 | other.logprobs == logprobs && 67 | other.finishReason == finishReason; 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /lib/src/core/models/completion/sub_models/choice.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_completion_model_choice_model} 4 | /// This class is used to represent a choice generated by a completion request. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAICompletionModelChoice { 8 | /// The text generated by the completion. 9 | final String text; 10 | 11 | /// The index of the choice. 12 | final int index; 13 | 14 | /// The log probabilities of the tokens in the completion. 15 | final int? logprobs; 16 | 17 | /// The reason the completion finished. 18 | final String? finishReason; 19 | 20 | /// Weither the choice have log probabilities. 21 | bool get haveLogprobs => logprobs != null; 22 | 23 | /// Weither the choice have a finish reason.
24 | bool get haveFinishReason => finishReason != null; 25 | 26 | @override 27 | int get hashCode { 28 | return text.hashCode ^ 29 | index.hashCode ^ 30 | logprobs.hashCode ^ 31 | finishReason.hashCode; 32 | } 33 | 34 | /// {@macro openai_completion_model_choice_model} 35 | const OpenAICompletionModelChoice({ 36 | required this.text, 37 | required this.index, 38 | required this.logprobs, 39 | required this.finishReason, 40 | }); 41 | 42 | /// {@macro openai_completion_model_choice_model} 43 | /// This method is used to convert a [Map] object to a [OpenAICompletionModelChoice] object. 44 | factory OpenAICompletionModelChoice.fromMap(Map json) { 45 | return OpenAICompletionModelChoice( 46 | text: json['text'], 47 | index: json['index'], 48 | logprobs: json['logprobs'], 49 | finishReason: json['finish_reason'], // API returns snake_case "finish_reason"; the camelCase key was never present, so this field was always null 50 | ); 51 | } 52 | 53 | @override 54 | bool operator ==(covariant OpenAICompletionModelChoice other) { 55 | if (identical(this, other)) return true; 56 | 57 | return other.text == text && 58 | other.index == index && 59 | other.logprobs == logprobs && 60 | other.finishReason == finishReason; 61 | } 62 | 63 | @override 64 | String toString() { 65 | return 'OpenAICompletionModelChoice(text: $text, index: $index, logprobs: $logprobs, finishReason: $finishReason)'; 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /lib/src/core/models/completion/sub_models/usage.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_completion_model_usage} 4 | /// This represents the usage of a completion response. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAICompletionModelUsage { 8 | /// The number of tokens in the prompt. 9 | final int promptTokens; 10 | 11 | /// The number of tokens in the completion. 12 | final int completionTokens; 13 | 14 | /// The total number of tokens in the prompt and completion.
15 | final int totalTokens; 16 | 17 | @override 18 | int get hashCode => 19 | promptTokens.hashCode ^ completionTokens.hashCode ^ totalTokens.hashCode; 20 | 21 | /// {@macro openai_completion_model_usage} 22 | const OpenAICompletionModelUsage({ 23 | required this.promptTokens, 24 | required this.completionTokens, 25 | required this.totalTokens, 26 | }); 27 | 28 | /// {@macro openai_completion_model_usage} 29 | /// This method is used to convert a [Map] object to a [OpenAICompletionModelUsage] object. 30 | factory OpenAICompletionModelUsage.fromMap(Map json) { 31 | return OpenAICompletionModelUsage( 32 | promptTokens: json['prompt_tokens'], 33 | completionTokens: json['completion_tokens'], 34 | totalTokens: json['total_tokens'], 35 | ); 36 | } 37 | 38 | @override 39 | bool operator ==(covariant OpenAICompletionModelUsage other) { 40 | if (identical(this, other)) return true; 41 | 42 | return other.promptTokens == promptTokens && 43 | other.completionTokens == completionTokens && 44 | other.totalTokens == totalTokens; 45 | } 46 | 47 | @override 48 | String toString() => 49 | 'OpenAICompletionModelUsage(promptTokens: $promptTokens, completionTokens: $completionTokens, totalTokens: $totalTokens)'; 50 | } 51 | -------------------------------------------------------------------------------- /lib/src/core/models/edit/edit.dart: -------------------------------------------------------------------------------- 1 | import 'package:collection/collection.dart'; 2 | import 'package:meta/meta.dart'; 3 | import 'sub_models/choice.dart'; 4 | import 'sub_models/usage.dart'; 5 | 6 | export 'sub_models/choice.dart'; 7 | export 'sub_models/usage.dart'; 8 | 9 | /// {@template openai_edit_model} 10 | /// This class is used to represent an OpenAI edit. 11 | /// {@endtemplate} 12 | @immutable 13 | final class OpenAIEditModel { 14 | /// The date the edit was [created]. 15 | final DateTime created; 16 | 17 | /// The [choices] generated by the edit. 
18 | final List choices; 19 | 20 | /// The [usage] of the edit, if any. 21 | final OpenAIEditModelUsage? usage; 22 | 23 | /// Weither the edit have at least one choice in [choices]. 24 | bool get haveChoices => choices.isNotEmpty; 25 | 26 | /// Weither the edit have a usage information. 27 | bool get haveUsage => usage != null; 28 | 29 | @override 30 | int get hashCode => created.hashCode ^ choices.hashCode ^ usage.hashCode; 31 | 32 | /// {@macro openai_edit_model} 33 | const OpenAIEditModel({ 34 | required this.created, 35 | required this.choices, 36 | required this.usage, 37 | }); 38 | 39 | ///{@macro openai_edit_model} 40 | /// This method is used to convert a [Map] object to a [OpenAIEditModel] object. 41 | factory OpenAIEditModel.fromMap(Map json) { 42 | return OpenAIEditModel( 43 | created: DateTime.fromMillisecondsSinceEpoch(json['created'] * 1000), 44 | choices: (json['choices'] as List) 45 | .map((e) => OpenAIEditModelChoice.fromMap(e)) 46 | .toList(), 47 | usage: json['usage'] != null ? OpenAIEditModelUsage.fromMap(json['usage']) : null, // [usage] is nullable; guard like OpenAICompletionModel.fromMap does 48 | ); 49 | } 50 | 51 | @override 52 | String toString() => 53 | 'OpenAIEditModel(created: $created, choices: $choices, usage: $usage)'; 54 | 55 | @override 56 | bool operator ==(covariant OpenAIEditModel other) { 57 | if (identical(this, other)) return true; 58 | final listEquals = const DeepCollectionEquality().equals; 59 | 60 | return other.created == created && 61 | listEquals(other.choices, choices) && 62 | other.usage == usage; 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /lib/src/core/models/edit/sub_models/choice.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_edit_model_choice} 4 | /// This class is used to represent a choice generated by an OpenAI edit. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIEditModelChoice { 8 | /// The text of the choice.
9 | final String text; 10 | 11 | /// The index of the choice. 12 | final int index; 13 | 14 | @override 15 | int get hashCode => text.hashCode ^ index.hashCode; 16 | 17 | /// {@macro openai_edit_model_choice} 18 | const OpenAIEditModelChoice({ 19 | required this.text, 20 | required this.index, 21 | }); 22 | 23 | /// {@macro openai_edit_model_choice} 24 | /// This method is used to convert a [Map] object to a [OpenAIEditModelChoice] object. 25 | factory OpenAIEditModelChoice.fromMap(Map json) { 26 | return OpenAIEditModelChoice( 27 | text: json['text'], 28 | index: json['index'], 29 | ); 30 | } 31 | 32 | @override 33 | bool operator ==(covariant OpenAIEditModelChoice other) { 34 | if (identical(this, other)) return true; 35 | 36 | return other.text == text && other.index == index; 37 | } 38 | 39 | @override 40 | String toString() => 'OpenAIEditModelChoice(text: $text, index: $index)'; 41 | } 42 | -------------------------------------------------------------------------------- /lib/src/core/models/edit/sub_models/usage.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_edit_model_usage} 4 | /// This class is used to represent the usage of an OpenAI completion. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIEditModelUsage { 8 | /// The number of tokens in the prompt. 9 | final int promptTokens; 10 | 11 | /// The number of tokens in the completion. 12 | final int completionTokens; 13 | 14 | /// The total number of tokens in the prompt and completion. 
15 | final int totalTokens; 16 | 17 | @override 18 | int get hashCode => 19 | promptTokens.hashCode ^ completionTokens.hashCode ^ totalTokens.hashCode; 20 | 21 | /// {@macro openai_edit_model_usage} 22 | const OpenAIEditModelUsage({ 23 | required this.promptTokens, 24 | required this.completionTokens, 25 | required this.totalTokens, 26 | }); 27 | 28 | /// {@template openai_edit_model_usage} 29 | /// This method is used to convert a [Map] object to a [OpenAIEditModelUsage] object. 30 | /// {@endtemplate} 31 | factory OpenAIEditModelUsage.fromMap(Map json) { 32 | return OpenAIEditModelUsage( 33 | promptTokens: json['prompt_tokens'], 34 | completionTokens: json['completion_tokens'], 35 | totalTokens: json['total_tokens'], 36 | ); 37 | } 38 | 39 | @override 40 | bool operator ==(covariant OpenAIEditModelUsage other) { 41 | if (identical(this, other)) return true; 42 | 43 | return other.promptTokens == promptTokens && 44 | other.completionTokens == completionTokens && 45 | other.totalTokens == totalTokens; 46 | } 47 | 48 | @override 49 | String toString() => 50 | 'OpenAIEditModelUsage(promptTokens: $promptTokens, completionTokens: $completionTokens, totalTokens: $totalTokens)'; 51 | } 52 | -------------------------------------------------------------------------------- /lib/src/core/models/embedding/embedding.dart: -------------------------------------------------------------------------------- 1 | import 'package:collection/collection.dart'; 2 | import 'package:meta/meta.dart'; 3 | import 'sub-models/data.dart'; 4 | import 'sub-models/usage.dart'; 5 | 6 | export 'sub-models/data.dart'; 7 | export 'sub-models/usage.dart'; 8 | 9 | /// {@template openai_embeddings_model} 10 | /// This class is used to represent an OpenAI embeddings request. 11 | /// {@endtemplate} 12 | @immutable 13 | final class OpenAIEmbeddingsModel { 14 | /// The data returned by the embeddings request. 15 | final List data; 16 | 17 | /// The model used to generate the embeddings. 
18 | final String model; 19 | 20 | /// The usage of the embeddings, if any. 21 | final OpenAIEmbeddingsUsageModel? usage; 22 | 23 | /// Weither the embeddings have at least one item in [data]. 24 | bool get haveData => data.isNotEmpty; 25 | 26 | /// Weither the embeddings have a usage information. 27 | bool get haveUsage => usage != null; 28 | 29 | @override 30 | int get hashCode => data.hashCode ^ model.hashCode ^ usage.hashCode; 31 | 32 | /// {@macro openai_embeddings_model} 33 | const OpenAIEmbeddingsModel({ 34 | required this.data, 35 | required this.model, 36 | required this.usage, 37 | }); 38 | 39 | /// {@macro openai_embeddings_model} 40 | /// This method is used to convert a [Map] object to a [OpenAIEmbeddingsModel] object. 41 | factory OpenAIEmbeddingsModel.fromMap(Map map) { 42 | return OpenAIEmbeddingsModel( 43 | data: List.from( 44 | map['data'].map( 45 | (x) => OpenAIEmbeddingsDataModel.fromMap(x as Map), 46 | ), 47 | ), 48 | model: map['model'] as String, 49 | usage: OpenAIEmbeddingsUsageModel.fromMap( 50 | map['usage'] as Map, 51 | ), 52 | ); 53 | } 54 | 55 | @override 56 | bool operator ==(covariant OpenAIEmbeddingsModel other) { 57 | if (identical(this, other)) return true; 58 | final listEquals = const DeepCollectionEquality().equals; 59 | 60 | return listEquals(other.data, data) && 61 | other.model == model && 62 | other.usage == usage; 63 | } 64 | 65 | @override 66 | String toString() => 67 | 'OpenAIEmbeddingsModel(data: $data, model: $model, usage: $usage)'; 68 | } 69 | -------------------------------------------------------------------------------- /lib/src/core/models/embedding/sub-models/data.dart: -------------------------------------------------------------------------------- 1 | import 'package:collection/collection.dart'; 2 | import 'package:meta/meta.dart'; 3 | 4 | /// {@template openai_embeddings_data_model} 5 | /// This class is used to represent the data returned by an OpenAI embeddings request. 
6 | /// {@endtemplate} 7 | @immutable 8 | final class OpenAIEmbeddingsDataModel { 9 | /// The embedding of the text. 10 | final List embeddings; 11 | 12 | /// The index of the text. 13 | final int index; 14 | 15 | /// Weither the embeddings have at least one item in [embeddings]. 16 | bool get haveEmbeddings => embeddings.isNotEmpty; 17 | 18 | @override 19 | int get hashCode => embeddings.hashCode ^ index.hashCode; 20 | 21 | /// {@macro openai_embeddings_data_model} 22 | const OpenAIEmbeddingsDataModel({ 23 | required this.embeddings, 24 | required this.index, 25 | }); 26 | 27 | /// {@macro openai_embeddings_data_model} 28 | /// This method is used to convert a [Map] object to a [OpenAIEmbeddingsDataModel] object. 29 | factory OpenAIEmbeddingsDataModel.fromMap(Map map) { 30 | return OpenAIEmbeddingsDataModel( 31 | embeddings: List.from( 32 | (map['embedding'] as List).map( 33 | (e) => e is double ? e : (e as num).toDouble(), 34 | ), 35 | ), 36 | index: map['index'] as int, 37 | ); 38 | } 39 | 40 | @override 41 | bool operator ==(covariant OpenAIEmbeddingsDataModel other) { 42 | if (identical(this, other)) return true; 43 | final listEquals = const DeepCollectionEquality().equals; 44 | 45 | return listEquals(other.embeddings, embeddings) && other.index == index; 46 | } 47 | 48 | @override 49 | String toString() => 50 | 'OpenAIEmbeddingsDataModel(embeddings: $embeddings, index: $index)'; 51 | } 52 | -------------------------------------------------------------------------------- /lib/src/core/models/embedding/sub-models/usage.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_embeddings_usage_model} 4 | /// This class is used to represent the usage of an OpenAI embeddings. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIEmbeddingsUsageModel { 8 | /// The number of tokens in the prompt. 9 | final int? 
promptTokens; 10 | 11 | /// The total number of tokens in the prompt and completion. 12 | final int? totalTokens; 13 | 14 | /// Weither the usage have a prompt tokens information. 15 | bool get havePromptTokens => promptTokens != null; 16 | 17 | /// Weither the usage have a total tokens information. 18 | bool get haveTotalTokens => totalTokens != null; 19 | 20 | @override 21 | int get hashCode => promptTokens.hashCode ^ totalTokens.hashCode; 22 | 23 | /// {@macro openai_embeddings_usage_model} 24 | const OpenAIEmbeddingsUsageModel({ 25 | required this.promptTokens, 26 | required this.totalTokens, 27 | }); 28 | 29 | /// {@template openai_embeddings_usage_model} 30 | /// This method is used to convert a [Map] object to a [OpenAIEmbeddingsUsageModel] object. 31 | /// {@endtemplate} 32 | factory OpenAIEmbeddingsUsageModel.fromMap(Map map) { 33 | return OpenAIEmbeddingsUsageModel( 34 | promptTokens: map['prompt_tokens'] as int?, // fields are nullable; a hard `as int` cast would throw when the API omits them 35 | totalTokens: map['total_tokens'] as int?, 36 | ); 37 | } 38 | 39 | @override 40 | String toString() => 41 | 'OpenAIEmbeddingsUsageModel(promptTokens: $promptTokens, totalTokens: $totalTokens)'; 42 | 43 | @override 44 | bool operator ==(covariant OpenAIEmbeddingsUsageModel other) { 45 | if (identical(this, other)) return true; 46 | 47 | return other.promptTokens == promptTokens && 48 | other.totalTokens == totalTokens; 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /lib/src/core/models/export.dart: -------------------------------------------------------------------------------- 1 | export 'completion/completion.dart'; 2 | export 'edit/edit.dart'; 3 | export 'embedding/embedding.dart'; 4 | export 'file/file.dart'; 5 | export 'fine_tune/fine_tune.dart'; 6 | export 'fine_tune/sub_models/event.dart'; 7 | export 'tool/tool.dart'; 8 | export 'image/image/image.dart'; 9 | export 'model/model.dart'; 10 | export 'moderation/moderation.dart'; 11 | export '../enum.dart'; 12 | export 'chat/chat.dart';
13 | export 'audio/audio.dart'; 14 | -------------------------------------------------------------------------------- /lib/src/core/models/file/file.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_file_model} 4 | /// This class is used to represent an OpenAI file. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIFileModel { 8 | /// The [id]entifier of the file. This is used to reference the file in other API calls. 9 | final String id; 10 | 11 | /// The size of the file in [bytes]. 12 | final int bytes; 13 | 14 | /// The date the file was [created]. 15 | final DateTime createdAt; 16 | 17 | /// The name of the file. 18 | final String fileName; 19 | 20 | /// The [purpose] of the file. 21 | final String purpose; 22 | 23 | @override 24 | int get hashCode { 25 | return id.hashCode ^ 26 | bytes.hashCode ^ 27 | createdAt.hashCode ^ 28 | fileName.hashCode ^ 29 | purpose.hashCode; 30 | } 31 | 32 | /// {@macro openai_file_model} 33 | const OpenAIFileModel({ 34 | required this.id, 35 | required this.bytes, 36 | required this.createdAt, 37 | required this.fileName, 38 | required this.purpose, 39 | }); 40 | 41 | /// {@macro openai_file_model} 42 | /// This method is used to convert a [Map] object to a [OpenAIFileModel] object. 
43 | factory OpenAIFileModel.fromMap(Map map) { 44 | return OpenAIFileModel( 45 | id: map['id'] as String, 46 | bytes: map['bytes'] as int, 47 | createdAt: DateTime.fromMillisecondsSinceEpoch((map['created_at'] as int) * 1000), // created_at is unix seconds; convert to milliseconds like every other model's fromMap 48 | fileName: map['filename'] as String, 49 | purpose: map['purpose'] as String, 50 | ); 51 | } 52 | 53 | @override 54 | bool operator ==(covariant OpenAIFileModel other) { 55 | if (identical(this, other)) return true; 56 | 57 | return other.id == id && 58 | other.bytes == bytes && 59 | other.createdAt == createdAt && 60 | other.fileName == fileName && 61 | other.purpose == purpose; 62 | } 63 | 64 | @override 65 | String toString() { 66 | return 'OpenAIFileModel(id: $id, bytes: $bytes, createdAt: $createdAt, fileName: $fileName, purpose: $purpose)'; 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /lib/src/core/models/fine_tune/fine_tune.dart: -------------------------------------------------------------------------------- 1 | import 'package:collection/collection.dart'; 2 | import 'package:meta/meta.dart'; 3 | import 'sub_models/event.dart'; 4 | import 'sub_models/hyper_params.dart'; 5 | import 'sub_models/training_files.dart'; 6 | 7 | export 'sub_models/event.dart'; 8 | export 'sub_models/hyper_params.dart'; 9 | export 'sub_models/training_files.dart'; 10 | export 'stream/fine_tun_event.dart'; 11 | 12 | /// {@template openai_fine_tune_model} 13 | /// This class is used to represent an OpenAI fine-tuning job. 14 | /// {@endtemplate} 15 | @immutable 16 | final class OpenAIFineTuneModel { 17 | /// The [id]entifier of the fine-tuning job. 18 | final String id; 19 | 20 | /// The [model] used for fine-tuning. 21 | final String model; 22 | 23 | /// The date the fine-tuning job was [created]. 24 | final DateTime createdAt; 25 | 26 | /// The [events] generated by the fine-tuning job. 27 | final List? events; 28 | 29 | /// The fine-tuned model. 30 | final String?
fineTunedModel; 31 | 32 | /// The hyperparameters used for fine-tuning. 33 | final OpenAIFineTuneHyperParamsModel? hyperparams; 34 | 35 | /// The [id]entifier of the organization that owns the fine-tuning job. 36 | final String? organizationId; 37 | 38 | /// The result files generated by the fine-tuning job. 39 | final List resultFiles; 40 | 41 | /// The status of the fine-tuning job. 42 | final String status; 43 | 44 | /// The validation files used for fine-tuning. 45 | final List? validationFiles; 46 | 47 | /// The training files used for fine-tuning. 48 | final List trainingFiles; 49 | 50 | /// The date the fine-tuning job was last [updated]. 51 | final DateTime? updatedAt; 52 | 53 | /// Weither the fine-tuning job have at least one event in [events]. 54 | bool get haveEvents => events != null; 55 | 56 | /// Weither the fine-tuning job have a fine-tuned model. 57 | bool get haveFineTunedModel => fineTunedModel != null; 58 | 59 | /// Weither the fine-tuning job have hyperparameters. 60 | bool get haveHyperparams => hyperparams != null; 61 | 62 | /// Weither the fine-tuning job have a organization [id]. 63 | bool get haveOrganizationId => organizationId != null; 64 | 65 | /// Weither the fine-tuning job have at least one validation file in [validationFiles]. 66 | bool get haveValidationFiles => validationFiles != null; 67 | 68 | /// Weither the fine-tuning job have at least one training file in [trainingFiles]. 69 | bool get haveTrainingFiles => trainingFiles.isNotEmpty; 70 | 71 | /// Weither the fine-tuning job have a last update date. 
72 | bool get haveUpdatedAt => updatedAt != null; 73 | 74 | @override 75 | int get hashCode { 76 | return id.hashCode ^ 77 | model.hashCode ^ 78 | createdAt.hashCode ^ 79 | events.hashCode ^ 80 | fineTunedModel.hashCode ^ 81 | hyperparams.hashCode ^ 82 | organizationId.hashCode ^ 83 | resultFiles.hashCode ^ 84 | status.hashCode ^ 85 | validationFiles.hashCode ^ 86 | trainingFiles.hashCode ^ 87 | updatedAt.hashCode; 88 | } 89 | 90 | /// {@macro openai_fine_tune_model} 91 | const OpenAIFineTuneModel({ 92 | required this.id, 93 | required this.model, 94 | required this.createdAt, 95 | required this.events, 96 | required this.fineTunedModel, 97 | required this.hyperparams, 98 | required this.organizationId, 99 | required this.resultFiles, 100 | required this.status, 101 | required this.validationFiles, 102 | required this.trainingFiles, 103 | required this.updatedAt, 104 | }); 105 | 106 | /// {@macro openai_fine_tune_model} 107 | /// This method is used to convert a [Map] object to a [OpenAIFineTuneModel] object. 108 | factory OpenAIFineTuneModel.fromMap(Map json) { 109 | return OpenAIFineTuneModel( 110 | id: json['id'], 111 | model: json['model'], 112 | createdAt: DateTime.fromMillisecondsSinceEpoch(json['created_at'] * 1000), 113 | events: (json['events'] as List?) 
114 | ?.map((e) => OpenAIFineTuneEventModel.fromMap(e)) 115 | .toList(), 116 | fineTunedModel: json['fine_tuned_model'], 117 | hyperparams: OpenAIFineTuneHyperParamsModel.fromMap(json['hyperparams']), 118 | organizationId: json['organization_id'], 119 | resultFiles: 120 | (json['result_files'] as List).map((e) => e.toString()).toList(), 121 | status: json['status'], 122 | validationFiles: 123 | (json['validation_files'] as List).map((e) => e.toString()).toList(), 124 | trainingFiles: (json['training_files'] as List) 125 | .map((e) => OpenAIFineTuneTrainingFilesModel.fromMap(e)) 126 | .toList(), 127 | updatedAt: DateTime.fromMillisecondsSinceEpoch(json['updated_at'] * 1000), 128 | ); 129 | } 130 | 131 | @override 132 | String toString() { 133 | return 'OpenAIFineTuneModel(id: $id, model: $model, createdAt: $createdAt, events: $events, fineTunedModel: $fineTunedModel, hyperparams: $hyperparams, organizationId: $organizationId, resultFiles: $resultFiles, status: $status, validationFiles: $validationFiles, trainingFiles: $trainingFiles, updatedAt: $updatedAt)'; 134 | } 135 | 136 | @override 137 | bool operator ==(covariant OpenAIFineTuneModel other) { 138 | if (identical(this, other)) return true; 139 | final listEquals = const DeepCollectionEquality().equals; 140 | 141 | return other.id == id && 142 | other.model == model && 143 | other.createdAt == createdAt && 144 | listEquals(other.events, events) && 145 | other.fineTunedModel == fineTunedModel && 146 | other.hyperparams == hyperparams && 147 | other.organizationId == organizationId && 148 | listEquals(other.resultFiles, resultFiles) && 149 | other.status == status && 150 | listEquals(other.validationFiles, validationFiles) && 151 | listEquals(other.trainingFiles, trainingFiles) && 152 | other.updatedAt == updatedAt; 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /lib/src/core/models/fine_tune/stream/fine_tun_event.dart: 
-------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_fine_tune_event_stream_model} 4 | /// Creates a new instance of [OpenAIFineTuneEventStreamModel]. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIFineTuneEventStreamModel { 8 | /// The [level] of the event. 9 | final String level; 10 | 11 | /// The [message] of the event. 12 | final String message; 13 | 14 | /// The time the event was [created]. 15 | final DateTime createdAt; 16 | 17 | @override 18 | int get hashCode => level.hashCode ^ message.hashCode ^ createdAt.hashCode; 19 | 20 | /// {@macro openai_fine_tune_event_stream_model} 21 | const OpenAIFineTuneEventStreamModel({ 22 | required this.level, 23 | required this.message, 24 | required this.createdAt, 25 | }); 26 | 27 | /// {@macro openai_fine_tune_event_stream_model} 28 | /// Creates a new instance of [OpenAIFineTuneEventStreamModel] from a [Map]. 29 | factory OpenAIFineTuneEventStreamModel.fromMap(Map json) { 30 | return OpenAIFineTuneEventStreamModel( 31 | level: json['level'] as String, 32 | message: json['message'] as String, 33 | createdAt: DateTime.fromMillisecondsSinceEpoch(json['created_at'] * 1000), 34 | ); 35 | } 36 | 37 | @override 38 | String toString() => 39 | 'OpenAIFineTuneEventStreamModel(level: $level, message: $message, createdAt: $createdAt)'; 40 | 41 | @override 42 | bool operator ==(covariant OpenAIFineTuneEventStreamModel other) { 43 | if (identical(this, other)) return true; 44 | 45 | return other.level == level && 46 | other.message == message && 47 | other.createdAt == createdAt; 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /lib/src/core/models/fine_tune/sub_models/event.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_fine_tune_event_model} 4 | /// This class is used 
to represent an OpenAI fine-tune event. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIFineTuneEventModel { 8 | /// The date the event was [created]. 9 | final DateTime createdAt; 10 | 11 | /// The [level] of the event. 12 | final String? level; 13 | 14 | /// The [message] of the event. 15 | final String? message; 16 | 17 | /// Weither the event have a level. 18 | bool get haveLevel => level != null; 19 | 20 | /// Weither the event have a message. 21 | bool get haveMessage => message != null; 22 | 23 | @override 24 | int get hashCode => createdAt.hashCode ^ level.hashCode ^ message.hashCode; 25 | 26 | /// {@macro openai_fine_tune_event_model} 27 | const OpenAIFineTuneEventModel({ 28 | required this.createdAt, 29 | required this.level, 30 | required this.message, 31 | }); 32 | 33 | /// {@macro openai_fine_tune_event_model} 34 | /// This method is used to convert a [Map] object to a [OpenAIFineTuneEventModel] object. 35 | factory OpenAIFineTuneEventModel.fromMap(Map json) { 36 | return OpenAIFineTuneEventModel( 37 | createdAt: DateTime.fromMillisecondsSinceEpoch(json['created_at'] * 1000), 38 | level: json['level'], 39 | message: json['message'], 40 | ); 41 | } 42 | 43 | @override 44 | String toString() => 45 | 'OpenAIFineTuneEventModel(createdAt: $createdAt, level: $level, message: $message)'; 46 | 47 | @override 48 | bool operator ==(covariant OpenAIFineTuneEventModel other) { 49 | if (identical(this, other)) return true; 50 | 51 | return other.createdAt == createdAt && 52 | other.level == level && 53 | other.message == message; 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /lib/src/core/models/fine_tune/sub_models/hyper_params.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_fine_tune_hyper_params_model} 4 | /// This class is used to represent the hyper-parameters used for fine-tuning. 
5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIFineTuneHyperParamsModel { 8 | /// The batch size used for fine-tuning. 9 | final int? batchSize; 10 | 11 | /// The learning rate multiplier used for fine-tuning. 12 | final double? learningRateMultiplier; 13 | 14 | /// The number of epochs used for fine-tuning. 15 | final int? nEpochs; 16 | 17 | /// The prompt loss weight used for fine-tuning. 18 | final double? promptLossWeight; 19 | 20 | /// Weither the hyper-parameters have a batch size. 21 | bool get haveBatchSize => batchSize != null; 22 | 23 | /// Weither the hyper-parameters have a learning rate multiplier. 24 | bool get haveLearningRateMultiplier => learningRateMultiplier != null; 25 | 26 | /// Weither the hyper-parameters have a number of epochs. 27 | bool get haveNEpochs => nEpochs != null; 28 | 29 | /// Weither the hyper-parameters have a prompt loss weight. 30 | bool get havePromptLossWeight => promptLossWeight != null; 31 | 32 | @override 33 | int get hashCode { 34 | return batchSize.hashCode ^ 35 | learningRateMultiplier.hashCode ^ 36 | nEpochs.hashCode ^ 37 | promptLossWeight.hashCode; 38 | } 39 | 40 | /// {@macro openai_fine_tune_hyper_params_model} 41 | const OpenAIFineTuneHyperParamsModel({ 42 | required this.batchSize, 43 | required this.learningRateMultiplier, 44 | required this.nEpochs, 45 | required this.promptLossWeight, 46 | }); 47 | 48 | /// {@template openai_fine_tune_hyper_params_model_fromMap} 49 | /// This method is used to convert a [Map] object to a [OpenAIFineTuneHyperParamsModel] object. 
50 | /// {@endtemplate} 51 | factory OpenAIFineTuneHyperParamsModel.fromMap(Map json) { 52 | return OpenAIFineTuneHyperParamsModel( 53 | batchSize: json['batch_size'], 54 | learningRateMultiplier: json['learning_rate_multiplier'], 55 | nEpochs: json['n_epochs'], 56 | promptLossWeight: json['prompt_loss_weight'], 57 | ); 58 | } 59 | 60 | @override 61 | String toString() { 62 | return 'OpenAIFineTuneHyperParamsModel(batchSize: $batchSize, learningRateMultiplier: $learningRateMultiplier, nEpochs: $nEpochs, promptLossWeight: $promptLossWeight)'; 63 | } 64 | 65 | @override 66 | bool operator ==(covariant OpenAIFineTuneHyperParamsModel other) { 67 | if (identical(this, other)) return true; 68 | 69 | return other.batchSize == batchSize && 70 | other.learningRateMultiplier == learningRateMultiplier && 71 | other.nEpochs == nEpochs && 72 | other.promptLossWeight == promptLossWeight; 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /lib/src/core/models/fine_tune/sub_models/training_files.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_fine_tune_training_files_model} 4 | /// This class is used to represent an OpenAI fine-tune training file. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIFineTuneTrainingFilesModel { 8 | /// The [id]entifier of the file. 9 | final String id; 10 | 11 | /// The size of the file in [bytes]. 12 | final int bytes; 13 | 14 | /// The time the file was [created]. 15 | final DateTime createdAt; 16 | 17 | /// The name of the file. 18 | final String filename; 19 | 20 | /// The [purpose] of the file. 21 | final String? purpose; 22 | 23 | /// Weither the file have a purpose. 
24 | bool get havePurpose => purpose != null; 25 | 26 | @override 27 | int get hashCode { 28 | return id.hashCode ^ 29 | bytes.hashCode ^ 30 | createdAt.hashCode ^ 31 | filename.hashCode ^ 32 | purpose.hashCode; 33 | } 34 | 35 | /// {@macro openai_fine_tune_training_files_model} 36 | const OpenAIFineTuneTrainingFilesModel({ 37 | required this.id, 38 | required this.bytes, 39 | required this.createdAt, 40 | required this.filename, 41 | required this.purpose, 42 | }); 43 | 44 | /// {@macro openai_fine_tune_training_files_model} 45 | /// This method is used to convert a [Map] object to a [OpenAIFineTuneTrainingFilesModel] object. 46 | factory OpenAIFineTuneTrainingFilesModel.fromMap(Map json) { 47 | return OpenAIFineTuneTrainingFilesModel( 48 | id: json['id'], 49 | bytes: json['bytes'], 50 | createdAt: DateTime.fromMillisecondsSinceEpoch(json['created_at'] * 1000), 51 | filename: json['filename'], 52 | purpose: json['purpose'], 53 | ); 54 | } 55 | 56 | @override 57 | String toString() { 58 | return 'OpenAIFineTuneTrainingFilesModel(id: $id, bytes: $bytes, createdAt: $createdAt, filename: $filename, purpose: $purpose)'; 59 | } 60 | 61 | @override 62 | bool operator ==(covariant OpenAIFineTuneTrainingFilesModel other) { 63 | if (identical(this, other)) return true; 64 | 65 | return other.id == id && 66 | other.bytes == bytes && 67 | other.createdAt == createdAt && 68 | other.filename == filename && 69 | other.purpose == purpose; 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /lib/src/core/models/image/image/image.dart: -------------------------------------------------------------------------------- 1 | import 'package:collection/collection.dart'; 2 | import 'package:meta/meta.dart'; 3 | import 'sub_models/data.dart'; 4 | 5 | export 'sub_models/data.dart'; 6 | 7 | @immutable 8 | final class OpenAIImageModel { 9 | /// The time the image was [created]. 10 | final DateTime created; 11 | 12 | /// The data of the image. 
13 | final List data; 14 | 15 | /// Weither the image have some [data]. 16 | bool get haveData => data.isNotEmpty; 17 | 18 | @override 19 | int get hashCode => created.hashCode ^ data.hashCode; 20 | 21 | /// This class is used to represent an OpenAI image. 22 | const OpenAIImageModel({ 23 | required this.created, 24 | required this.data, 25 | }); 26 | 27 | /// This method is used to convert a [Map] object to a [OpenAIImageModel] object. 28 | factory OpenAIImageModel.fromMap(Map json) { 29 | return OpenAIImageModel( 30 | created: DateTime.fromMillisecondsSinceEpoch(json['created'] * 1000), 31 | data: (json['data'] as List) 32 | .map((e) => OpenAIImageData.fromMap(e)) 33 | .toList(), 34 | ); 35 | } 36 | 37 | @override 38 | String toString() => 'OpenAIImageModel(created: $created, data: $data)'; 39 | 40 | @override 41 | bool operator ==(covariant OpenAIImageModel other) { 42 | if (identical(this, other)) return true; 43 | final listEquals = const DeepCollectionEquality().equals; 44 | 45 | return other.created == created && listEquals(other.data, data); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /lib/src/core/models/image/image/sub_models/data.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_image_data_model} 4 | /// This class is used to represent an OpenAI image data. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIImageData { 8 | /// The [URL] of the image. 9 | final String? url; 10 | 11 | /// The [b64Json] data. 12 | final String? b64Json; 13 | 14 | /// The revised prompt. 15 | final String? revisedPrompt; 16 | 17 | /// Weither the image have a [URL] result. 18 | bool get haveUrl => url != null; 19 | 20 | /// Weither the image have a [b64Json] result. 21 | bool get haveB64Json => b64Json != null; 22 | 23 | /// Weither the image have a revised prompt. 
24 | bool get haveRevisedPrompt => revisedPrompt != null; 25 | 26 | @override 27 | int get hashCode => url.hashCode ^ b64Json.hashCode ^ revisedPrompt.hashCode; 28 | 29 | /// This class is used to represent an OpenAI image data. 30 | const OpenAIImageData({ 31 | required this.url, 32 | required this.b64Json, 33 | required this.revisedPrompt, 34 | }); 35 | 36 | /// This method is used to convert a [Map] object to a [OpenAIImageData] object. 37 | factory OpenAIImageData.fromMap(Map json) { 38 | return OpenAIImageData( 39 | url: json['url'], 40 | b64Json: json['b64_json'], 41 | revisedPrompt: json['revised_prompt'], 42 | ); 43 | } 44 | 45 | @override 46 | bool operator ==(covariant OpenAIImageData other) { 47 | if (identical(this, other)) return true; 48 | 49 | return other.url == url && 50 | other.b64Json == b64Json && 51 | other.revisedPrompt == revisedPrompt; 52 | } 53 | 54 | @override 55 | String toString() => 56 | 'OpenAIImageData(url: $url, b64Json: $b64Json, revisedPrompt: $revisedPrompt)'; 57 | } 58 | -------------------------------------------------------------------------------- /lib/src/core/models/model/model.dart: -------------------------------------------------------------------------------- 1 | import 'package:collection/collection.dart'; 2 | import 'package:meta/meta.dart'; 3 | 4 | import 'sub_models/permission.dart'; 5 | 6 | /// {@template openai_model_model} 7 | /// This class is used to represent an OpenAI model. 8 | /// {@endtemplate} 9 | @immutable 10 | final class OpenAIModelModel { 11 | /// The [id]entifier of the model. 12 | final String id; 13 | 14 | /// The name of the organization that owns the model. 15 | final String ownedBy; 16 | 17 | /// The [permission]s of the model. 18 | final List? permission; 19 | 20 | /// Weither the model have at least one permission in [permission]. 
21 | bool get havePermission => permission != null; 22 | 23 | @override 24 | int get hashCode => id.hashCode ^ ownedBy.hashCode ^ permission.hashCode; 25 | 26 | /// {@macro openai_model_model} 27 | const OpenAIModelModel({ 28 | required this.id, 29 | required this.ownedBy, 30 | required this.permission, 31 | }); 32 | 33 | /// This method is used to convert a [Map] object to a [OpenAIModelModel] object. 34 | factory OpenAIModelModel.fromMap(Map json) { 35 | // Perform a null check, and if 'permission' is null, use an empty list or null. 36 | final permissionJson = json['permission'] as List?; 37 | final permissions = permissionJson != null 38 | ? permissionJson 39 | .map((e) => 40 | OpenAIModelModelPermission.fromMap(e as Map)) 41 | .toList() 42 | : []; 43 | 44 | return OpenAIModelModel( 45 | id: json['id'], 46 | ownedBy: json['owned_by'], 47 | permission: permissions, 48 | ); 49 | } 50 | 51 | @override 52 | String toString() => 53 | 'OpenAIModelModel(id: $id, ownedBy: $ownedBy, permission: $permission)'; 54 | 55 | @override 56 | bool operator ==(covariant OpenAIModelModel other) { 57 | if (identical(this, other)) return true; 58 | final listEquals = const DeepCollectionEquality().equals; 59 | 60 | return other.id == id && 61 | other.ownedBy == ownedBy && 62 | listEquals(other.permission, permission); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /lib/src/core/models/model/sub_models/permission.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_model_model_permission} 4 | /// This class is used to represent an OpenAI model permission. 5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIModelModelPermission { 8 | /// The [id]entifier of the permission. 9 | final String? id; 10 | 11 | /// The time the permission was [created]. 12 | final DateTime? 
created; 13 | 14 | /// Whether the permission allows the user to create engines. 15 | final bool? allowCreateEngine; 16 | 17 | /// Whether the permission allows the user to sample from the model. 18 | final bool? allowSampling; 19 | 20 | /// Whether the permission allows the user to view logprobs. 21 | final bool? allowLogprobs; 22 | 23 | /// Whether the permission allows the user to search indices. 24 | final bool? allowSearchIndices; 25 | 26 | /// Whether the permission allows the user to view the model. 27 | final bool? allowView; 28 | 29 | /// Whether the permission allows the user to fine-tune the model. 30 | final bool? allowFineTuning; 31 | 32 | /// The organization of the permission. 33 | final String? organization; 34 | 35 | /// The group of the permission. 36 | final String? group; 37 | 38 | /// Whether the permission is blocking. 39 | final bool? isBlocking; 40 | 41 | @override 42 | int get hashCode { 43 | return id.hashCode ^ 44 | created.hashCode ^ 45 | allowCreateEngine.hashCode ^ 46 | allowSampling.hashCode ^ 47 | allowLogprobs.hashCode ^ 48 | allowSearchIndices.hashCode ^ 49 | allowView.hashCode ^ 50 | allowFineTuning.hashCode ^ 51 | organization.hashCode ^ 52 | group.hashCode ^ 53 | isBlocking.hashCode; 54 | } 55 | 56 | /// This class is used to represent an OpenAI model permission, it's used in [OpenAIModelModel]. 57 | const OpenAIModelModelPermission({ 58 | this.id, 59 | this.created, 60 | this.allowCreateEngine, 61 | this.allowSampling, 62 | this.allowLogprobs, 63 | this.allowSearchIndices, 64 | this.allowView, 65 | this.allowFineTuning, 66 | this.organization, 67 | this.group, 68 | this.isBlocking, 69 | }); 70 | 71 | /// This method is used to convert a [Map] object to a [OpenAIModelModelPermission] object. 72 | factory OpenAIModelModelPermission.fromMap(Map json) { 73 | return OpenAIModelModelPermission( 74 | id: json['id'], 75 | created: 76 | DateTime.fromMillisecondsSinceEpoch((json['created'] ?? 
0) * 1000), 77 | allowCreateEngine: json['allow_create_engine'], 78 | allowSampling: json['allow_sampling'], 79 | allowLogprobs: json['allow_logprobs'], 80 | allowSearchIndices: json['allow_search_indices'], 81 | allowView: json['allow_view'], 82 | allowFineTuning: json['allow_fine_tuning'], 83 | organization: json['organization'], 84 | group: json['group'], 85 | isBlocking: json['is_blocking'], 86 | ); 87 | } 88 | 89 | @override 90 | String toString() { 91 | return 'OpenAIModelModelPermission(id: $id, created: $created, allowCreateEngine: $allowCreateEngine, allowSampling: $allowSampling, allowLogprobs: $allowLogprobs, allowSearchIndices: $allowSearchIndices, allowView: $allowView, allowFineTuning: $allowFineTuning, organization: $organization, group: $group, isBlocking: $isBlocking)'; 92 | } 93 | 94 | @override 95 | bool operator ==(covariant OpenAIModelModelPermission other) { 96 | if (identical(this, other)) return true; 97 | 98 | return other.id == id && 99 | other.created == created && 100 | other.allowCreateEngine == allowCreateEngine && 101 | other.allowSampling == allowSampling && 102 | other.allowLogprobs == allowLogprobs && 103 | other.allowSearchIndices == allowSearchIndices && 104 | other.allowView == allowView && 105 | other.allowFineTuning == allowFineTuning && 106 | other.organization == organization && 107 | other.group == group && 108 | other.isBlocking == isBlocking; 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /lib/src/core/models/moderation/moderation.dart: -------------------------------------------------------------------------------- 1 | // ignore_for_file: public_member_api_docs, sort_constructors_first 2 | import 'package:collection/collection.dart'; 3 | import 'package:meta/meta.dart'; 4 | 5 | import 'sub_models/result.dart'; 6 | 7 | export 'sub_models/result.dart'; 8 | 9 | /// {@template openai_moderation_model} 10 | /// This class is used to represent an OpenAI moderation job. 
11 | /// {@endtemplate} 12 | @immutable 13 | final class OpenAIModerationModel { 14 | /// The [id]entifier of the moderation job. 15 | final String id; 16 | 17 | /// The [model] used for moderation. 18 | final String model; 19 | 20 | /// The [results] of the moderation job. 21 | final List results; 22 | 23 | /// Weither the moderation job have at least one result in [results]. 24 | bool get haveResults => results.isNotEmpty; 25 | 26 | @override 27 | int get hashCode => id.hashCode ^ model.hashCode ^ results.hashCode; 28 | 29 | /// {@macro openai_moderation_model} 30 | const OpenAIModerationModel({ 31 | required this.id, 32 | required this.model, 33 | required this.results, 34 | }); 35 | 36 | /// This method is used to convert a [Map] object to a [OpenAIModerationModel] object. 37 | factory OpenAIModerationModel.fromMap(Map json) { 38 | return OpenAIModerationModel( 39 | id: json['id'], 40 | model: json['model'], 41 | results: List.from( 42 | json['results'].map( 43 | (x) => OpenAIModerationResultModel.fromMap(x), 44 | ), 45 | ), 46 | ); 47 | } 48 | 49 | @override 50 | String toString() => 51 | 'OpenAIModerationModel(id: $id, model: $model, results: $results)'; 52 | 53 | @override 54 | bool operator ==(covariant OpenAIModerationModel other) { 55 | if (identical(this, other)) return true; 56 | final listEquals = const DeepCollectionEquality().equals; 57 | 58 | return other.id == id && 59 | other.model == model && 60 | listEquals(other.results, results); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /lib/src/core/models/moderation/sub_models/catgeories.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_moderation_result_categories_model} 4 | /// This class is used to represent an OpenAI moderation job result categories. 
5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIModerationResultCategoriesModel { 8 | /// The hate category. 9 | final bool hate; 10 | 11 | /// The hate and threatening category. 12 | final bool hateAndThreatening; 13 | 14 | /// The self harm category. 15 | final bool selfHarm; 16 | 17 | /// The sexual category. 18 | final bool sexual; 19 | 20 | /// The sexual and minors category. 21 | final bool sexualAndMinors; 22 | 23 | /// The violence category. 24 | final bool violence; 25 | 26 | /// The violence and graphic category. 27 | final bool violenceAndGraphic; 28 | 29 | /// Whether hate is detected or not. 30 | bool get isHate => hate; 31 | 32 | /// Whether hate and threatening is detected or not. 33 | bool get isHateAndThreatening => hateAndThreatening; 34 | 35 | /// Whether self harm is detected or not. 36 | bool get isSelfHarm => selfHarm; 37 | 38 | /// Whether sexual is detected or not. 39 | bool get isSexual => sexual; 40 | 41 | /// Whether sexual and minors is detected or not. 42 | bool get isSexualAndMinors => sexualAndMinors; 43 | 44 | /// Whether violence is detected or not. 45 | bool get isViolence => violence; 46 | 47 | /// Whether violence and graphic is detected or not. 48 | bool get isViolenceAndGraphic => violenceAndGraphic; 49 | 50 | /// Whether the moderation request is safe or not. 51 | bool get isSafe => 52 | !hate && 53 | !hateAndThreatening && 54 | !selfHarm && 55 | !sexual && 56 | !sexualAndMinors && 57 | !violence && 58 | !violenceAndGraphic; 59 | 60 | /// Whether the moderation request is not safe or not. 61 | bool get isNotSafe => !isSafe; 62 | 63 | @override 64 | int get hashCode { 65 | return hate.hashCode ^ 66 | hateAndThreatening.hashCode ^ 67 | selfHarm.hashCode ^ 68 | sexual.hashCode ^ 69 | sexualAndMinors.hashCode ^ 70 | violence.hashCode ^ 71 | violenceAndGraphic.hashCode; 72 | } 73 | 74 | /// This class is used to represent an OpenAI moderation job result categories. 
75 | const OpenAIModerationResultCategoriesModel({ 76 | required this.hate, 77 | required this.hateAndThreatening, 78 | required this.selfHarm, 79 | required this.sexual, 80 | required this.sexualAndMinors, 81 | required this.violence, 82 | required this.violenceAndGraphic, 83 | }); 84 | 85 | /// This method is used to convert a [Map] object to a [OpenAIModerationResultCategoriesModel] object. 86 | factory OpenAIModerationResultCategoriesModel.fromMap( 87 | Map json, 88 | ) { 89 | return OpenAIModerationResultCategoriesModel( 90 | hate: json['hate'], 91 | hateAndThreatening: json['hate/threatening'], 92 | selfHarm: json['self-harm'], 93 | sexual: json['sexual'], 94 | sexualAndMinors: json['sexual/minors'], 95 | violence: json['violence'], 96 | violenceAndGraphic: json['violence/graphic'], 97 | ); 98 | } 99 | 100 | @override 101 | String toString() { 102 | return 'OpenAIModerationResultCategoriesModel(hate: $hate, hateAndThreatening: $hateAndThreatening, selfHarm: $selfHarm, sexual: $sexual, sexualAndMinors: $sexualAndMinors, violence: $violence, violenceAndGraphic: $violenceAndGraphic)'; 103 | } 104 | 105 | @override 106 | bool operator ==(covariant OpenAIModerationResultCategoriesModel other) { 107 | if (identical(this, other)) return true; 108 | 109 | return other.hate == hate && 110 | other.hateAndThreatening == hateAndThreatening && 111 | other.selfHarm == selfHarm && 112 | other.sexual == sexual && 113 | other.sexualAndMinors == sexualAndMinors && 114 | other.violence == violence && 115 | other.violenceAndGraphic == violenceAndGraphic; 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /lib/src/core/models/moderation/sub_models/catgeories_scores.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template openai_moderation_result_scores_model} 4 | /// This class is used to represent an OpenAI moderation job result scores. 
5 | /// {@endtemplate} 6 | @immutable 7 | final class OpenAIModerationResultScoresModel { 8 | /// The hate score of the moderation job. 9 | final double hate; 10 | 11 | /// The hate and threatening score of the moderation job. 12 | final double hateAndThreatening; 13 | 14 | /// The self harm score of the moderation job. 15 | final double selfHarm; 16 | 17 | /// The sexual score of the moderation job. 18 | final double sexual; 19 | 20 | /// The sexual and minors score of the moderation job. 21 | final double sexualAndMinors; 22 | 23 | /// The violence score of the moderation job. 24 | final double violence; 25 | 26 | /// The violence and graphic score of the moderation job. 27 | final double violenceAndGraphic; 28 | 29 | @override 30 | int get hashCode { 31 | return hate.hashCode ^ 32 | hateAndThreatening.hashCode ^ 33 | selfHarm.hashCode ^ 34 | sexual.hashCode ^ 35 | sexualAndMinors.hashCode ^ 36 | violence.hashCode ^ 37 | violenceAndGraphic.hashCode; 38 | } 39 | 40 | /// This class is used to represent an OpenAI moderation job result scores. 41 | const OpenAIModerationResultScoresModel({ 42 | required this.hate, 43 | required this.hateAndThreatening, 44 | required this.selfHarm, 45 | required this.sexual, 46 | required this.sexualAndMinors, 47 | required this.violence, 48 | required this.violenceAndGraphic, 49 | }); 50 | 51 | /// This method is used to convert a [Map] object to a [OpenAIModerationResultScoresModel] object. 
52 | factory OpenAIModerationResultScoresModel.fromMap( 53 | Map json, 54 | ) { 55 | return OpenAIModerationResultScoresModel( 56 | hate: json['hate'], 57 | hateAndThreatening: json['hate/threatening'], 58 | selfHarm: json['self-harm'], 59 | sexual: json['sexual'], 60 | sexualAndMinors: json['sexual/minors'], 61 | violence: json['violence'], 62 | violenceAndGraphic: json['violence/graphic'], 63 | ); 64 | } 65 | 66 | @override 67 | String toString() { 68 | return 'OpenAIModerationResultScoresModel(hate: $hate, hateAndThreatening: $hateAndThreatening, selfHarm: $selfHarm, sexual: $sexual, sexualAndMinors: $sexualAndMinors, violence: $violence, violenceAndGraphic: $violenceAndGraphic)'; 69 | } 70 | 71 | @override 72 | bool operator ==(covariant OpenAIModerationResultScoresModel other) { 73 | if (identical(this, other)) return true; 74 | 75 | return other.hate == hate && 76 | other.hateAndThreatening == hateAndThreatening && 77 | other.selfHarm == selfHarm && 78 | other.sexual == sexual && 79 | other.sexualAndMinors == sexualAndMinors && 80 | other.violence == violence && 81 | other.violenceAndGraphic == violenceAndGraphic; 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /lib/src/core/models/moderation/sub_models/result.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | import 'catgeories.dart'; 4 | import 'catgeories_scores.dart'; 5 | 6 | export 'catgeories.dart'; 7 | export 'catgeories_scores.dart'; 8 | 9 | /// {@template openai_moderation_result_model} 10 | /// This class is used to represent an OpenAI moderation job result. 11 | /// {@endtemplate} 12 | @immutable 13 | final class OpenAIModerationResultModel { 14 | /// The categories of the moderation job. 15 | final OpenAIModerationResultCategoriesModel categories; 16 | 17 | /// The category scores of the moderation job. 
18 | final OpenAIModerationResultScoresModel categoryScores; 19 | 20 | /// The flagged status of the moderation job. 21 | final bool flagged; 22 | 23 | @override 24 | int get hashCode => 25 | categories.hashCode ^ categoryScores.hashCode ^ flagged.hashCode; 26 | 27 | /// {@macro openai_moderation_result_model} 28 | const OpenAIModerationResultModel({ 29 | required this.categories, 30 | required this.categoryScores, 31 | required this.flagged, 32 | }); 33 | 34 | /// This method is used to convert a [Map] object to a [OpenAIModerationResultModel] object. 35 | factory OpenAIModerationResultModel.fromMap(Map json) { 36 | return OpenAIModerationResultModel( 37 | categories: OpenAIModerationResultCategoriesModel.fromMap( 38 | json['categories'], 39 | ), 40 | categoryScores: OpenAIModerationResultScoresModel.fromMap( 41 | json['category_scores'], 42 | ), 43 | flagged: json['flagged'], 44 | ); 45 | } 46 | 47 | @override 48 | String toString() => 49 | 'OpenAIModerationResultModel(categories: $categories, categoryScores: $categoryScores, flagged: $flagged)'; 50 | 51 | @override 52 | bool operator ==(covariant OpenAIModerationResultModel other) { 53 | if (identical(this, other)) return true; 54 | 55 | return other.categories == categories && 56 | other.categoryScores == categoryScores && 57 | other.flagged == flagged; 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /lib/src/core/models/tool/function/function.dart: -------------------------------------------------------------------------------- 1 | // ignore_for_file: public_member_api_docs, sort_constructors_first 2 | import 'dart:convert'; 3 | 4 | import 'package:collection/collection.dart'; 5 | 6 | import 'property.dart'; 7 | 8 | export 'property.dart'; 9 | 10 | /// {@template openai_function_model} 11 | /// This class is used to represent an OpenAI function. 12 | /// {@endtemplate} 13 | class OpenAIFunctionModel { 14 | /// The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain
  /// underscores and dashes, with a maximum length of 64.
  final String name;

  /// The description of what the function does.
  final String? description;

  /// The parameters the functions accepts, described as a
  /// [JSON Schema](https://json-schema.org/understanding-json-schema) object.
  final Map<String, dynamic> parametersSchema;

  /// Whether the function has a [description].
  bool get haveDescription => description != null;

  @override
  int get hashCode =>
      name.hashCode ^ description.hashCode ^ parametersSchema.hashCode;

  /// {@macro openai_function_model}
  const OpenAIFunctionModel({
    required this.name,
    required this.parametersSchema,
    this.description,
  });

  /// {@macro openai_function_model}
  /// This a factory constructor that allows you to create a new function with valid parameters schema.
  factory OpenAIFunctionModel.withParameters({
    required String name,
    String? description,
    required Iterable<OpenAIFunctionProperty> parameters,
  }) {
    return OpenAIFunctionModel(
      name: name,
      description: description,
      parametersSchema: OpenAIFunctionProperty.object(
        name: '',
        properties: parameters,
      ).typeMap(),
    );
  }

  /// This method is used to convert a [Map] object to a [OpenAIFunctionModel] object.
  ///
  /// Accepts either a function-definition shaped map carrying a `parameters`
  /// schema map (the shape produced by [toMap], so round-tripping works) or
  /// the legacy shape carrying a JSON-encoded `arguments` string. Also reads
  /// the optional `description` field, which [toMap] emits but the previous
  /// implementation silently dropped.
  factory OpenAIFunctionModel.fromMap(Map<String, dynamic> map) {
    final params = map['parameters'];

    return OpenAIFunctionModel(
      name: map['name'],
      description: map['description'],
      parametersSchema: params is Map<String, dynamic>
          ? params
          : jsonDecode(map['arguments']) as Map<String, dynamic>,
    );
  }

  /// This method is used to convert a [OpenAIFunctionModel] object to a [Map] object.
65 | Map toMap() { 66 | return { 67 | 'name': name, 68 | if (description != null) 'description': description, 69 | 'parameters': parametersSchema, 70 | }; 71 | } 72 | 73 | @override 74 | String toString() => 75 | 'OpenAIFunctionModel(name: $name, description: $description, parametersSchema: $parametersSchema)'; 76 | 77 | @override 78 | bool operator ==(covariant OpenAIFunctionModel other) { 79 | if (identical(this, other)) return true; 80 | final mapEquals = const DeepCollectionEquality().equals; 81 | 82 | return other.name == name && 83 | other.description == description && 84 | mapEquals(other.parametersSchema, parametersSchema); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /lib/src/core/models/tool/function/function_call.dart: -------------------------------------------------------------------------------- 1 | import 'package:meta/meta.dart'; 2 | 3 | /// {@template function_call} 4 | /// Controls how the model responds to function calls. 5 | /// {@endtemplate} 6 | @immutable 7 | class FunctionCall { 8 | /// Force the model to respond to the end-user instead of calling a function. 9 | static const none = FunctionCall._(value: 'none'); 10 | 11 | /// The model can pick between an end-user or calling a function. 12 | static const auto = FunctionCall._(value: 'auto'); 13 | 14 | /// The value of the function call. 15 | final value; 16 | 17 | @override 18 | int get hashCode => value.hashCode; 19 | 20 | /// {@macro function_call} 21 | const FunctionCall._({required this.value}); 22 | 23 | /// Specifying a particular function forces the model to call that function. 
24 | factory FunctionCall.forFunction(String functionName) { 25 | return FunctionCall._(value: { 26 | 'name': functionName, 27 | }); 28 | } 29 | 30 | @override 31 | String toString() => value.toString(); 32 | 33 | @override 34 | bool operator ==(covariant FunctionCall other) { 35 | if (identical(this, other)) return true; 36 | 37 | return other.value == value; 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /lib/src/core/models/tool/function/function_call_response.dart: -------------------------------------------------------------------------------- 1 | // ignore_for_file: public_member_api_docs, sort_constructors_first 2 | import 'dart:convert'; 3 | 4 | import 'package:collection/collection.dart'; 5 | import 'package:meta/meta.dart'; 6 | 7 | /// {@template function_call_response} 8 | /// This class is used to represent an OpenAI function call response. 9 | /// {@endtemplate} 10 | @immutable 11 | class FunctionCallResponse { 12 | /// The name of the function that the model wants to call. 13 | final String? name; 14 | 15 | /// The arguments that the model wants to pass to the function. 16 | final Map? arguments; 17 | 18 | /// Weither the response have a name. 19 | bool get haveName => name != null; 20 | 21 | /// Weither the response have arguments. 22 | bool get haveArguments => arguments != null; 23 | 24 | @override 25 | int get hashCode => name.hashCode ^ arguments.hashCode; 26 | 27 | /// {@macro function_call_response} 28 | const FunctionCallResponse({ 29 | required this.name, 30 | required this.arguments, 31 | }); 32 | 33 | /// This method is used to convert a [Map] object to a [FunctionCallResponse] object. 
/// {@template stream_function_call_response_model}
/// Represents one streamed chunk of a function call emitted by the model.
/// {@endtemplate}
@immutable
class StreamFunctionCallResponse {
  /// The name of the function that the model wants to call.
  final String? name;

  /// The (possibly partial) JSON-encoded arguments the model wants to pass.
  final String? arguments;

  /// Whether this chunk carries a function name.
  bool get hasName => name != null;

  /// Whether this chunk carries arguments.
  bool get hasArguments => arguments != null;

  @override
  int get hashCode => name.hashCode ^ arguments.hashCode;

  /// {@macro stream_function_call_response_model}
  const StreamFunctionCallResponse({
    required this.name,
    required this.arguments,
  });

  /// Builds a [StreamFunctionCallResponse] from a decoded API response [map].
  factory StreamFunctionCallResponse.fromMap(Map<String, dynamic> map) =>
      StreamFunctionCallResponse(
        name: map['name'],
        arguments: map['arguments'],
      );

  /// Serializes this [StreamFunctionCallResponse] back into a [Map].
  Map<String, dynamic> toMap() => {
        'name': name,
        'arguments': arguments,
      };

  @override
  String toString() =>
      'StreamFunctionCallResponse(name: $name, arguments: $arguments)';

  @override
  bool operator ==(covariant StreamFunctionCallResponse other) {
    if (identical(this, other)) return true;

    return name == other.name && arguments == other.arguments;
  }
}
/// Fallback client factory used when neither `dart:io` nor `dart:html` is
/// available (conditional-import stub).
///
/// Bug fix: the original *returned* the [UnsupportedError] instead of
/// *throwing* it, silently handing callers an error object where an HTTP
/// client was expected; throwing makes the unsupported platform fail loudly
/// at the call site.
createClient() => throw UnsupportedError(
    'Cannot create a client without dart:html or dart:io.');
/// {@template openai_audio}
/// This class is responsible for handling all audio requests, such as creating
/// a transcription or translation for a given audio file.
/// {@endtemplate}
interface class OpenAIAudio implements OpenAIAudioBase {
  @override
  String get endpoint => OpenAIStrings.endpoints.audio;

  /// {@macro openai_audio}
  OpenAIAudio() {
    OpenAILogger.logEndpoint(endpoint);
  }

  /// Creates a transcription for a given audio file.
  ///
  /// [file] is the audio [File] to be transcribed.
  ///
  /// [model] is the model to use for the transcription.
  ///
  /// [prompt] is an optional text to guide the model's style or continue a
  /// previous audio segment. The prompt should be in English.
  ///
  /// [responseFormat] is an optional format for the transcription. The default
  /// is [OpenAIAudioResponseFormat.json].
  ///
  /// [temperature] is the sampling temperature for the request.
  ///
  /// [language] is the language of the input audio. Supplying the input
  /// language in **ISO-639-1** format will improve accuracy and latency.
  ///
  /// [timestamp_granularities] are the timestamp granularities to populate for
  /// this transcription. `response_format` must be set to `verbose_json` to
  /// use timestamp granularities. Either `word` or `segment`; both together
  /// don't work.
  ///
  /// Example:
  /// ```dart
  /// final transcription = await openai.audio.createTranscription(
  ///  file: File("audio.mp3"),
  ///  model: "whisper-1",
  ///  prompt: "This is a prompt",
  ///  responseFormat: OpenAIAudioResponseFormat.srt,
  ///  temperature: 0.5,
  /// );
  /// ```
  @override
  Future<OpenAIAudioModel> createTranscription({
    required File file,
    required String model,
    String? prompt,
    OpenAIAudioResponseFormat? responseFormat,
    double? temperature,
    String? language,
    List<OpenAIAudioTimestampGranularity>? timestamp_granularities,
  }) async {
    return await OpenAINetworkingClient.fileUpload(
      file: file,
      to: BaseApiUrlBuilder.build("$endpoint/transcriptions"),
      body: {
        "model": model,
        if (prompt != null) "prompt": prompt,
        if (responseFormat != null) "response_format": responseFormat.name,
        if (temperature != null) "temperature": temperature.toString(),
        if (language != null) "language": language,
        if (timestamp_granularities != null)
          "timestamp_granularities[]":
              timestamp_granularities.map((e) => e.name).join(","),
      },
      onSuccess: (Map<String, dynamic> response) =>
          OpenAIAudioModel.fromMap(response),
      // Plain-text response formats (srt, text, ...) arrive as a raw string;
      // wrap them so fromMap always receives a {"text": ...} shaped map.
      responseMapAdapter: (res) => {"text": res},
    );
  }

  /// Creates a translation for a given audio file.
  ///
  /// [file] is the audio [File] to be translated.
  ///
  /// [model] is the model to use for the translation.
  ///
  /// [prompt] is an optional text to guide the model's style or continue a
  /// previous audio segment. The prompt should be in English.
  ///
  /// [responseFormat] is an optional format for the translation. The default
  /// is [OpenAIAudioResponseFormat.json].
  ///
  /// [temperature] is the sampling temperature for the request.
  ///
  /// Example:
  /// ```dart
  /// final translation = await openai.audio.createTranslation(
  ///  file: File("audio.mp3"),
  ///  model: "whisper-1",
  ///  prompt: "This is a prompt",
  ///  responseFormat: OpenAIAudioResponseFormat.text,
  /// );
  /// ```
  @override
  Future<OpenAIAudioModel> createTranslation({
    required File file,
    required String model,
    String? prompt,
    OpenAIAudioResponseFormat? responseFormat,
    double? temperature,
  }) async {
    return await OpenAINetworkingClient.fileUpload(
      file: file,
      to: BaseApiUrlBuilder.build("$endpoint/translations"),
      body: {
        "model": model,
        if (prompt != null) "prompt": prompt,
        if (responseFormat != null) "response_format": responseFormat.name,
        if (temperature != null) "temperature": temperature.toString(),
      },
      onSuccess: (Map<String, dynamic> response) =>
          OpenAIAudioModel.fromMap(response),
      // Same raw-string adaptation as createTranscription.
      responseMapAdapter: (res) => {"text": res},
    );
  }

  /// Generates audio from the [input] text with the given [model] and [voice],
  /// and writes it to [outputFileName] inside [outputDirectory] (when given).
  @override
  Future<File> createSpeech({
    required String model,
    required String input,
    required String voice,
    OpenAIAudioSpeechResponseFormat? responseFormat,
    double? speed,
    String outputFileName = "output",
    Directory? outputDirectory,
  }) async {
    return await OpenAINetworkingClient.postAndExpectFileResponse(
      to: BaseApiUrlBuilder.build("$endpoint/speech"),
      body: {
        "model": model,
        "input": input,
        "voice": voice,
        if (responseFormat != null) "response_format": responseFormat.name,
        if (speed != null) "speed": speed,
      },
      onFileResponse: (File res) => res,
      outputFileName: outputFileName,
      outputDirectory: outputDirectory,
    );
  }
}
33 | /// 34 | /// [n] defines how many edits to generate for the input and instruction. 35 | /// 36 | /// [temperature] defines what sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 37 | /// 38 | /// [topP] defines an alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 39 | /// 40 | /// 41 | /// Example: 42 | ///```dart 43 | /// OpenAIEditModel edit = await OpenAI.instance.edit.create( 44 | /// model: "text-davinci-edit-001"; 45 | /// instruction: "remote all '!'from input text", 46 | /// input: "Hello!!, I! need to be ! somethi!ng" 47 | /// n: 1, 48 | /// temperature: 0.8, 49 | /// ); 50 | ///``` 51 | @override 52 | Future create({ 53 | required String model, 54 | String? input, 55 | required String? instruction, 56 | int? n, 57 | double? temperature, 58 | double? topP, 59 | http.Client? 
/// {@template openai_embedding}
/// Get a vector representation of a given input that can be easily consumed by
/// machine learning models and algorithms.
/// {@endtemplate}
@immutable
@protected
interface class OpenAIEmbedding implements OpenAIEmbeddingBase {
  @override
  String get endpoint => OpenAIStrings.endpoints.embeddings;

  /// {@macro openai_embedding}
  OpenAIEmbedding() {
    OpenAILogger.logEndpoint(endpoint);
  }

  /// Creates an embedding vector representing the input text.
  ///
  /// [model] is the id of the model to use for the embedding.
  ///
  /// You can get a list of available models using the
  /// [OpenAI.instance.model.list] method, or by visiting the
  /// [Models Overview](https://platform.openai.com/docs/models/overview) page.
  ///
  /// [input] is the prompt(s) to generate embeddings for, encoded as a
  /// [String] or a [List] of strings/tokens; any other type fails the assert
  /// below.
  ///
  /// [user] is a unique identifier representing your end-user, which can help
  /// OpenAI to monitor and detect abuse.
  /// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
  ///
  /// [client] is an optional custom HTTP client to perform the request with.
  ///
  /// Example:
  ///```dart
  /// OpenAIEmbeddingsModel embeddings = await OpenAI.instance.embedding.create(
  ///  model: "text-embedding-ada-002",
  ///  input: "This is a text input just to test",
  /// );
  ///```
  @override
  Future<OpenAIEmbeddingsModel> create({
    required String model,
    required input,
    String? user,
    http.Client? client,
  }) async {
    assert(
      input is String || input is List,
      "The input field should be a String, or a List",
    );

    return await OpenAINetworkingClient.post(
      onSuccess: (Map<String, dynamic> response) {
        return OpenAIEmbeddingsModel.fromMap(response);
      },
      to: BaseApiUrlBuilder.build(endpoint),
      body: {
        "model": model,
        if (input != null) "input": input,
        if (user != null) "user": user,
      },
      // Fix: the caller-supplied client was accepted but never forwarded,
      // unlike the sibling endpoint classes (e.g. OpenAIFiles.list).
      client: client,
    );
  }
}
/// {@template openai_files}
/// This class is responsible for handling all files requests, such as
/// uploading a file to be used across various endpoints/features.
/// {@endtemplate}
@immutable
@protected
interface class OpenAIFiles implements OpenAIFilesBase {
  @override
  String get endpoint => OpenAIStrings.endpoints.files;

  /// {@macro openai_files}
  OpenAIFiles() {
    OpenAILogger.logEndpoint(endpoint);
  }

  /// Fetches the list of files that exist in your OpenAI account.
  ///
  /// Example:
  ///```dart
  /// List<OpenAIFileModel> files = await OpenAI.instance.file.list();
  /// print(files.first.id);
  ///```
  @override
  Future<List<OpenAIFileModel>> list({
    http.Client? client,
  }) async {
    return await OpenAINetworkingClient.get(
      from: BaseApiUrlBuilder.build(endpoint),
      client: client,
      onSuccess: (Map<String, dynamic> response) {
        final List filesList = response["data"];

        return filesList.map((e) => OpenAIFileModel.fromMap(e)).toList();
      },
    );
  }

  /// Fetches a single file by its id and returns information about it.
  ///
  /// Example:
  ///```dart
  /// OpenAIFileModel file = await OpenAI.instance.file.retrieve("FILE ID");
  ///
  /// print(file);
  ///```
  @override
  Future<OpenAIFileModel> retrieve(
    String fileId, {
    http.Client? client,
  }) async {
    final String fileIdEndpoint = "/$fileId";

    return await OpenAINetworkingClient.get(
      from: BaseApiUrlBuilder.build(endpoint + fileIdEndpoint),
      // Fix: forward the caller-supplied client (was accepted but unused,
      // inconsistent with list()).
      client: client,
      onSuccess: (Map<String, dynamic> response) {
        return OpenAIFileModel.fromMap(response);
      },
    );
  }

  /// Fetches the content of a single file by its id.
  ///
  /// Example:
  /// ```dart
  /// dynamic fileContent = await OpenAI.instance.file.retrieveContent("FILE ID");
  ///
  /// print(fileContent);
  /// ```
  @override
  Future retrieveContent(
    String fileId, {
    http.Client? client,
  }) async {
    final String fileIdEndpoint = "/$fileId/content";

    return await OpenAINetworkingClient.get(
      from: BaseApiUrlBuilder.build(endpoint + fileIdEndpoint),
      // Fix: forward the caller-supplied client (was accepted but unused).
      client: client,
      returnRawResponse: true,
    );
  }

  /// Upload a file that contains document(s) to be used across various
  /// endpoints/features. Currently, the size of all the files uploaded by one
  /// organization can be up to 1 GB. Please contact OpenAI if you need to
  /// increase the storage limit.
  ///
  /// [file] is the `jsonl` file to be uploaded. If the [purpose] is set to
  /// "fine-tune", each line is a JSON record with "prompt" and "completion".
  ///
  /// [purpose] Use "fine-tune" for Fine-tuning. This allows the API to
  /// validate the format of the uploaded file.
  ///
  /// Example:
  /// ```dart
  /// OpenAIFileModel uploadedFile = await OpenAI.instance.file.upload(
  ///  file: File("/* FILE PATH HERE */"),
  ///  purpose: "fine-tuning",
  /// );
  /// ```
  @override
  Future<OpenAIFileModel> upload({
    required File file,
    required String purpose,
  }) async {
    return await OpenAINetworkingClient.fileUpload(
      to: BaseApiUrlBuilder.build(endpoint),
      body: {
        "purpose": purpose,
      },
      file: file,
      onSuccess: (Map<String, dynamic> response) {
        return OpenAIFileModel.fromMap(response);
      },
    );
  }

  /// Deletes an existing file from your account by its id.
  ///
  /// ```dart
  /// bool isFileDeleted = await OpenAI.instance.file.delete("/* FILE ID */");
  ///
  /// print(isFileDeleted);
  /// ```
  @override
  Future<bool> delete(
    String fileId, {
    http.Client? client,
  }) async {
    final String fileIdEndpoint = "/$fileId";

    return await OpenAINetworkingClient.delete(
      from: BaseApiUrlBuilder.build(endpoint + fileIdEndpoint),
      // Fix: forward the caller-supplied client (was accepted but unused).
      client: client,
      onSuccess: (Map<String, dynamic> response) {
        final bool isDeleted = response["deleted"] as bool;

        return isDeleted;
      },
    );
  }
}
27 | /// 28 | /// Example: 29 | /// ```dart 30 | /// List models = await OpenAI.instance.model.list(); 31 | /// print(models.first.id); 32 | /// ``` 33 | @override 34 | Future> list({ 35 | http.Client? client, 36 | }) async { 37 | return await OpenAINetworkingClient.get>( 38 | from: BaseApiUrlBuilder.build( 39 | endpoint, 40 | ), 41 | onSuccess: (Map response) { 42 | final List data = response['data']; 43 | 44 | return data.map((model) => OpenAIModelModel.fromMap(model)).toList(); 45 | }, 46 | client: client, 47 | ); 48 | } 49 | 50 | /// Retrieves a model by it's id and returns a [OpenAIModelModel] object, if the model is not found, it will throw a [RequestFailedException]. 51 | /// 52 | /// [id] is the id of the model to use for this request. 53 | /// 54 | /// Example: 55 | /// ```dart 56 | /// OpenAIModelModel model = await OpenAI.instance.model.retrieve("text-davinci-003"); 57 | /// print(model.id) 58 | /// ``` 59 | @override 60 | Future retrieve( 61 | String id, { 62 | http.Client? client, 63 | }) async { 64 | return await OpenAINetworkingClient.get( 65 | from: BaseApiUrlBuilder.build(endpoint, id), 66 | onSuccess: (Map response) { 67 | return OpenAIModelModel.fromMap(response); 68 | }, 69 | client: client, 70 | ); 71 | } 72 | 73 | /// Deletes a fine-tuned model, returns [true] if the model did been deleted successfully, if the model is not found, it will throw a [RequestFailedException]. 74 | /// 75 | /// [fineTuneId] is the id of the fine-tuned model to delete. 76 | /// 77 | /// Example: 78 | /// ```dart 79 | /// bool deleted = await OpenAI.instance.fineTune.delete("fine-tune-id"); 80 | /// ``` 81 | @override 82 | Future delete( 83 | String fineTuneId, { 84 | http.Client? 
/// {@template openai_moderation}
/// The class that handles all the requests related to the moderation in the
/// OpenAI API.
/// {@endtemplate}
@immutable
@protected
interface class OpenAIModeration implements OpenAIModerationBase {
  @override
  String get endpoint => OpenAIStrings.endpoints.moderation;

  /// {@macro openai_moderation}
  OpenAIModeration() {
    OpenAILogger.logEndpoint(endpoint);
  }

  /// Creates a moderation request.
  ///
  /// [input] is the input text to classify.
  ///
  /// [model] is the model used for this operation. Two content moderation
  /// models are available: "text-moderation-stable" and
  /// "text-moderation-latest". The default is text-moderation-latest, which
  /// will be automatically upgraded over time. This ensures you are always
  /// using the most accurate model. If you use text-moderation-stable, OpenAI
  /// will provide advance notice before updating the model. Accuracy of
  /// text-moderation-stable may be slightly lower than for
  /// text-moderation-latest.
  ///
  /// [client] is an optional custom HTTP client to perform the request with.
  ///
  /// Example:
  /// ```dart
  /// final moderation = await openai.moderation.create(
  ///  input: "I will kill your mates before I will cut your head off",
  /// );
  ///
  /// print(moderation.results); // ...
  /// print(moderation.results.first.categories.hate); // ...
  /// ```
  @override
  Future<OpenAIModerationModel> create({
    required String input,
    String? model,
    http.Client? client,
  }) async {
    return await OpenAINetworkingClient.post(
      onSuccess: (Map<String, dynamic> response) {
        return OpenAIModerationModel.fromMap(response);
      },
      body: {
        "input": input,
        if (model != null) "model": model,
      },
      to: BaseApiUrlBuilder.build(endpoint),
      // Fix: the caller-supplied client was accepted but never forwarded,
      // unlike the sibling endpoint classes (e.g. OpenAIModel.list).
      client: client,
    );
  }
}