├── .editorconfig ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ ├── config.yml │ └── feature_request.yml └── workflows │ └── main-ci.yaml ├── .gitignore ├── AGENTS.md ├── LICENSE ├── README.md ├── dscanner.ini ├── dub.json ├── examples ├── audio_speech │ ├── config.json │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ │ └── app.d ├── audio_transcription │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ │ └── app.d ├── audio_translation │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ │ └── app.d ├── chat │ ├── .gitignore │ ├── config.json │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ │ └── app.d ├── chat_db │ ├── .gitignore │ ├── config.json │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ │ └── app.d ├── chat_tools │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ │ └── app.d ├── chat_vision │ ├── .gitattributes │ ├── assets │ │ └── cat.png │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ │ └── app.d ├── completion │ ├── .gitignore │ ├── config.json │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ │ └── app.d ├── embedding │ ├── .gitignore │ ├── config.json │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ │ └── app.d ├── models │ ├── .gitignore │ ├── config.json │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ │ └── app.d ├── moderation │ ├── .gitignore │ ├── config.json │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ │ └── app.d ├── reasoning │ ├── config.json │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ │ └── app.d └── structured_output │ ├── .gitignore │ ├── config.json │ ├── dub.sdl │ ├── dub.selections.json │ └── source │ └── app.d └── source └── openai ├── audio.d ├── chat.d ├── clients └── openai.d ├── common.d ├── completion.d ├── embedding.d ├── models.d ├── moderation.d └── package.d /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | charset = utf-8 5 | end_of_line = lf 6 | 
insert_final_newline = true 7 | trim_trailing_whitespace = true 8 | 9 | [*.md] 10 | trim_trailing_whitespace = false 11 | 12 | [*.d] 13 | indent_style = space 14 | indent_size = 4 15 | tab_width = 4 16 | 17 | max_line_length = 120 18 | dfmt_soft_max_line_length = 100 19 | 20 | dfmt_brace_style = allman 21 | dfmt_keep_line_breaks = true 22 | dfmt_align_switch_statements = true 23 | dfmt_reflow_property_chains = true 24 | dfmt_single_indent = true 25 | dfmt_space_after_keywords = true 26 | dfmt_space_before_function_parameters = false 27 | dfmt_split_operator_at_line_end = true 28 | dfmt_compact_labeled_statements = true 29 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: Bug report 2 | description: Report an issue or bug with this library 3 | labels: ['bug'] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for taking the time to fill out this bug report! 9 | - type: checkboxes 10 | id: non_api 11 | attributes: 12 | label: Confirm this is a D library issue and not an underlying OpenAI API issue 13 | description: Issues with the underlying OpenAI API should be reported on our [Developer Community](https://community.openai.com/c/api/7) 14 | options: 15 | - label: This is an issue with the D library 16 | required: true 17 | - type: textarea 18 | id: what-happened 19 | attributes: 20 | label: Describe the bug 21 | description: A clear and concise description of what the bug is, and any additional context. 22 | placeholder: Tell us what you see! 23 | validations: 24 | required: true 25 | - type: textarea 26 | id: repro-steps 27 | attributes: 28 | label: To Reproduce 29 | description: Steps to reproduce the behavior. 30 | placeholder: | 31 | 1. Fetch a '...' 32 | 2. Update the '....' 33 | 3. 
See error 34 | validations: 35 | required: true 36 | - type: textarea 37 | id: code-snippets 38 | attributes: 39 | label: Code snippets 40 | description: If applicable, add code snippets to help explain your problem. 41 | render: Dlang 42 | validations: 43 | required: false 44 | - type: textarea 45 | id: build-commands 46 | attributes: 47 | label: Build commands 48 | description: If applicable, add build commands to help explain your problem. 49 | placeholder: | 50 | dub build --compiler=... --arch=... ... 51 | dub run --compiler=... --arch=... ... 52 | render: console 53 | validations: 54 | required: false 55 | - type: textarea 56 | id: expected-behavior 57 | attributes: 58 | label: Expected Behavior 59 | description: Please describe what you expected to happen after performing the aforementioned steps. 60 | placeholder: After doing '...', I expected '...' to happen. 61 | validations: 62 | required: true 63 | - type: input 64 | id: os 65 | attributes: 66 | label: OS 67 | placeholder: Windows, Linux, macOS, Ubuntu, ... (arch:x86/x86_64) 68 | validations: 69 | required: true 70 | - type: input 71 | id: language-version 72 | attributes: 73 | label: Compiler 74 | placeholder: dmd 2.105.0, ldc2 1.34.0 75 | validations: 76 | required: true 77 | - type: input 78 | id: lib-version 79 | attributes: 80 | label: Library version 81 | placeholder: openai-d v1.0.0 82 | validations: 83 | required: true 84 | - type: input 85 | id: other-libs 86 | attributes: 87 | label: Using libraries or dependencies 88 | placeholder: vibe-d, mir-algorithm, etc. 
89 | validations: 90 | required: false -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: OpenAI support 4 | url: https://help.openai.com/ 5 | about: | 6 | Please only file issues here that you believe represent actual bugs or feature requests for the OpenAI D library. 7 | If you're having general trouble with the OpenAI API, please visit our help center to get support. 8 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature request 2 | description: Suggest an idea for this library 3 | labels: ['feature-request'] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for taking the time to fill out this feature request! 9 | - type: checkboxes 10 | id: non_api 11 | attributes: 12 | label: Confirm this is a feature request for the D language library and not the underlying OpenAI API. 13 | description: Feature requests for the underlying OpenAI API should be reported on our [Developer Community](https://community.openai.com/c/api/7) 14 | options: 15 | - label: This is a feature request for the D language library. 16 | required: true 17 | - type: textarea 18 | id: feature 19 | attributes: 20 | label: Describe the feature or improvement you're requesting 21 | description: A clear and concise description of what you want to happen. 22 | validations: 23 | required: true 24 | - type: textarea 25 | id: context 26 | attributes: 27 | label: Additional context 28 | description: Add any other context or information about the feature request here. 
29 | -------------------------------------------------------------------------------- /.github/workflows/main-ci.yaml: -------------------------------------------------------------------------------- 1 | name: 'main CI' 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | 7 | jobs: 8 | build-ubuntu: 9 | runs-on: ubuntu-latest 10 | 11 | strategy: 12 | matrix: 13 | dlang: [dmd-latest, ldc-latest] 14 | 15 | steps: 16 | - uses: actions/checkout@v2 17 | - name: Setup Dlang 18 | uses: dlang-community/setup-dlang@v1 19 | with: 20 | compiler: ${{ matrix.dlang }} 21 | - name: Build 22 | run: dub build --compiler=$DC 23 | - name: Lint 24 | run: dub lint --dscanner-config dscanner.ini 25 | - name: Test 26 | run: dub test --compiler=$DC 27 | - name: Build examples 28 | run: | 29 | for dir in examples/*; do 30 | (cd "$dir" && dub build --compiler=$DC) 31 | done 32 | 33 | build-windows: 34 | runs-on: windows-latest 35 | 36 | strategy: 37 | matrix: 38 | dlang: [dmd-latest, ldc-latest] 39 | 40 | steps: 41 | - uses: actions/checkout@v2 42 | - name: Setup Dlang 43 | uses: dlang-community/setup-dlang@v1 44 | with: 45 | compiler: ${{ matrix.dlang }} 46 | - name: Build 47 | run: dub build --compiler=$Env:DC 48 | - name: Lint 49 | run: dub lint --dscanner-config dscanner.ini 50 | - name: Test 51 | run: dub test --compiler=$Env:DC 52 | - name: Build examples 53 | shell: pwsh 54 | run: | 55 | Get-ChildItem examples -Directory | ForEach-Object { 56 | Push-Location $_.FullName 57 | dub build --compiler=$Env:DC 58 | Pop-Location 59 | } 60 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .dub 2 | docs.json 3 | __dummy.html 4 | docs/ 5 | /taskchain 6 | taskchain.so 7 | taskchain.dylib 8 | taskchain.dll 9 | taskchain.a 10 | taskchain.lib 11 | taskchain-test-* 12 | *.exe 13 | *.pdb 14 | *.o 15 | *.obj 16 | *.lst 17 | dub.selections.json 18 | 
!examples/**/dub.selections.json 19 | openai-d-test-library 20 | -------------------------------------------------------------------------------- /AGENTS.md: -------------------------------------------------------------------------------- 1 | # AGENTS.md 2 | 3 | ## 1. Project Overview 4 | 5 | * Repository: `https://github.com/lempiji/openai-d` 6 | * Name: openai-d 7 | * Description: An unofficial OpenAI API client library for the D programming language, supporting the Azure OpenAI Service. 8 | * Installation: Published on Dub registry; install via `dub add openai-d` 9 | * Language: D (100%) 10 | * License: MIT 11 | 12 | ## 2. Development Setup 13 | 14 | * Ensure `dmd` and `dub` are available using the stable installer script (`/workspace/dlang/install.sh`). 15 | 16 | ## 3. Testing 17 | 18 | * Run all tests: 19 | 20 | ```bash 21 | dub test 22 | ``` 23 | * Guidelines: 24 | 25 | * Use `unittest` blocks; add tests for private methods when behavior is unclear. 26 | * After any code change, verify tests pass before committing. 27 | 28 | ## 4. Coverage Report 29 | 30 | * Generate coverage data: 31 | 32 | ```bash 33 | dub test --coverage --coverage-ctfe 34 | ``` 35 | * Check results: Inspect the last two lines of each `source-openai-*.lst` file in the project root for coverage percentages. 36 | 37 | ## 5. Formatter 38 | 39 | * Before running `dub run dfmt`, ensure the formatter is installed. One-line 40 | installation command: 41 | 42 | ```bash 43 | dub fetch dfmt && dub run dfmt -- --version 44 | ``` 45 | Network access or a setup script is required for this step. 46 | 47 | * Library source: 48 | 49 | ```bash 50 | dub run dfmt -- source 51 | ``` 52 | * Examples folder: 53 | 54 | ```bash 55 | dub run dfmt -- examples 56 | ``` 57 | 58 | ## 6. Linter 59 | 60 | * Run linter: 61 | 62 | ```bash 63 | dub lint --dscanner-config dscanner.ini 64 | ``` 65 | 66 | ## 7. Development Workflow 67 | 68 | 1. Modify code. 69 | 2. Run formatter and linter 70 | 3. 
Run tests and coverage 71 | 4. Build all examples. In each directory under `examples/` (e.g., `examples/chat`, `examples/chat_vision`), run `dub build` to verify that the example compiles successfully. The command should succeed without errors. 72 | 5. If all checks pass, commit changes and open a pull request. 73 | 74 | ## 8. CI/CD & PR Guidelines 75 | 76 | * CI: GitHub Actions workflows are defined in `.github/workflows/`. 77 | * PR title format: `[] ` 78 | * Commit messages: Follow Conventional Commits. 79 | * Pre-merge checks: Formatter → Linter → Tests → Coverage report. 80 | 81 | ## 9. Directory Structure 82 | 83 | ``` 84 | / 85 | ├─ .github/ # GitHub Actions workflows 86 | ├─ source/ # Library source code 87 | ├─ examples/ # Sample usage code 88 | ├─ dub.json # Dub package definitions 89 | └─ AGENTS.md # This document 90 | ``` 91 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenAI API for D 2 | 3 | [![main CI](https://github.com/lempiji/openai-d/actions/workflows/main-ci.yaml/badge.svg)](https://github.com/lempiji/openai-d/actions/workflows/main-ci.yaml) 4 | [![Latest Release](https://img.shields.io/github/v/release/lempiji/openai-d.svg)](https://github.com/lempiji/openai-d/releases) 5 | [![DUB](https://img.shields.io/dub/v/openai-d.svg)](https://code.dlang.org/packages/openai-d) 6 | ![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg) 7 | ![GitHub stars](https://img.shields.io/github/stars/lempiji/openai-d.svg) 8 | 9 | This library provides unofficial D clients for [OpenAI API](https://platform.openai.com) 10 | 11 | ## Features 12 | 13 | ### Endpoint 14 | 15 | - [x] OpenAI 16 | - [x] Azure OpenAI Service 17 | 18 | ### API 19 | 20 | #### OpenAI 21 | 22 | - [ ] [Responses API](https://platform.openai.com/docs/api-reference/responses) (TODO) 23 | - [x] [Chat](https://platform.openai.com/docs/api-reference/chat) 24 | - [x] tools (function_call) 25 | - [x] structured output 26 | - [x] input vision 27 | - [ ] stream 28 | - [ ] [Realtime (Beta)](https://platform.openai.com/docs/api-reference/realtime) (TODO) 29 | - [x] [Audio](https://platform.openai.com/docs/api-reference/audio) 30 | - [x] speech 31 | - [x] transcription 32 | - [x] translations 33 | - [ ] [Images](https://platform.openai.com/docs/api-reference/images) (TODO) 34 | - [x] [Embeddings](https://platform.openai.com/docs/api-reference/embeddings) 35 | - [ ] 
[Evals](https://platform.openai.com/docs/api-reference/evals) (TODO) 36 | - [ ] [Fine-tunings](https://platform.openai.com/docs/api-reference/fine-tuning) (TODO) 37 | - [ ] [Graders](https://platform.openai.com/docs/api-reference/graders) (TODO) 38 | - [ ] [Batch](https://platform.openai.com/docs/api-reference/batch) (TODO) 39 | - [ ] [Files](https://platform.openai.com/docs/api-reference/files) (TODO) 40 | - [ ] [Uploads](https://platform.openai.com/docs/api-reference/uploads) (TODO) 41 | - [x] [Models](https://platform.openai.com/docs/api-reference/models) 42 | - [x] [Moderations](https://platform.openai.com/docs/api-reference/moderations) 43 | - [ ] [Vector stores](https://platform.openai.com/docs/api-reference/vector-stores) (TODO) 44 | - [ ] [Containers](https://platform.openai.com/docs/api-reference/containers) (TODO) 45 | - [ ] [Assistants (Beta)](https://platform.openai.com/docs/api-reference/assistants) (TODO) 46 | - [ ] [Administration](https://platform.openai.com/docs/api-reference/administration) (TODO) 47 | 48 | __legacy__ 49 | - [x] Completions (Legacy) 50 | 51 | __deprecated__ 52 | - Edits 53 | - Use chat API instead. See: https://platform.openai.com/docs/deprecations/edit-models-endpoint 54 | 55 | ### Optimization 56 | 57 | - [ ] Switch HTTP client to [`requests`](https://code.dlang.org/packages/requests) (TODO) 58 | - Not adopted because it is less convenient due to Windows' handling of system certificates. Version flag is required for support. 59 | - Adopting 'requests' is expected to lead to more efficient use of Fiber in vibe.d. 
60 | 61 | # Usage 62 | 63 | ## Installation 64 | 65 | ``` 66 | dub add openai-d 67 | ``` 68 | 69 | ## OpenAIClient 70 | 71 | __Completion__ 72 | 73 | ```d name=completion 74 | import std; 75 | import openai; 76 | 77 | // Load API key from environment variable 78 | auto client = new OpenAIClient(); 79 | 80 | // POST /completions 81 | auto message = completionRequest("gpt-3.5-turbo-instruct", "Hello, D Programming Language!\n", 10, 0); 82 | message.stop = "\n"; 83 | 84 | auto response = client.completion(message); 85 | 86 | writeln(response.choices[0].text.chomp()); 87 | ``` 88 | 89 | __Chat__ 90 | 91 | ```d name=chat 92 | import std; 93 | import openai; 94 | 95 | // Load API key from environment variable 96 | auto client = new OpenAIClient(); 97 | 98 | // POST /chat/completions 99 | // You can use the new model constants such as `O4Mini` or `O3`. 100 | auto request = chatCompletionRequest(openai.O4Mini, [ 101 | systemChatMessage("You are a helpful assistant."), 102 | userChatMessage("Hello!") 103 | ], 16, 0); // sets `maxCompletionTokens` 104 | // Optional: control reasoning effort for o-series models 105 | request.reasoningEffort = "high"; 106 | 107 | auto response = client.chatCompletion(request); 108 | 109 | writeln(response.choices[0].message.content); 110 | ``` 111 | 112 | For o-series models such as `O4Mini` or `O3`, use `maxCompletionTokens` instead 113 | of the deprecated `max_tokens` field when creating your requests. 
114 | 115 | __Embedding__ 116 | 117 | ```d name=embedding 118 | import std; 119 | import openai; 120 | 121 | // Load API key from environment variable 122 | auto client = new OpenAIClient(); 123 | 124 | // POST /embeddings 125 | const request = embeddingRequest("text-embedding-ada-002", "Hello, D Programming Language!"); 126 | auto response = client.embedding(request); 127 | 128 | float[] embedding = response.data[0].embedding; 129 | writeln(embedding.length); // text-embedding-ada-002 -> 1536 130 | ``` 131 | 132 | __Moderation__ 133 | 134 | ```d name=moderation 135 | import std; 136 | import openai; 137 | 138 | // Load API key from environment variable 139 | auto client = new OpenAIClient(); 140 | 141 | // POST /moderations 142 | const request = moderationRequest("D is a general-purpose programming language with static typing, systems-level access, and C-like syntax. With the D Programming Language, write fast, read fast, and run fast."); 143 | auto response = client.moderation(request); 144 | 145 | if (response.results[0].flagged) 146 | writeln("Warning!"); 147 | else 148 | writeln("Probably safe."); 149 | ``` 150 | 151 | __Transcription__ 152 | 153 | ```d name=transcription 154 | import std; 155 | import openai; 156 | 157 | // Load API key from environment variable 158 | auto client = new OpenAIClient(); 159 | 160 | // POST /audio/transcriptions 161 | auto request = transcriptionRequest("audio.mp3", "whisper-1"); 162 | auto response = client.transcription(request); 163 | 164 | writeln(response.text); 165 | ``` 166 | 167 | See `examples/audio_transcription` for a complete example. 
168 | 169 | __Translation__ 170 | 171 | ```d name=translation 172 | import std; 173 | import openai; 174 | 175 | // Load API key from environment variable 176 | auto client = new OpenAIClient(); 177 | 178 | // POST /audio/translations 179 | auto request = translationRequest("audio.mp3", "whisper-1"); 180 | auto response = client.translation(request); 181 | 182 | writeln(response.text); 183 | ``` 184 | 185 | See `examples/audio_translation` for a complete example. 186 | 187 | ## OpenAIClientConfig 188 | 189 | __Environment variables__ 190 | 191 | ```d name=config_env 192 | import std.process; 193 | import openai; 194 | 195 | environment["OPENAI_API_KEY"] = ""; 196 | environment["OPENAI_ORGANIZATION"] = ""; 197 | environment["OPENAI_API_BASE"] = "https://example.api.cognitive.microsoft.com"; // optional 198 | environment["OPENAI_DEPLOYMENT_ID"] = ""; // Azure only 199 | environment["OPENAI_API_VERSION"] = "2024-10-21"; // Azure only 200 | 201 | auto config = OpenAIClientConfig.fromEnvironment(); 202 | 203 | assert(config.apiKey == ""); 204 | assert(config.organization == ""); 205 | assert(config.apiBase == "https://example.api.cognitive.microsoft.com"); 206 | ``` 207 | 208 | __Configuration file__ 209 | 210 | ```d name=config_file 211 | import std.file; 212 | import openai; 213 | 214 | write("config.json", `{"apiKey": "", "organization": null}`); 215 | scope (exit) remove("config.json"); 216 | 217 | auto config = OpenAIClientConfig.fromFile("config.json"); 218 | 219 | assert(config.apiKey == ""); 220 | assert(config.organization is null); 221 | ``` 222 | -------------------------------------------------------------------------------- /dscanner.ini: -------------------------------------------------------------------------------- 1 | ; Configure which static analysis checks are enabled 2 | [analysis.config.StaticAnalysisConfig] 3 | ; Check variable, class, struct, interface, union, and function names against t 4 | ; he Phobos style guide 5 | style_check="enabled" 6 | ; 
Check for array literals that cause unnecessary allocation 7 | enum_array_literal_check="enabled" 8 | ; Check for poor exception handling practices 9 | exception_check="enabled" 10 | ; Check for use of the deprecated 'delete' keyword 11 | delete_check="enabled" 12 | ; Check for use of the deprecated floating point operators 13 | float_operator_check="enabled" 14 | ; Check number literals for readability 15 | number_style_check="enabled" 16 | ; Checks that opEquals, opCmp, toHash, and toString are either const, immutable 17 | ; , or inout. 18 | object_const_check="enabled" 19 | ; Checks for .. expressions where the left side is larger than the right. 20 | backwards_range_check="enabled" 21 | ; Checks for if statements whose 'then' block is the same as the 'else' block 22 | if_else_same_check="enabled" 23 | ; Checks for some problems with constructors 24 | constructor_check="enabled" 25 | ; Checks for unused variables 26 | unused_variable_check="enabled" 27 | ; Checks for unused labels 28 | unused_label_check="enabled" 29 | ; Checks for unused function parameters 30 | unused_parameter_check="enabled" 31 | ; Checks for duplicate attributes 32 | duplicate_attribute="enabled" 33 | ; Checks that opEquals and toHash are both defined or neither are defined 34 | opequals_tohash_check="enabled" 35 | ; Checks for subtraction from .length properties 36 | length_subtraction_check="enabled" 37 | ; Checks for methods or properties whose names conflict with built-in propertie 38 | ; s 39 | builtin_property_names_check="enabled" 40 | ; Checks for confusing code in inline asm statements 41 | asm_style_check="enabled" 42 | ; Checks for confusing logical operator precedence 43 | logical_precedence_check="enabled" 44 | ; Checks for undocumented public declarations 45 | undocumented_declaration_check="disabled" 46 | ; Checks for poor placement of function attributes 47 | function_attribute_check="enabled" 48 | ; Checks for use of the comma operator 49 | comma_expression_check="enabled" 
50 | ; Checks for local imports that are too broad. Only accurate when checking code 51 | ; used with D versions older than 2.071.0 52 | local_import_check="disabled" 53 | ; Checks for variables that could be declared immutable 54 | could_be_immutable_check="disabled" 55 | ; Checks for redundant expressions in if statements 56 | redundant_if_check="enabled" 57 | ; Checks for redundant parenthesis 58 | redundant_parens_check="enabled" 59 | ; Checks for mismatched argument and parameter names 60 | mismatched_args_check="enabled" 61 | ; Checks for labels with the same name as variables 62 | label_var_same_name_check="enabled" 63 | ; Checks for lines longer than `max_line_length` characters 64 | long_line_check="disabled" 65 | ; The maximum line length used in `long_line_check`. 66 | max_line_length="120" 67 | ; Checks for assignment to auto-ref function parameters 68 | auto_ref_assignment_check="enabled" 69 | ; Checks for incorrect infinite range definitions 70 | incorrect_infinite_range_check="enabled" 71 | ; Checks for asserts that are always true 72 | useless_assert_check="enabled" 73 | ; Check for uses of the old-style alias syntax 74 | alias_syntax_check="enabled" 75 | ; Checks for else if that should be else static if 76 | static_if_else_check="enabled" 77 | ; Check for unclear lambda syntax 78 | lambda_return_check="enabled" 79 | ; Check for auto function without return statement 80 | auto_function_check="disabled" 81 | ; Check that if|else|for|foreach|while|do|try|catch are always followed by a Bl 82 | ; ockStatement { } 83 | always_curly_check="disabled" 84 | ; Check for sortedness of imports 85 | imports_sortedness="disabled" 86 | ; Check for explicitly annotated unittests 87 | explicitly_annotated_unittests="disabled" 88 | ; Check for properly documented public functions (Returns, Params) 89 | properly_documented_public_functions="disabled" 90 | ; Check for useless usage of the final attribute 91 | final_attribute_check="enabled" 92 | ; Check for virtual 
calls in the class constructors 93 | vcall_in_ctor="enabled" 94 | ; Check for useless user defined initializers 95 | useless_initializer="disabled" 96 | ; Check allman brace style 97 | allman_braces_check="disabled" 98 | ; Check for redundant attributes 99 | redundant_attributes_check="enabled" 100 | ; Check public declarations without a documented unittest 101 | has_public_example="disabled" 102 | ; Check for asserts without an explanatory message 103 | assert_without_msg="disabled" 104 | ; Check indent of if constraints 105 | if_constraints_indent="disabled" 106 | ; Check for @trusted applied to a bigger scope than a single function 107 | trust_too_much="enabled" 108 | ; Check for redundant storage classes on variable declarations 109 | redundant_storage_classes="enabled" 110 | ; Check for unused function return values 111 | unused_result="enabled" 112 | ; Enable cyclomatic complexity check 113 | cyclomatic_complexity="disabled" 114 | ; Maximum cyclomatic complexity after which to issue warnings 115 | max_cyclomatic_complexity="50" 116 | ; Check for function bodies on disabled functions 117 | body_on_disabled_func_check="enabled" 118 | -------------------------------------------------------------------------------- /dub.json: -------------------------------------------------------------------------------- 1 | { 2 | "authors": [ 3 | "lempiji" 4 | ], 5 | "dependencies": { 6 | "mir-ion": "~>2.3.3" 7 | }, 8 | "description": "OpenAI Client APIs", 9 | "license": "MIT", 10 | "name": "openai-d" 11 | } 12 | -------------------------------------------------------------------------------- /examples/audio_speech/config.json: -------------------------------------------------------------------------------- 1 | {"apiKey":"","organization":""} 2 | -------------------------------------------------------------------------------- /examples/audio_speech/dub.sdl: -------------------------------------------------------------------------------- 1 | name "audio_speech" 2 | description 
"A minimal D application." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." 7 | -------------------------------------------------------------------------------- /examples/audio_speech/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "mir-algorithm": "3.22.3", 5 | "mir-core": "1.7.1", 6 | "mir-cpuid": "1.2.11", 7 | "mir-ion": "2.3.3", 8 | "openai-d": {"path":"../.."}, 9 | "silly": "1.1.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/audio_speech/source/app.d: -------------------------------------------------------------------------------- 1 | import std.stdio; 2 | import std.file : write; 3 | 4 | import openai; 5 | 6 | void main() 7 | { 8 | string text = "Hello! こんにちは。お元気ですか?"; 9 | auto client = new OpenAIClient; 10 | 11 | auto request = speechRequest(openai.GPT4OMiniTTS, text, VoiceAlloy); 12 | auto data = client.speech(request); 13 | write("speech.mp3", data); 14 | writeln("saved speech.mp3: ", data.length, " bytes"); 15 | } 16 | -------------------------------------------------------------------------------- /examples/audio_transcription/dub.sdl: -------------------------------------------------------------------------------- 1 | name "audio_transcription" 2 | description "A minimal D application." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." 
7 | -------------------------------------------------------------------------------- /examples/audio_transcription/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "mir-algorithm": "3.22.3", 5 | "mir-core": "1.7.1", 6 | "mir-cpuid": "1.2.11", 7 | "mir-ion": "2.3.3", 8 | "openai-d": {"path":"../.."}, 9 | "silly": "1.1.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/audio_transcription/source/app.d: -------------------------------------------------------------------------------- 1 | import std.stdio; 2 | 3 | import openai; 4 | 5 | void main(string[] args) 6 | { 7 | string file = args.length > 1 ? args[1] : "audio.mp3"; 8 | auto client = new OpenAIClient; 9 | 10 | auto request = transcriptionRequest(file, "whisper-1"); 11 | auto response = client.transcription(request); 12 | writeln(response.text); 13 | } 14 | -------------------------------------------------------------------------------- /examples/audio_translation/dub.sdl: -------------------------------------------------------------------------------- 1 | name "audio_translation" 2 | description "A minimal D application." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." 
7 | -------------------------------------------------------------------------------- /examples/audio_translation/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "mir-algorithm": "3.22.3", 5 | "mir-core": "1.7.1", 6 | "mir-cpuid": "1.2.11", 7 | "mir-ion": "2.3.3", 8 | "openai-d": {"path":"../.."}, 9 | "silly": "1.1.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/audio_translation/source/app.d: -------------------------------------------------------------------------------- 1 | import std.stdio; 2 | import openai; 3 | 4 | void main(string[] args) 5 | { 6 | string file = args.length > 1 ? args[1] : "audio.mp3"; 7 | auto client = new OpenAIClient; 8 | auto request = translationRequest(file, "whisper-1"); 9 | auto res = client.translation(request); 10 | writeln(res.text); 11 | } 12 | -------------------------------------------------------------------------------- /examples/chat/.gitignore: -------------------------------------------------------------------------------- 1 | .dub 2 | docs.json 3 | __dummy.html 4 | docs/ 5 | /chat 6 | chat.so 7 | chat.dylib 8 | chat.dll 9 | chat.a 10 | chat.lib 11 | chat-test-* 12 | *.exe 13 | *.pdb 14 | *.o 15 | *.obj 16 | *.lst 17 | -------------------------------------------------------------------------------- /examples/chat/config.json: -------------------------------------------------------------------------------- 1 | {"apiKey":"","organization":""} -------------------------------------------------------------------------------- /examples/chat/dub.sdl: -------------------------------------------------------------------------------- 1 | name "chat" 2 | description "A minimal D application." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." 
-------------------------------------------------------------------------------- /examples/chat/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "mir-algorithm": "3.22.3", 5 | "mir-core": "1.7.1", 6 | "mir-cpuid": "1.2.11", 7 | "mir-ion": "2.3.3", 8 | "openai-d": {"path":"../.."}, 9 | "silly": "1.1.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/chat/source/app.d: -------------------------------------------------------------------------------- 1 | import std.stdio; 2 | 3 | import openai; 4 | 5 | void main() 6 | { 7 | // If the argument Config is omitted, it is read from an environment variable 'OPENAI_API_KEY' 8 | // auto config = OpenAIClientConfig.fromFile("config.json"); 9 | // auto client = new OpenAIClient(config); 10 | auto client = new OpenAIClient; 11 | 12 | const request = chatCompletionRequest(openai.GPT4OMini, [ 13 | systemChatMessage("You are a helpful assistant."), 14 | userChatMessage("Hello!") 15 | ], 16, 0); 16 | 17 | auto response = client.chatCompletion(request); 18 | assert(response.choices.length > 0); 19 | writefln!"role: %s\ncontent: %s"(response.choices[0].message.tupleof[0 .. 
2]); 20 | } 21 | -------------------------------------------------------------------------------- /examples/chat_db/.gitignore: -------------------------------------------------------------------------------- 1 | .dub 2 | docs.json 3 | __dummy.html 4 | docs/ 5 | /chat 6 | chat.so 7 | chat.dylib 8 | chat.dll 9 | chat.a 10 | chat.lib 11 | chat-test-* 12 | *.exe 13 | *.pdb 14 | *.o 15 | *.obj 16 | *.lst 17 | -------------------------------------------------------------------------------- /examples/chat_db/config.json: -------------------------------------------------------------------------------- 1 | {"apiKey":"","organization":""} -------------------------------------------------------------------------------- /examples/chat_db/dub.sdl: -------------------------------------------------------------------------------- 1 | name "chat_db" 2 | description "A minimal D application." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." 7 | dependency "d2sqlite3" version="~>1.0.0" 8 | 9 | subConfiguration "d2sqlite3" "all-included" -------------------------------------------------------------------------------- /examples/chat_db/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "d2sqlite3": "1.0.0", 5 | "mir-algorithm": "3.22.3", 6 | "mir-core": "1.7.1", 7 | "mir-cpuid": "1.2.11", 8 | "mir-ion": "2.3.3", 9 | "openai-d": {"path":"../.."}, 10 | "silly": "1.1.1" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /examples/chat_db/source/app.d: -------------------------------------------------------------------------------- 1 | module app; 2 | 3 | import std.datetime; 4 | import std.stdio; 5 | import std.typecons; 6 | 7 | import openai; 8 | import d2sqlite3; 9 | 10 | void main() 11 | { 12 | // If the argument Config is omitted, it is read from an environment variable 'OPENAI_API_KEY' 13 | // auto 
config = OpenAIClientConfig.fromFile("config.json"); 14 | // auto client = new OpenAIClient(config); 15 | auto client = new OpenAIClient; 16 | 17 | auto db = Database(":memory:"); 18 | 19 | scope (exit) 20 | { 21 | auto tableNames = execute_query(db, `SELECT name FROM sqlite_master WHERE type='table';`); 22 | writeln(tableNames); 23 | 24 | import std.format; 25 | 26 | import mir.algebraic; 27 | import mir.string_map; 28 | 29 | auto name = tableNames[0].get!(StringMap!JsonValue)()["name"]; 30 | writeln("name: ", name); 31 | auto dataset = execute_query(db, format!"SELECT * FROM %s"(name)); 32 | writeln(dataset); 33 | } 34 | 35 | // make dummy data 36 | auto request = chatCompletionRequest("gpt-4o-mini", [ 37 | systemChatMessage("You are a helpful SQL assistant."), 38 | userChatMessage( 39 | "Create a sample database table using sqlite3 with dummy product data for a car dealership in Japan.") 40 | ], 400, 1); 41 | 42 | // define tools 43 | // dfmt off 44 | request.tools = [ 45 | ChatCompletionTool( 46 | "function", 47 | ChatCompletionFunction( 48 | "execute_query", 49 | "Executes a given SQL query using sqlite3. Supports various query types such as CREATE TABLE, SELECT, DELETE, etc. On successful execution of a SELECT query, it returns the fetched records.", 50 | JsonSchema.object_([ 51 | "query": JsonSchema.string_("statement"), 52 | ], ["query"]) 53 | ) 54 | ), 55 | ]; 56 | // dfmt on 57 | request.toolChoice = "auto"; 58 | 59 | foreach (completionCount; 0 .. 
5) // max completions 60 | { 61 | writeln("start: ", completionCount + 1); 62 | scope (exit) 63 | writeln("end"); 64 | 65 | auto response = client.chatCompletion(request); 66 | assert(response.choices.length > 0); 67 | 68 | ChatMessage responseMessage = response.choices[0].message; 69 | 70 | import mir.algebraic; 71 | 72 | if (responseMessage.toolCalls.length == 0) 73 | { 74 | writeln("Answer: ", responseMessage.content); 75 | break; 76 | } 77 | 78 | request.messages ~= responseMessage; 79 | 80 | import mir.deser.json; 81 | import mir.ser.json; 82 | 83 | foreach (toolCall; responseMessage.toolCalls) 84 | { 85 | switch (toolCall.function_.name) 86 | { 87 | case "execute_query": 88 | auto params = deserializeJson!ExecuteQueryParams(toolCall.function_.arguments); 89 | writefln!"query: '%s'"(params.query); 90 | auto queryResult = execute_query(db, params.query); 91 | request.messages ~= toolChatMessage(toolCall.function_.name, serializeJson(queryResult), toolCall.id); 92 | break; 93 | default: 94 | writefln!"Error: function(%s) not found"(toolCall.function_.name); 95 | break; 96 | } 97 | } 98 | } 99 | } 100 | 101 | // functions 102 | 103 | struct ExecuteQueryParams 104 | { 105 | string query; 106 | } 107 | 108 | JsonValue[] execute_query(scope Database db, string query) 109 | { 110 | JsonValue[] records; 111 | db.run(query, (ResultRange results) { 112 | foreach (result; results) 113 | { 114 | JsonValue[string] record; 115 | foreach (i; 0 .. 
result.length) 116 | { 117 | final switch (result.columnType(i)) 118 | { 119 | case SqliteType.INTEGER: 120 | record[result.columnName(i)] = JsonValue(result.peek!long(i)); 121 | break; 122 | case SqliteType.FLOAT: 123 | record[result.columnName(i)] = JsonValue(result.peek!double(i)); 124 | break; 125 | 126 | case SqliteType.TEXT: 127 | record[result.columnName(i)] = JsonValue(result.peek!string(i)); 128 | break; 129 | 130 | case SqliteType.BLOB: 131 | record[result.columnName(i)] = JsonValue(""); 132 | //record[result.columnName(i)] = result.peek!(Blob, PeekMode.copy)(index); 133 | break; 134 | 135 | case SqliteType.NULL: 136 | record[result.columnName(i)] = JsonValue(null); 137 | break; 138 | } 139 | } 140 | records ~= JsonValue(record); 141 | } 142 | return true; 143 | }); 144 | return records; 145 | } 146 | -------------------------------------------------------------------------------- /examples/chat_tools/dub.sdl: -------------------------------------------------------------------------------- 1 | name "chat_tools" 2 | description "A minimal D application." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." 
-------------------------------------------------------------------------------- /examples/chat_tools/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "mir-algorithm": "3.22.3", 5 | "mir-core": "1.7.1", 6 | "mir-cpuid": "1.2.11", 7 | "mir-ion": "2.3.3", 8 | "openai-d": {"path":"../.."}, 9 | "silly": "1.1.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/chat_tools/source/app.d: -------------------------------------------------------------------------------- 1 | module app; 2 | 3 | import std.stdio; 4 | 5 | import openai; 6 | 7 | void main() 8 | { 9 | // If the argument Config is omitted, it is read from an environment variable 'OPENAI_API_KEY' 10 | // auto config = OpenAIClientConfig.fromFile("config.json"); 11 | // auto client = new OpenAIClient(config); 12 | auto client = new OpenAIClient; 13 | 14 | auto request = chatCompletionRequest("gpt-4o-mini", [ 15 | developerChatMessage("You are a helpful assistant."), 16 | userChatMessage("calc 3 + 5 - 2.5") 17 | ], 200, 1); 18 | 19 | // define tools 20 | // dfmt off 21 | request.tools = [ 22 | ChatCompletionTool("function", 23 | ChatCompletionFunction( 24 | "add", "add 2 numbers", 25 | JsonSchema.object_([ 26 | "a": JsonSchema.number_("lhs"), 27 | "b": JsonSchema.number_("rhs"), 28 | ], ["a", "b"], false), 29 | true 30 | ) 31 | ), 32 | ChatCompletionTool("function", 33 | ChatCompletionFunction( 34 | "sub", "subtract 2 numbers", 35 | JsonSchema.object_([ 36 | "a": JsonSchema.number_("lhs"), 37 | "b": JsonSchema.number_("rhs"), 38 | ], ["a", "b"], false), 39 | true 40 | ) 41 | ), 42 | ]; 43 | // dfmt on 44 | request.toolChoice = "required"; 45 | 46 | // setup tools 47 | import mir.ser.json; 48 | import mir.deser.json; 49 | 50 | alias ToolDelegate = string delegate(in string arguments); 51 | ToolDelegate[string] availableTools; 52 | availableTools["add"] = (in 
string arguments) { 53 | return serializeJson(add(deserializeJson!TwoOperands(arguments))); 54 | }; 55 | availableTools["sub"] = (in string arguments) { 56 | return serializeJson(sub(deserializeJson!TwoOperands(arguments))); 57 | }; 58 | 59 | foreach (completionCount; 0 .. 10) // max completions 60 | { 61 | auto response = client.chatCompletion(request); 62 | assert(response.choices.length > 0); 63 | 64 | ChatMessage responseMessage = response.choices[0].message; 65 | 66 | if (responseMessage.toolCalls.length > 0) 67 | { 68 | request.messages ~= responseMessage; 69 | 70 | int toolCallCount = 0; 71 | foreach (toolCall; responseMessage.toolCalls) 72 | { 73 | const toolFunctionName = toolCall.function_.name; 74 | if (toolFunctionName in availableTools) 75 | { 76 | writefln!"Tool call: %s(%s)"(toolFunctionName, toolCall.function_.arguments); 77 | 78 | toolCallCount++; 79 | auto toolFunction = availableTools[toolFunctionName]; 80 | auto resultContent = toolFunction(toolCall.function_.arguments); 81 | 82 | request.messages ~= toolChatMessage(toolFunctionName, resultContent, toolCall.id); 83 | } 84 | else 85 | { 86 | writefln!"Error: function(%s) not found"(toolFunctionName); 87 | break; 88 | } 89 | } 90 | if (toolCallCount > 0) 91 | { 92 | request.toolChoice = "auto"; 93 | response = client.chatCompletion(request); 94 | } 95 | } 96 | else 97 | { 98 | writefln!"Response: %s"(responseMessage.content); 99 | return; 100 | } 101 | } 102 | } 103 | 104 | // define functions 105 | struct TwoOperands 106 | { 107 | double a; 108 | double b; 109 | } 110 | 111 | double add(in TwoOperands args) 112 | { 113 | return args.a + args.b; 114 | } 115 | 116 | double sub(in TwoOperands args) 117 | { 118 | return args.a - args.b; 119 | } 120 | -------------------------------------------------------------------------------- /examples/chat_vision/.gitattributes: -------------------------------------------------------------------------------- 1 | *.d text 2 | *.sdl text 3 | *.json text 4 | 
-------------------------------------------------------------------------------- /examples/chat_vision/assets/cat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lempiji/openai-d/7cb7dd581ba24f05829f5e1b7f3ccbb1a612acc8/examples/chat_vision/assets/cat.png -------------------------------------------------------------------------------- /examples/chat_vision/dub.sdl: -------------------------------------------------------------------------------- 1 | name "chat_vision" 2 | description "A minimal D application." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." 7 | -------------------------------------------------------------------------------- /examples/chat_vision/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "mir-algorithm": "3.22.3", 5 | "mir-core": "1.7.1", 6 | "mir-cpuid": "1.2.11", 7 | "mir-ion": "2.3.3", 8 | "openai-d": {"path":"../.."}, 9 | "silly": "1.1.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/chat_vision/source/app.d: -------------------------------------------------------------------------------- 1 | import std.stdio; 2 | import std.file : read; 3 | import std.base64; 4 | import std.conv : to; 5 | 6 | import openai; 7 | 8 | void main() 9 | { 10 | // If the argument Config is omitted, it is read from an environment variable 'OPENAI_API_KEY' 11 | auto client = new OpenAIClient; 12 | 13 | // Load local image and convert to data URL 14 | auto bytes = cast(ubyte[]) read("assets/cat.png"); 15 | string dataUri = to!string("data:image/png;base64," ~ Base64.encode(bytes)); 16 | 17 | auto request = chatCompletionRequest(openai.GPT4OMini /* gpt-4o */ , [ 18 | userChatMessageWithImages("この画像の内容を詳しく説明してください。", [ 19 | dataUri 20 | ]) 21 | ], 1024, 0); 22 | 23 | auto response = 
client.chatCompletion(request); 24 | assert(response.choices.length > 0); 25 | 26 | writeln(response.choices[0].message.content); 27 | } 28 | -------------------------------------------------------------------------------- /examples/completion/.gitignore: -------------------------------------------------------------------------------- 1 | .dub 2 | docs.json 3 | __dummy.html 4 | docs/ 5 | /completion 6 | completion.so 7 | completion.dylib 8 | completion.dll 9 | completion.a 10 | completion.lib 11 | completion-test-* 12 | *.exe 13 | *.pdb 14 | *.o 15 | *.obj 16 | *.lst 17 | -------------------------------------------------------------------------------- /examples/completion/config.json: -------------------------------------------------------------------------------- 1 | {"apiKey":"","organization":""} -------------------------------------------------------------------------------- /examples/completion/dub.sdl: -------------------------------------------------------------------------------- 1 | name "completion" 2 | description "A minimal D application." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." 
-------------------------------------------------------------------------------- /examples/completion/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "mir-algorithm": "3.22.3", 5 | "mir-core": "1.7.1", 6 | "mir-cpuid": "1.2.11", 7 | "mir-ion": "2.3.3", 8 | "openai-d": {"path":"../.."}, 9 | "silly": "1.1.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/completion/source/app.d: -------------------------------------------------------------------------------- 1 | import std.stdio; 2 | import std.string; 3 | 4 | import openai; 5 | 6 | void main() 7 | { 8 | // If the argument Config is omitted, it is read from an environment variable 'OPENAI_API_KEY' 9 | // auto config = OpenAIClientConfig.fromFile("config.json"); 10 | // auto client = new OpenAIClient(config); 11 | auto client = new OpenAIClient; 12 | 13 | // POST /completions 14 | auto message = completionRequest(openai.GPT3Dot5TurboInstruct, "This is a", 16, 0); 15 | message.stop = [".", "\n"]; 16 | 17 | auto response = client.completion(message); 18 | assert(response.choices.length > 0); 19 | 20 | writeln(response.choices[0].text.chomp()); 21 | } 22 | -------------------------------------------------------------------------------- /examples/embedding/.gitignore: -------------------------------------------------------------------------------- 1 | .dub 2 | docs.json 3 | __dummy.html 4 | docs/ 5 | /embedding 6 | embedding.so 7 | embedding.dylib 8 | embedding.dll 9 | embedding.a 10 | embedding.lib 11 | embedding-test-* 12 | *.exe 13 | *.pdb 14 | *.o 15 | *.obj 16 | *.lst 17 | -------------------------------------------------------------------------------- /examples/embedding/config.json: -------------------------------------------------------------------------------- 1 | {"apiKey":"","organization":""} 
-------------------------------------------------------------------------------- /examples/embedding/dub.sdl: -------------------------------------------------------------------------------- 1 | name "embedding" 2 | description "A minimal D application." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." -------------------------------------------------------------------------------- /examples/embedding/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "mir-algorithm": "3.22.3", 5 | "mir-core": "1.7.1", 6 | "mir-cpuid": "1.2.11", 7 | "mir-ion": "2.3.3", 8 | "openai-d": {"path":"../.."}, 9 | "silly": "1.1.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/embedding/source/app.d: -------------------------------------------------------------------------------- 1 | import std.stdio; 2 | 3 | import openai; 4 | 5 | void main() 6 | { 7 | // If the argument Config is omitted, it is read from an environment variable 'OPENAI_API_KEY' 8 | // auto config = OpenAIClientConfig.fromFile("config.json"); 9 | // auto client = new OpenAIClient(config); 10 | auto client = new OpenAIClient; 11 | 12 | auto request = embeddingRequest(openai.AdaEmbeddingV2, "Hello, world!"); 13 | auto response = client.embedding(request); 14 | float[] embedding = response.data[0].embedding; 15 | writeln(embedding.length); // text-embedding-ada-002 -> 1536 16 | 17 | auto request2 = embeddingRequest(openai.TextEmbedding3Large, "D is a general-purpose programming language with static typing, systems-level access, and C-like syntax. 
With the D Programming Language, write fast, read fast, and run fast.", 512); 18 | auto response2 = client.embedding(request2); 19 | float[] embedding2 = response2.data[0].embedding; 20 | writeln(embedding2.length); // 512 21 | 22 | auto request3 = embeddingRequest(openai.TextEmbedding3Small, "D is a general-purpose programming language with static typing, systems-level access, and C-like syntax. With the D Programming Language, write fast, read fast, and run fast.", 512); 23 | auto response3 = client.embedding(request3); 24 | float[] embedding3 = response3.data[0].embedding; 25 | writeln(embedding3.length); // 512 26 | 27 | import std.numeric : cosineSimilarity; 28 | 29 | const similarity = cosineSimilarity(embedding2, embedding3); 30 | writeln(similarity); 31 | } 32 | -------------------------------------------------------------------------------- /examples/models/.gitignore: -------------------------------------------------------------------------------- 1 | .dub 2 | docs.json 3 | __dummy.html 4 | docs/ 5 | /chat 6 | chat.so 7 | chat.dylib 8 | chat.dll 9 | chat.a 10 | chat.lib 11 | chat-test-* 12 | *.exe 13 | *.pdb 14 | *.o 15 | *.obj 16 | *.lst 17 | -------------------------------------------------------------------------------- /examples/models/config.json: -------------------------------------------------------------------------------- 1 | {"apiKey":"","organization":""} -------------------------------------------------------------------------------- /examples/models/dub.sdl: -------------------------------------------------------------------------------- 1 | name "models" 2 | description "A minimal D application." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." 
-------------------------------------------------------------------------------- /examples/models/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "mir-algorithm": "3.22.3", 5 | "mir-core": "1.7.1", 6 | "mir-cpuid": "1.2.11", 7 | "mir-ion": "2.3.3", 8 | "openai-d": {"path":"../.."}, 9 | "silly": "1.1.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/models/source/app.d: -------------------------------------------------------------------------------- 1 | module app; 2 | 3 | import std.stdio; 4 | import std.algorithm; 5 | import std.array; 6 | 7 | import openai; 8 | 9 | void main() 10 | { 11 | // If the argument Config is omitted, it is read from an environment variable 'OPENAI_API_KEY' 12 | // auto config = OpenAIClientConfig.fromFile("config.json"); 13 | // auto client = new OpenAIClient(config); 14 | auto client = new OpenAIClient; 15 | 16 | auto models = client.listModels(); 17 | auto modelIds = models.data 18 | .map!"a.id" 19 | .filter!(a => a.canFind("ada")) 20 | .array(); 21 | sort(modelIds); 22 | modelIds.each!writeln(); 23 | } 24 | -------------------------------------------------------------------------------- /examples/moderation/.gitignore: -------------------------------------------------------------------------------- 1 | .dub 2 | docs.json 3 | __dummy.html 4 | docs/ 5 | /completion 6 | completion.so 7 | completion.dylib 8 | completion.dll 9 | completion.a 10 | completion.lib 11 | completion-test-* 12 | *.exe 13 | *.pdb 14 | *.o 15 | *.obj 16 | *.lst 17 | -------------------------------------------------------------------------------- /examples/moderation/config.json: -------------------------------------------------------------------------------- 1 | {"apiKey":"","organization":""} -------------------------------------------------------------------------------- 
/examples/moderation/dub.sdl: -------------------------------------------------------------------------------- 1 | name "moderation" 2 | description "A minimal D application." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." -------------------------------------------------------------------------------- /examples/moderation/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "mir-algorithm": "3.22.3", 5 | "mir-core": "1.7.1", 6 | "mir-cpuid": "1.2.11", 7 | "mir-ion": "2.3.3", 8 | "openai-d": {"path":"../.."}, 9 | "silly": "1.1.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/moderation/source/app.d: -------------------------------------------------------------------------------- 1 | module app; 2 | 3 | import std.stdio; 4 | import std.string; 5 | import openai; 6 | 7 | void main() 8 | { 9 | // If the argument Config is omitted, it is read from an environment variable 'OPENAI_API_KEY' 10 | // auto config = OpenAIClientConfig.fromFile("config.json"); 11 | // auto client = new OpenAIClient(config); 12 | auto client = new OpenAIClient; 13 | 14 | // POST /moderations 15 | // Example input: https://platform.openai.com/docs/api-reference/moderations/create 16 | const request = moderationRequest("I want to kill them."); 17 | auto response = client.moderation(request); 18 | 19 | writeln(response.results.length); 20 | writeln(response.results[0]); 21 | 22 | if (response.results[0].flagged) 23 | writeln("Warning!"); 24 | else 25 | writeln("Probably safe."); 26 | } 27 | -------------------------------------------------------------------------------- /examples/reasoning/config.json: -------------------------------------------------------------------------------- 1 | {"apiKey":"","organization":""} 2 | -------------------------------------------------------------------------------- 
/examples/reasoning/dub.sdl: -------------------------------------------------------------------------------- 1 | name "reasoning" 2 | description "Simple reasoning example." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." 7 | -------------------------------------------------------------------------------- /examples/reasoning/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "mir-algorithm": "3.22.3", 5 | "mir-core": "1.7.1", 6 | "mir-cpuid": "1.2.11", 7 | "mir-ion": "2.3.3", 8 | "openai-d": {"path":"../.."}, 9 | "silly": "1.1.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/reasoning/source/app.d: -------------------------------------------------------------------------------- 1 | import std.stdio; 2 | import openai; 3 | 4 | void main() 5 | { 6 | auto client = new OpenAIClient; 7 | auto request = chatCompletionRequest( 8 | openai.O4Mini, // or openai.O3 9 | [ 10 | systemChatMessage("You are a reasoning assistant."), 11 | userChatMessage( 12 | "A snail is at the bottom of a 20\xE2\x80\x91meter well. " ~ 13 | "It climbs 5 meters each day and slips back 3 each night. " ~ 14 | "On which day does it escape? Explain step by step." 
15 | ) 16 | ], 17 | 1000, 1); // sets maxCompletionTokens and temperature 18 | request.reasoningEffort = ReasoningEffortLow; 19 | 20 | auto response = client.chatCompletion(request); 21 | writeln(response.choices[0].message.content); 22 | } 23 | -------------------------------------------------------------------------------- /examples/structured_output/.gitignore: -------------------------------------------------------------------------------- 1 | .dub 2 | docs.json 3 | __dummy.html 4 | docs/ 5 | /structured_output 6 | structured_output.so 7 | structured_output.dylib 8 | structured_output.dll 9 | structured_output.a 10 | structured_output.lib 11 | structured_output-test-* 12 | *.exe 13 | *.pdb 14 | *.o 15 | *.obj 16 | *.lst 17 | -------------------------------------------------------------------------------- /examples/structured_output/config.json: -------------------------------------------------------------------------------- 1 | {"apiKey":"","organization":""} -------------------------------------------------------------------------------- /examples/structured_output/dub.sdl: -------------------------------------------------------------------------------- 1 | name "structured_output" 2 | description "A minimal D application." 3 | authors "lempiji" 4 | license "MIT" 5 | 6 | dependency "openai-d" path="../.." 
-------------------------------------------------------------------------------- /examples/structured_output/dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "mir-algorithm": "3.22.3", 5 | "mir-core": "1.7.1", 6 | "mir-cpuid": "1.2.11", 7 | "mir-ion": "2.3.3", 8 | "openai-d": {"path":"../.."}, 9 | "silly": "1.1.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /examples/structured_output/source/app.d: -------------------------------------------------------------------------------- 1 | import std.stdio; 2 | 3 | import openai; 4 | import mir.serde; 5 | import mir.deser.json; 6 | import mir.algebraic; 7 | 8 | void main() 9 | { 10 | // If the argument Config is omitted, it is read from an environment variable 'OPENAI_API_KEY' 11 | // auto config = OpenAIClientConfig.fromFile("config.json"); 12 | // auto client = new OpenAIClient(config); 13 | auto client = new OpenAIClient; 14 | 15 | auto request = chatCompletionRequest(openai.GPT4OMini, [ 16 | developerChatMessage("You are a helpful math tutor. 
Guide the user through the solution step by step."), 17 | userChatMessage("how can I solve 8x + 7 = -23") 18 | ], 16, 0); 19 | 20 | request.responseFormat = jsonResponseFormat("mathResponse", parseJsonSchema!MathResponse); 21 | 22 | auto response = client.chatCompletion(request); 23 | assert(response.choices.length > 0); 24 | 25 | // dfmt off 26 | response.choices[0].message.content.match!( 27 | (string content) { 28 | writeln(content); 29 | 30 | auto mathResponse = deserializeJson!MathResponse(content); 31 | writeln(mathResponse); 32 | }, 33 | _ => writeln("Unexpected response type") 34 | ); 35 | // dfmt on 36 | } 37 | 38 | /+ 39 | class Step(BaseModel): 40 | explanation: str 41 | output: str 42 | 43 | class MathResponse(BaseModel): 44 | steps: list[Step] 45 | final_answer: str 46 | +/ 47 | 48 | struct Step 49 | { 50 | @serdeRequired 51 | string explanation; 52 | @serdeRequired 53 | string output; 54 | } 55 | 56 | struct MathResponse 57 | { 58 | @serdeRequired 59 | Step[] steps; 60 | @serdeRequired 61 | string final_answer; 62 | } 63 | -------------------------------------------------------------------------------- /source/openai/audio.d: -------------------------------------------------------------------------------- 1 | /** 2 | OpenAI API Audio 3 | 4 | Standards: https://platform.openai.com/docs/api-reference/audio 5 | */ 6 | module openai.audio; 7 | 8 | import mir.serde; 9 | 10 | @safe: 11 | 12 | // ----------------------------------------------------------------------------- 13 | // Enumerations 14 | // ----------------------------------------------------------------------------- 15 | 16 | /// Response format `json` used by transcription and translation endpoints. 17 | enum AudioResponseFormatJson = "json"; 18 | /// Response format `text`. 19 | enum AudioResponseFormatText = "text"; 20 | /// Response format `srt` for subtitle output. 21 | enum AudioResponseFormatSrt = "srt"; 22 | /// Response format `verbose_json` with word-level timestamps. 
23 | enum AudioResponseFormatVerboseJson = "verbose_json"; 24 | /// Response format `vtt` for WebVTT subtitles. 25 | enum AudioResponseFormatVtt = "vtt"; 26 | 27 | /// Speech format `mp3`. 28 | enum SpeechFormatMp3 = "mp3"; 29 | /// Speech format `opus`. 30 | enum SpeechFormatOpus = "opus"; 31 | /// Speech format `aac`. 32 | enum SpeechFormatAac = "aac"; 33 | /// Speech format `flac`. 34 | enum SpeechFormatFlac = "flac"; 35 | /// Speech format `wav`. 36 | enum SpeechFormatWav = "wav"; 37 | /// Speech format `pcm`. 38 | enum SpeechFormatPcm = "pcm"; 39 | 40 | /// Text-to-speech voice `alloy`. 41 | enum VoiceAlloy = "alloy"; 42 | /// Voice `ash`. 43 | enum VoiceAsh = "ash"; 44 | /// Voice `ballad`. 45 | enum VoiceBallad = "ballad"; 46 | /// Voice `coral`. 47 | enum VoiceCoral = "coral"; 48 | /// Voice `echo`. 49 | enum VoiceEcho = "echo"; 50 | /// Voice `fable`. 51 | enum VoiceFable = "fable"; 52 | /// Voice `onyx`. 53 | enum VoiceOnyx = "onyx"; 54 | /// Voice `nova`. 55 | enum VoiceNova = "nova"; 56 | /// Voice `sage`. 57 | enum VoiceSage = "sage"; 58 | /// Voice `shimmer`. 59 | enum VoiceShimmer = "shimmer"; 60 | /// Voice `verse`. 61 | enum VoiceVerse = "verse"; 62 | 63 | /// Include token log probabilities in the response. 64 | enum TranscriptionIncludeLogprobs = "logprobs"; 65 | 66 | /// Timestamp granularity `word`. 67 | enum TranscriptionTimestampGranularityWord = "word"; 68 | /// Timestamp granularity `segment`. 69 | enum TranscriptionTimestampGranularitySegment = "segment"; 70 | 71 | // ----------------------------------------------------------------------------- 72 | // Requests 73 | // ----------------------------------------------------------------------------- 74 | 75 | /// Request for text-to-speech generation. 76 | struct SpeechRequest 77 | { 78 | /// Model to use. 79 | string model; 80 | 81 | /// Text input. 82 | string input; 83 | 84 | /// Voice id. 85 | string voice; 86 | 87 | /// Additional instructions. 
88 | @serdeIgnoreDefault 89 | string instructions; 90 | 91 | /// Response format. 92 | @serdeIgnoreDefault 93 | @serdeKeys("response_format") 94 | string responseFormat = SpeechFormatMp3; 95 | 96 | /// Playback speed. 97 | @serdeIgnoreDefault 98 | double speed = 1; 99 | } 100 | 101 | /// Convenience constructor for `SpeechRequest`. 102 | SpeechRequest speechRequest(string model, string input, string voice) 103 | { 104 | auto request = SpeechRequest(); 105 | request.model = model; 106 | request.input = input; 107 | request.voice = voice; 108 | return request; 109 | } 110 | 111 | /// Request for audio transcription. 112 | struct TranscriptionRequest 113 | { 114 | /// Path to the audio file. 115 | string file; 116 | 117 | /// Model to use. 118 | string model; 119 | 120 | /// Language of the input audio. 121 | @serdeIgnoreDefault 122 | string language; 123 | 124 | /// Optional prompt. 125 | @serdeIgnoreDefault 126 | string prompt; 127 | 128 | /// Response format. 129 | @serdeIgnoreDefault 130 | @serdeKeys("response_format") 131 | string responseFormat = AudioResponseFormatJson; 132 | 133 | /// Sampling temperature. 134 | @serdeIgnoreDefault 135 | double temperature = 0; 136 | 137 | /// Extra items to include in the response. 138 | @serdeIgnoreDefault 139 | @serdeKeys("include") 140 | string[] include; 141 | 142 | /// Timestamp granularities to return. 143 | @serdeIgnoreDefault 144 | @serdeKeys("timestamp_granularities") 145 | string[] timestampGranularities = [TranscriptionTimestampGranularitySegment]; 146 | 147 | /// Stream the response using SSE. 148 | @serdeIgnoreDefault 149 | bool stream = false; 150 | } 151 | 152 | /// Convenience constructor for `TranscriptionRequest`. 153 | TranscriptionRequest transcriptionRequest(string file, string model) 154 | { 155 | auto request = TranscriptionRequest(); 156 | request.file = file; 157 | request.model = model; 158 | return request; 159 | } 160 | 161 | /// Request for audio translation. 
struct TranslationRequest
{
    /// Path to the audio file to translate.
    string file;

    /// Model to use (e.g. `whisper-1`).
    string model;

    /// Optional text to guide the model's style; omitted from JSON when empty.
    @serdeIgnoreDefault
    string prompt;

    /// Response format; defaults to `json` and is omitted from JSON when left at the default.
    @serdeIgnoreDefault
    @serdeKeys("response_format")
    string responseFormat = AudioResponseFormatJson;

    /// Sampling temperature in [0, 1]; omitted from JSON when left at the default 0.
    @serdeIgnoreDefault
    double temperature = 0;
}

/// Convenience constructor for `TranslationRequest`.
/// Params:
///   file  = path to the audio file
///   model = model name (e.g. `whisper-1`)
/// Returns: a request with all optional fields left at their defaults.
TranslationRequest translationRequest(string file, string model)
{
    auto request = TranslationRequest();
    request.file = file;
    request.model = model;
    return request;
}

// -----------------------------------------------------------------------------
// Responses
// -----------------------------------------------------------------------------

/// Basic transcription or translation response (`json` response format).
struct AudioTextResponse
{
    /// The generated text.
    string text;
    /// Optional token log probabilities (present only when log probabilities were requested).
    @serdeOptional
    TranscriptionLogProb[] logprobs;
}

/// Details about token log probabilities.
struct TranscriptionLogProb
{
    /// Transcribed token.
    string token;
    /// Log probability of the token.
    double logprob;
    /// UTF-8 bytes of the token.
    uint[] bytes;
}

/// Detailed word timestamps (`verbose_json` response format).
struct TranscriptionWord
{
    /// The transcribed word.
    string word;
    /// Start time of the word in seconds.
    double start;
    /// End time of the word in seconds.
    double end;
}

/// Detailed segment information.
struct TranscriptionSegment
{
    /// Unique identifier of the segment.
    int id;
    /// Seek offset of the segment.
    int seek;
    /// Start time of the segment in seconds.
    double start;
    /// End time of the segment in seconds.
    double end;
    /// Text content of the segment.
    string text;
    /// Token IDs of the text content.
    int[] tokens;
    /// Temperature parameter used for generating the segment.
    double temperature;
    /// Average logprob of the segment's tokens.
    @serdeKeys("avg_logprob")
    double avgLogprob;
    /// Compression ratio of the segment.
    @serdeKeys("compression_ratio")
    double compressionRatio;
    /// Probability of no speech in the segment.
    @serdeKeys("no_speech_prob")
    double noSpeechProb;
}

/// Verbose transcription response (`verbose_json` response format).
struct TranscriptionVerboseResponse
{
    /// Language of the input audio (e.g. "english").
    string language;
    /// Duration of the input audio in seconds.
    double duration;
    /// The full transcribed text.
    string text;
    /// Word-level timestamps.
    TranscriptionWord[] words;
    /// Segment-level details.
    TranscriptionSegment[] segments;
}

// -----------------------------------------------------------------------------
// Unit tests
// -----------------------------------------------------------------------------

// Default speech request serializes only the three required fields.
unittest
{
    auto req = speechRequest("gpt-4o-mini-tts", "Hello", VoiceAlloy);
    import mir.ser.json : serializeJson;

    assert(serializeJson(req) ==
        `{"model":"gpt-4o-mini-tts","input":"Hello","voice":"alloy"}`);
}

// Default transcription request serializes only `file` and `model`.
unittest
{
    auto req = transcriptionRequest("audio.mp3", "whisper-1");
    import mir.ser.json : serializeJson;

    assert(serializeJson(req) ==
        `{"file":"audio.mp3","model":"whisper-1"}`);
}

// Default translation request serializes only `file` and `model`.
unittest
{
    auto req = translationRequest("audio.mp3", "whisper-1");
    import mir.ser.json : serializeJson;

    assert(serializeJson(req) ==
        `{"file":"audio.mp3","model":"whisper-1"}`);
}

// Non-default `include` / `timestamp_granularities` values are serialized.
unittest
{
    auto req = TranscriptionRequest("audio.mp3", "whisper-1");
    req.include = [TranscriptionIncludeLogprobs];
    req.timestampGranularities = [
        TranscriptionTimestampGranularityWord,
        TranscriptionTimestampGranularitySegment
    ];
    import mir.ser.json : serializeJson;

    assert(serializeJson(req) ==
`{"file":"audio.mp3","model":"whisper-1","include":["logprobs"],"timestamp_granularities":["word","segment"]}`); 297 | } 298 | 299 | unittest 300 | { 301 | import mir.deser.json : deserializeJson; 302 | 303 | const json = `{"text":"hello"}`; 304 | auto res = deserializeJson!AudioTextResponse(json); 305 | assert(res.text == "hello"); 306 | } 307 | 308 | unittest 309 | { 310 | import mir.deser.json : deserializeJson; 311 | 312 | const json = `{"language":"english","duration":1.2,"text":"hello","words":[{"word":"hello","start":0.0,"end":0.5}],"segments":[{"id":0,"seek":0,"start":0.0,"end":0.5,"text":"hello","tokens":[1],"temperature":0.0,"avg_logprob":-0.1,"compression_ratio":1.0,"no_speech_prob":0.0}]}`; 313 | auto res = deserializeJson!TranscriptionVerboseResponse(json); 314 | assert(res.text == "hello"); 315 | assert(res.words.length == 1); 316 | assert(res.segments.length == 1); 317 | } 318 | -------------------------------------------------------------------------------- /source/openai/chat.d: -------------------------------------------------------------------------------- 1 | /** 2 | OpenAI API Chat Completions 3 | 4 | Standards: https://platform.openai.com/docs/api-reference/completions 5 | */ 6 | module openai.chat; 7 | 8 | import mir.algebraic; 9 | import mir.serde; 10 | import mir.string_map; 11 | import std.math; 12 | 13 | import openai.common; 14 | import openai.completion : CompletionUsage; 15 | 16 | @safe: 17 | 18 | @serdeIgnoreUnexpectedKeys 19 | struct ChatMessageToolCall 20 | { 21 | string id; 22 | 23 | string type = "function"; 24 | 25 | @serdeKeys("function") 26 | ChatMessageFunctionCall function_; 27 | } 28 | 29 | @serdeIgnoreUnexpectedKeys 30 | struct ChatMessageFunctionCall 31 | { 32 | /** 33 | * The name of the function to call. 34 | */ 35 | @serdeOptional 36 | @serdeIgnoreDefault 37 | string name; 38 | 39 | /** 40 | * The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. 41 | */ 42 | @serdeOptional 43 | @serdeIgnoreDefault 44 | string arguments; 45 | } 46 | 47 | /// 48 | struct ChatCompletionFunction 49 | { 50 | /** 51 | * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 52 | */ 53 | string name; 54 | 55 | /** 56 | * The description of what the function does. 57 | */ 58 | string description; 59 | 60 | /** 61 | * The parameters the functions accepts, described as a JSON Schema object. 62 | */ 63 | JsonValue parameters; 64 | 65 | /** 66 | * The parameters the functions accepts, described as a JSON Schema object. 67 | */ 68 | @serdeOptional 69 | @serdeIgnoreDefault 70 | bool strict; 71 | } 72 | 73 | /// 74 | @serdeIgnoreUnexpectedKeys 75 | struct ChatCompletionTool 76 | { 77 | string type = "function"; 78 | 79 | @serdeKeys("function") 80 | ChatCompletionFunction function_; 81 | } 82 | 83 | /// 84 | @serdeIgnoreUnexpectedKeys 85 | struct ChatCompletionToolChoiceInfo 86 | { 87 | string type = "function"; 88 | 89 | @serdeKeys("function") 90 | ChatCompletionToolFunctionChoice function_; 91 | } 92 | 93 | /// 94 | @serdeIgnoreUnexpectedKeys 95 | struct ChatCompletionToolFunctionChoice 96 | { 97 | string name; 98 | } 99 | 100 | /// 101 | alias ChatCompletionToolChoice = Algebraic!(string, ChatCompletionToolChoiceInfo); 102 | 103 | /// 104 | @serdeIgnoreUnexpectedKeys 105 | struct ChatUserMessageTextContent 106 | { 107 | string type = "text"; 108 | string text; 109 | } 110 | 111 | /// 112 | @serdeIgnoreUnexpectedKeys 113 | struct ChatUserMessageImageUrl 114 | { 115 | string url; 116 | 117 | @serdeOptional 118 | @serdeIgnoreDefault 119 | string detail; 120 | } 121 | 122 | /// 123 | @serdeIgnoreUnexpectedKeys 124 | struct ChatUserMessageImageContent 125 | { 126 | string type = 
"image_url"; 127 | 128 | @serdeKeys("image_url") 129 | ChatUserMessageImageUrl imageUrl; 130 | } 131 | 132 | /// 133 | alias ChatUserMessageContentItem = Algebraic!(ChatUserMessageTextContent, ChatUserMessageImageContent); 134 | /// 135 | alias ChatMessageContent = Algebraic!(typeof(null), string, ChatUserMessageContentItem[]); 136 | 137 | /// 138 | @serdeIgnoreUnexpectedKeys 139 | struct ChatMessage 140 | { 141 | /// 142 | @serdeOptional 143 | @serdeIgnoreDefault 144 | string role; 145 | 146 | /// **Required** 147 | ChatMessageContent content; 148 | 149 | /// Optional 150 | @serdeOptional 151 | @serdeIgnoreDefault 152 | string name = null; 153 | 154 | /// Optional 155 | @serdeKeys("function_call") 156 | @serdeOptional 157 | @serdeIgnoreDefault 158 | Nullable!ChatMessageFunctionCall functionCall = null; 159 | 160 | /// Optional 161 | @serdeKeys("tool_calls") 162 | @serdeOptional 163 | @serdeIgnoreDefault 164 | ChatMessageToolCall[] toolCalls; 165 | 166 | /// Optional 167 | @serdeKeys("tool_call_id") 168 | @serdeOptional 169 | @serdeIgnoreDefault 170 | string toolCallId; 171 | 172 | /// concat all text content in the message 173 | string getAllTextContent() const 174 | { 175 | import std.array : Appender; 176 | 177 | Appender!string appender; 178 | 179 | // dfmt off 180 | content.optionalMatch!( 181 | (string text) { 182 | appender.put(text); 183 | appender.put("\n"); 184 | }, 185 | (ChatUserMessageContentItem[] items) { 186 | foreach (item; items) 187 | { 188 | item.optionalMatch!( 189 | (ChatUserMessageTextContent textContent) { 190 | appender.put(textContent.text); 191 | appender.put("\n"); 192 | } 193 | ); 194 | } 195 | } 196 | ); 197 | // dfmt on 198 | 199 | return appender.data; 200 | } 201 | } 202 | 203 | /// 204 | ChatMessage systemChatMessage(string content, string name = null) 205 | { 206 | return ChatMessage("system", ChatMessageContent(content), name); 207 | } 208 | 209 | /// ditto 210 | ChatMessage developerChatMessage(string content, string name = 
null) 211 | { 212 | return ChatMessage("developer", ChatMessageContent(content), name); 213 | } 214 | 215 | /// ditto 216 | unittest 217 | { 218 | auto message = systemChatMessage("You are helpful AI assistant."); 219 | assert(message.role == "system"); 220 | assert(message.content.get!string() == "You are helpful AI assistant."); 221 | 222 | import mir.ser.json; 223 | 224 | assert(serializeJson(message) == `{"role":"system","content":"You are helpful AI assistant."}`); 225 | } 226 | 227 | /// ditto 228 | unittest 229 | { 230 | auto message = systemChatMessage("You are helpful AI assistant.", "ChatGPT"); 231 | assert(message.role == "system"); 232 | assert(message.content.get!string() == "You are helpful AI assistant."); 233 | assert(message.name == "ChatGPT"); 234 | 235 | import mir.ser.json; 236 | 237 | assert(serializeJson( 238 | message) == `{"role":"system","content":"You are helpful AI assistant.","name":"ChatGPT"}`); 239 | } 240 | 241 | /// ditto 242 | unittest 243 | { 244 | auto message = developerChatMessage("You are helpful AI assistant.", "ChatGPT"); 245 | assert(message.role == "developer"); 246 | assert(message.content.get!string() == "You are helpful AI assistant."); 247 | assert(message.name == "ChatGPT"); 248 | 249 | import mir.ser.json; 250 | 251 | assert(serializeJson( 252 | message) == `{"role":"developer","content":"You are helpful AI assistant.","name":"ChatGPT"}`); 253 | } 254 | 255 | /// 256 | ChatMessage userChatMessage(string content, string name = null) 257 | { 258 | return ChatMessage("user", ChatMessageContent(content), name); 259 | } 260 | 261 | /// ditto 262 | ChatMessage userChatMessage(string[] contents, string name = null) 263 | { 264 | auto items = new ChatUserMessageContentItem[](contents.length); 265 | foreach (i, message; contents) 266 | { 267 | items[i] = ChatUserMessageContentItem(ChatUserMessageTextContent("text", message)); 268 | } 269 | return ChatMessage("user", ChatMessageContent(items), name); 270 | } 271 | 272 | /// 
ditto
unittest
{
    auto message = userChatMessage("Hello, how can I help you?");
    assert(message.role == "user");
    assert(message.content.get!string() == "Hello, how can I help you?");

    import mir.ser.json;

    assert(serializeJson(message) == `{"role":"user","content":"Hello, how can I help you?"}`);
}

/// ditto
unittest
{
    auto message = userChatMessage("How does this work?", "User123");
    assert(message.role == "user");
    assert(message.content.get!string() == "How does this work?");
    assert(message.name == "User123");

    import mir.ser.json;

    assert(serializeJson(
        message) == `{"role":"user","content":"How does this work?","name":"User123"}`);
}

/// Builds a user message whose content is one text item followed by one
/// `image_url` item per entry of `imageUrls`.
ChatMessage userChatMessageWithImages(string text, string[] imageUrls, string name = null)
{
    ChatUserMessageContentItem[] contentItems;

    ChatUserMessageTextContent textContent;
    textContent.text = text;

    contentItems ~= ChatUserMessageContentItem(textContent);

    foreach (imageUrl; imageUrls)
    {
        ChatUserMessageImageContent imageContent;
        imageContent.imageUrl = ChatUserMessageImageUrl(imageUrl);
        contentItems ~= ChatUserMessageContentItem(imageContent);
    }

    return ChatMessage("user", ChatMessageContent(contentItems), name);
}

/// ditto
unittest
{
    string text = "Check out these images:";
    string[] imageUrls = [
        "https://example.com/image1.jpg", "https://example.com/image2.jpg"
    ];
    string name = "User123";

    auto message = userChatMessageWithImages(text, imageUrls, name);

    assert(message.role == "user");
    assert(message.name == name);

    auto content = message.content.get!(ChatUserMessageContentItem[]);

    assert(content.length == 3); // the text item plus the two image URLs
assert(content[0].get!ChatUserMessageTextContent().text == text); 336 | assert(content[1].get!ChatUserMessageImageContent().imageUrl.url == imageUrls[0]); 337 | assert(content[2].get!ChatUserMessageImageContent().imageUrl.url == imageUrls[1]); 338 | } 339 | 340 | /// ditto 341 | unittest 342 | { 343 | string text = "Check out these images:"; 344 | string[] imageUrls = [ 345 | "https://example.com/image1.jpg", "https://example.com/image2.jpg" 346 | ]; 347 | string name = "User12345"; 348 | 349 | auto message = userChatMessageWithImages(text, imageUrls, name); 350 | 351 | import mir.ser.json; 352 | 353 | string jsonString = serializeJson(message); 354 | 355 | string expectedJson = `{"role":"user","content":[{"type":"text","text":"Check out these images:"},{"type":"image_url","image_url":{"url":"https://example.com/image1.jpg"}},{"type":"image_url","image_url":{"url":"https://example.com/image2.jpg"}}],"name":"User12345"}`; 356 | 357 | assert(jsonString == expectedJson); 358 | } 359 | 360 | /// 361 | ChatMessage assistantChatMessage(string content, string name = null) 362 | { 363 | return ChatMessage("assistant", ChatMessageContent(content), name); 364 | } 365 | 366 | /// 367 | ChatMessage toolChatMessage(string name, string content, string toolCallId) 368 | { 369 | ChatMessage message; 370 | message.role = "tool"; 371 | message.name = name; 372 | message.content = content; 373 | message.toolCallId = toolCallId; 374 | return message; 375 | } 376 | 377 | /// 378 | deprecated("Deprecated in favor of toolChatMessage.") 379 | ChatMessage functionChatMessage(string functionName, string functionResponseJson) 380 | { 381 | return ChatMessage("function", ChatMessageContent(functionResponseJson), functionName); 382 | } 383 | 384 | /// 385 | struct ResponseFormatJsonSchema 386 | { 387 | /// 388 | string name; 389 | 390 | /// 391 | bool strict = true; 392 | 393 | /// 394 | JsonValue schema; 395 | } 396 | 397 | /// 398 | struct ResponseFormat 399 | { 400 | /// 401 | string type; 
402 | 403 | /// 404 | @serdeKeys("json_schema") 405 | @serdeOptional 406 | @serdeIgnoreDefault 407 | Nullable!ResponseFormatJsonSchema jsonSchema; 408 | } 409 | 410 | /// 411 | ResponseFormat jsonResponseFormat(string name, JsonValue jsonSchema) 412 | { 413 | auto format = ResponseFormat(); 414 | format.type = "json_schema"; 415 | format.jsonSchema = ResponseFormatJsonSchema(name, true, jsonSchema); 416 | return format; 417 | } 418 | 419 | /// 420 | struct ChatCompletionPredictionContentParam 421 | { 422 | string type = "content"; 423 | 424 | ChatMessageContent content; 425 | } 426 | 427 | /// 428 | struct ChatCompletionAudioParam 429 | { 430 | /// format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]] 431 | string format; 432 | 433 | /// voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] 434 | string voice; 435 | } 436 | 437 | /// Valid values for `ChatCompletionRequest.reasoningEffort` 438 | enum ReasoningEffortLow = "low"; 439 | /// ditto 440 | enum ReasoningEffortMedium = "medium"; 441 | /// ditto 442 | enum ReasoningEffortHigh = "high"; 443 | 444 | /// 445 | struct ChatCompletionRequest 446 | { 447 | /// 448 | @serdeIgnoreDefault 449 | string model; 450 | 451 | /// 452 | ChatMessage[] messages; 453 | 454 | /// 455 | @serdeIgnoreDefault 456 | string store; 457 | 458 | /// Use `ReasoningEffortLow`, `ReasoningEffortMedium` or `ReasoningEffortHigh`. 
459 | @serdeIgnoreDefault 460 | @serdeKeys("reasoning_effort") 461 | string reasoningEffort; 462 | 463 | /// 464 | @serdeIgnoreDefault 465 | StringMap!string metadata; 466 | 467 | //deprecated("This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.") 468 | /// 469 | @serdeIgnoreDefault 470 | @serdeKeys("max_tokens") 471 | uint maxTokens; 472 | 473 | /// 474 | @serdeIgnoreDefault 475 | @serdeKeys("max_completion_tokens") 476 | uint maxCompletionTokens; 477 | 478 | /// 479 | @serdeIgnoreDefault 480 | double temperature = 1; 481 | 482 | /// 483 | @serdeIgnoreDefault 484 | @serdeKeys("top_p") 485 | double topP = 1; 486 | 487 | /// 488 | @serdeIgnoreDefault 489 | uint n = 1; 490 | 491 | /// 492 | @serdeIgnoreDefault 493 | string[] modalities; 494 | 495 | /// 496 | @serdeIgnoreDefault 497 | Nullable!ChatCompletionPredictionContentParam prediction = null; 498 | 499 | /// 500 | @serdeIgnoreDefault 501 | Nullable!ChatCompletionAudioParam audio = null; 502 | 503 | /// 504 | @serdeIgnoreDefault 505 | bool stream = false; 506 | 507 | /// 508 | @serdeIgnoreDefault 509 | bool echo = false; 510 | 511 | /// 512 | @serdeIgnoreDefault 513 | @serdeKeys("service_tier") 514 | string serviceTier = "auto"; 515 | 516 | /// 517 | @serdeIgnoreDefault 518 | StopToken stop = null; 519 | 520 | /// 521 | @serdeIgnoreDefault 522 | @serdeIgnoreOutIf!isNaN @serdeKeys("presence_penalty") 523 | double presencePenalty = 0; 524 | 525 | /// 526 | @serdeIgnoreDefault 527 | @serdeIgnoreOutIf!isNaN @serdeKeys("frequency_penalty") 528 | double frequencyPenalty = 0; 529 | 530 | @serdeIgnoreDefault 531 | @serdeKeys("response_format") 532 | Nullable!ResponseFormat responseFormat = null; 533 | 534 | @serdeIgnoreDefault 535 | Nullable!int seed = null; 536 | 537 | version (none) 538 | { 539 | /// 540 | @serdeIgnoreDefault 541 | @serdeKeys("logit_bias") 542 | double[string] logitBias; // TODO test 543 | } 544 | 545 | /// 546 | @serdeIgnoreDefault 547 | 
ChatCompletionTool[] tools = null; 548 | 549 | /// 550 | @serdeIgnoreDefault 551 | @serdeKeys("tool_choice") 552 | ChatCompletionToolChoice toolChoice = null; 553 | 554 | /// 555 | @serdeIgnoreDefault 556 | @serdeKeys("parallel_tool_calls") 557 | bool parallelToolCalls = true; 558 | 559 | /// 560 | @serdeIgnoreDefault 561 | string user = null; 562 | 563 | /// 564 | @serdeIgnoreDefault 565 | Nullable!bool logprobs = null; 566 | 567 | @serdeIgnoreDefault 568 | Nullable!uint top_logprobs = null; 569 | } 570 | 571 | unittest 572 | { 573 | ChatCompletionTool tool1; 574 | tool1.type = "function"; 575 | tool1.function_.name = "tool1"; 576 | tool1.function_.description = "Description of tool1"; 577 | tool1.function_.parameters = JsonValue("{}"); 578 | 579 | ChatCompletionTool tool2; 580 | tool2.type = "function"; 581 | tool2.function_.name = "tool2"; 582 | tool2.function_.description = "Description of tool2"; 583 | tool2.function_.parameters = JsonValue("{}"); 584 | 585 | ChatCompletionRequest request; 586 | request.model = "gpt-4o-mini"; 587 | request.messages = [ 588 | userChatMessage("Hello, how can I help you?"), 589 | toolChatMessage("tool1", "This is a tool1 result", "tool1_call") 590 | ]; 591 | 592 | request.tools = [tool1, tool2]; 593 | request.toolChoice = "auto"; 594 | 595 | assert(request.tools.length == 2); 596 | assert(request.tools[0].type == "function"); 597 | assert(request.tools[0].function_.name == "tool1"); 598 | assert(request.tools[1].type == "function"); 599 | assert(request.tools[1].function_.name == "tool2"); 600 | } 601 | 602 | unittest 603 | { 604 | ChatCompletionTool tool3; 605 | tool3.type = "function"; 606 | tool3.function_.name = "tool3"; 607 | tool3.function_.description = "Description of tool3"; 608 | tool3.function_.parameters = JsonValue("{}"); 609 | 610 | ChatCompletionToolChoiceInfo toolChoiceInfo; 611 | toolChoiceInfo.type = "function"; 612 | toolChoiceInfo.function_.name = "tool3"; 613 | 614 | ChatCompletionRequest request; 615 | 
request.model = "gpt-4o-mini"; 616 | request.messages = [ 617 | userChatMessage("Hello, how can I help you?"), 618 | toolChatMessage("tool1", "This is a tool1 result", "tool1_call") 619 | ]; 620 | request.tools = [tool3]; 621 | request.toolChoice = ChatCompletionToolChoice(toolChoiceInfo); 622 | 623 | assert(request.toolChoice.get!ChatCompletionToolChoiceInfo().type == "function"); 624 | assert(request.toolChoice.get!ChatCompletionToolChoiceInfo().function_.name == "tool3"); 625 | } 626 | 627 | unittest 628 | { 629 | ChatCompletionRequest request; 630 | request.model = "gpt-4o-mini"; 631 | request.maxCompletionTokens = 20; 632 | request.messages = [ 633 | systemChatMessage("Welcome!"), 634 | userChatMessage("How can I use the tools?", "User123"), 635 | ]; 636 | 637 | ChatCompletionTool tool; 638 | tool.function_.name = "sample_function"; 639 | tool.function_.description = "Sample tool function"; 640 | tool.function_.parameters = JsonSchema.string_("tool argument"); 641 | 642 | request.tools ~= tool; 643 | request.toolChoice = "auto"; 644 | 645 | import mir.ser.json; 646 | 647 | string jsonString = serializeJson(request); 648 | 649 | string expectedJson = `{"model":"gpt-4o-mini","messages":[{"role":"system","content":"Welcome!"},{"role":"user","content":"How can I use the tools?","name":"User123"}],"max_completion_tokens":20,"tools":[{"type":"function","function":{"name":"sample_function","description":"Sample tool function","parameters":{"type":"string","description":"tool argument"}}}],"tool_choice":"auto"}`; 650 | 651 | assert(jsonString == expectedJson, jsonString ~ "\n" ~ expectedJson); 652 | } 653 | 654 | unittest 655 | { 656 | const errorJson = `{ 657 | "index": 0, 658 | "message": { 659 | "role": "assistant", 660 | "content": null, 661 | "tool_calls": [ 662 | { 663 | "id": "call_yRyhjp0JMZCquJKRoPOSziS1", 664 | "type": "function", 665 | "function": { 666 | "name": "add", 667 | "arguments": "{\n \"a\": 3,\n \"b\": 5\n}" 668 | } 669 | } 670 | ] 671 | }, 672 | 
"logprobs": null, 673 | "finish_reason": "tool_calls" 674 | }`; 675 | 676 | import mir.deser.json; 677 | 678 | auto _ = deserializeJson!ChatChoice(errorJson); 679 | } 680 | 681 | version (none) unittest 682 | { 683 | const errorJson = `{ 684 | "id": "chatcmpl-8o4Ov7YkYueWBllPDLhCxcC68CojX", 685 | "object": "chat.completion", 686 | "created": 1706944009, 687 | "model": "gpt-4o-mini", 688 | "choices": [ 689 | { 690 | "index": 0, 691 | "message": { 692 | "role": "assistant", 693 | "content": null, 694 | "tool_calls": [ 695 | { 696 | "id": "call_yRyhjp0JMZCquJKRoPOSziS1", 697 | "type": "function", 698 | "function": { 699 | "name": "add", 700 | "arguments": "{\n \"a\": 3,\n \"b\": 5\n}" 701 | } 702 | } 703 | ] 704 | }, 705 | "logprobs": null, 706 | "finish_reason": "tool_calls" 707 | } 708 | ], 709 | "usage": { 710 | "prompt_tokens": 124, 711 | "completion_tokens": 21, 712 | "total_tokens": 145 713 | }, 714 | "system_fingerprint": null 715 | }`; 716 | 717 | import mir.deser.json; 718 | 719 | auto _ = deserializeJson!ChatCompletionResponse(errorJson); 720 | } 721 | /// 722 | ChatCompletionRequest chatCompletionRequest(return scope string model, return scope ChatMessage[] messages, uint maxTokens, double temperature) 723 | { 724 | auto request = ChatCompletionRequest(); 725 | request.model = model; 726 | request.messages = messages; 727 | request.maxCompletionTokens = maxTokens; 728 | request.temperature = temperature; 729 | return request; 730 | } 731 | 732 | /// 733 | @serdeIgnoreUnexpectedKeys 734 | struct ChatChoice 735 | { 736 | /// 737 | size_t index; 738 | 739 | /// 740 | ChatMessage message; 741 | 742 | /// 743 | @serdeKeys("finish_reason") 744 | string finishReason; 745 | 746 | /// 747 | @serdeOptional 748 | Nullable!float logprobs; 749 | } 750 | 751 | /// 752 | @serdeIgnoreUnexpectedKeys 753 | struct ChatCompletionResponse 754 | { 755 | /// 756 | string id; 757 | 758 | /// 759 | string object; 760 | 761 | /// 762 | ulong created; 763 | 764 | /// 765 | string 
model; 766 | 767 | /// 768 | ChatChoice[] choices; 769 | 770 | /// 771 | CompletionUsage usage; 772 | 773 | /// 774 | @serdeKeys("system_fingerprint") 775 | string systemFingerprint; 776 | } 777 | -------------------------------------------------------------------------------- /source/openai/clients/openai.d: -------------------------------------------------------------------------------- 1 | /** 2 | OpenAI API Client 3 | */ 4 | module openai.clients.openai; 5 | 6 | import mir.deser.json; 7 | import mir.ser.json; 8 | import std.net.curl; 9 | 10 | import openai.chat; 11 | import openai.completion; 12 | import openai.embedding; 13 | import openai.models; 14 | import openai.moderation; 15 | import openai.audio; 16 | 17 | @safe: 18 | 19 | /// 20 | enum ENV_OPENAI_API_KEY = "OPENAI_API_KEY"; 21 | 22 | /// 23 | enum ENV_OPENAI_ORGANIZATION = "OPENAI_ORGANIZATION"; 24 | 25 | /// 26 | enum ENV_OPENAI_API_BASE = "OPENAI_API_BASE"; 27 | 28 | /// 29 | enum ENV_OPENAI_DEPLOYMENT_ID = "OPENAI_DEPLOYMENT_ID"; 30 | 31 | /// 32 | enum ENV_OPENAI_API_VERSION = "OPENAI_API_VERSION"; 33 | 34 | /// Default Azure OpenAI API version (2025-04-01-preview is also available) 35 | enum DEFAULT_OPENAI_API_VERSION = "2024-10-21"; 36 | 37 | /// 38 | class OpenAIClientConfig 39 | { 40 | string apiKey; 41 | string organization; 42 | string apiBase = "https://api.openai.com/v1"; 43 | string deploymentId; 44 | string apiVersion = DEFAULT_OPENAI_API_VERSION; 45 | 46 | bool isAzure() const @safe 47 | { 48 | import std.algorithm.searching : canFind; 49 | 50 | return apiBase.canFind(".api.cognitive.microsoft.com"); 51 | } 52 | 53 | private this() 54 | { 55 | this.apiBase = "https://api.openai.com/v1"; 56 | this.apiVersion = DEFAULT_OPENAI_API_VERSION; 57 | } 58 | 59 | /// 60 | this(string apiKey) 61 | { 62 | this.apiKey = apiKey; 63 | } 64 | 65 | /// 66 | this(string apiKey, string organization) 67 | { 68 | this.apiKey = apiKey; 69 | this.organization = organization; 70 | } 71 | 72 | /// 73 | static 
OpenAIClientConfig fromEnvironment( 74 | string envApiKeyName = ENV_OPENAI_API_KEY, 75 | string envOrgName = ENV_OPENAI_ORGANIZATION, 76 | string envApiBaseName = ENV_OPENAI_API_BASE, 77 | string envDeploymentName = ENV_OPENAI_DEPLOYMENT_ID, 78 | string envApiVersionName = ENV_OPENAI_API_VERSION) 79 | { 80 | auto config = new OpenAIClientConfig; 81 | config.loadFromEnvironmentVariables(envApiKeyName, envOrgName, 82 | envApiBaseName, envDeploymentName, envApiVersionName); 83 | return config; 84 | } 85 | 86 | /// 87 | static OpenAIClientConfig fromFile(string filePath) 88 | { 89 | auto config = new OpenAIClientConfig; 90 | config.loadFromFile(filePath); 91 | return config; 92 | } 93 | 94 | /// 95 | void loadFromEnvironmentVariables( 96 | string envApiKeyName = ENV_OPENAI_API_KEY, 97 | string envOrgName = ENV_OPENAI_ORGANIZATION, 98 | string envApiBaseName = ENV_OPENAI_API_BASE, 99 | string envDeploymentName = ENV_OPENAI_DEPLOYMENT_ID, 100 | string envApiVersionName = ENV_OPENAI_API_VERSION) 101 | { 102 | import std.process : environment; 103 | 104 | auto envApiKey = environment.get(envApiKeyName, ""); 105 | auto envOrganization = environment.get(envOrgName, ""); 106 | auto envApiBase = environment.get(envApiBaseName, "https://api.openai.com/v1"); 107 | auto envDeploymentId = environment.get(envDeploymentName, ""); 108 | auto envApiVersion = environment.get(envApiVersionName, ""); 109 | 110 | this.apiKey = envApiKey; 111 | this.organization = envOrganization; 112 | this.apiBase = envApiBase.length ? 
envApiBase : "https://api.openai.com/v1";
        this.deploymentId = envDeploymentId;
        if (envApiVersion.length)
            this.apiVersion = envApiVersion;
    }

    /// Loads configuration from a JSON file.
    /// `apiKey` and `organization` are assigned unconditionally; the remaining
    /// fields are only overwritten when the file supplies a non-empty value.
    /// NOTE(review): an existing `organization` is cleared when the file omits it — confirm intended.
    void loadFromFile(string filePath)
    {
        import std.file;

        auto configText = readText(filePath);

        @serdeIgnoreUnexpectedKeys
        static struct ConfigData
        {
            @serdeIgnoreDefault
            string apiKey;

            @serdeOptional
            @serdeIgnoreDefault
            string organization;

            @serdeOptional
            @serdeIgnoreDefault
            string apiBase;

            @serdeOptional
            @serdeIgnoreDefault
            string deploymentId;

            @serdeOptional
            @serdeIgnoreDefault
            string apiVersion;
        }

        auto config = deserializeJson!ConfigData(configText);
        this.apiKey = config.apiKey;
        this.organization = config.organization;
        if (config.apiBase.length)
            this.apiBase = config.apiBase;
        if (config.deploymentId.length)
            this.deploymentId = config.deploymentId;
        if (config.apiVersion.length)
            this.apiVersion = config.apiVersion;
    }

    /// Serializes this configuration to JSON and writes it to `filePath`.
    void saveToFile(string filePath)
    {
        import std.file;

        write(filePath, serializeJson(this));
    }
}

/// Synchronous client for the OpenAI (and Azure OpenAI) REST API.
class OpenAIClient
{
    /// Active configuration (API key, base URL, Azure deployment settings).
    OpenAIClientConfig config;

    /// Constructs a client configured from environment variables.
    this()
    {
        this.config = OpenAIClientConfig.fromEnvironment();
        validateConfig();
    }

    /// Constructs a client from an explicit, non-null configuration.
    this(OpenAIClientConfig config)
    in (config !is null)
    do
    {
        this.config = config;
        validateConfig();
    }

    // Azure mode routes requests through a deployment, so the id is mandatory.
    private void validateConfig()
    {
        import std.exception : enforce;

        if (config.isAzure)
        {
            enforce(config.deploymentId.length > 0,
                "OPENAI_DEPLOYMENT_ID is required for Azure mode");
        }
    }

    /// Lists available models via GET `/models`.
    ModelsResponse listModels() @system
    in (config.apiKey != null && config.apiKey.length > 0)
    do
    {
        auto http = HTTP();
        setupHttpByConfig(http);
        http.addRequestHeader("Accept", "application/json; charset=utf-8");

        auto content = cast(char[]) get!(HTTP, ubyte)(buildUrl("/models"), http);
        auto result = content.deserializeJson!ModelsResponse();
        return result;
    }

    /// Creates a text completion via POST `/completions`.
    CompletionResponse completion(in CompletionRequest request) @system
    in (config.apiKey != null && config.apiKey.length > 0)
    in (request.model.length > 0)
    do
    {
        auto http = HTTP();
        setupHttpByConfig(http);
        http.addRequestHeader("Accept", "application/json; charset=utf-8");
        http.addRequestHeader("Content-Type", "application/json");

        auto requestJson = serializeJson(request);
        // Dump the request payload on failure in debug builds to aid diagnosis.
        debug scope (failure)
        {
            import std.stdio;

            writeln("----------");
            writeln("# completion requestJson");
            writeln(requestJson);
            writeln("----------");
        }
        auto content = cast(char[]) post!ubyte(buildUrl("/completions"), requestJson, http);

        debug scope (failure)
        {
            import std.stdio;

            writeln("-----------");
            writeln("# completion responseContent");
            writeln(content);
            writeln("-----------");
        }

        auto result = content.deserializeJson!CompletionResponse();
        return result;
    }

    /// Creates a chat completion via POST `/chat/completions`.
    ChatCompletionResponse chatCompletion(in ChatCompletionRequest request) @system
    in (config.apiKey != null && config.apiKey.length > 0)
    in (request.model.length > 0)
    do // added for consistency with listModels/completion; expression contracts allow either form
    {
        auto http = HTTP();
        setupHttpByConfig(http);
        http.addRequestHeader("Accept", "application/json; charset=utf-8");
        http.addRequestHeader("Content-Type", "application/json");

        auto requestJson = serializeJson(request);
        debug scope (failure)
        {
            import std.stdio;

            writeln("----------");
            writeln("# chatCompletion requestJson");
            writeln(requestJson);
            writeln("----------");
        }
        auto content = cast(char[]) post!ubyte(buildUrl("/chat/completions"), requestJson, http);

        debug scope (failure)
        {
            import std.stdio;

            writeln("-----------");
            writeln("# chatCompletion responseContent");
            writeln(content);
            writeln("-----------");
        }
        auto result = content.deserializeJson!ChatCompletionResponse();
        return result;
    }

    /// Creates embeddings via POST `/embeddings`.
    EmbeddingResponse embedding(in EmbeddingRequest request) @system
    in (config.apiKey != null && config.apiKey.length > 0)
    in (request.model.length > 0)
    do
    {
        auto http = HTTP();
        setupHttpByConfig(http);
        http.addRequestHeader("Accept", "application/json; charset=utf-8");
        http.addRequestHeader("Content-Type", "application/json");

        auto requestJson = serializeJson(request);
        auto content = cast(char[]) post!ubyte(buildUrl("/embeddings"), requestJson, http);

        auto result = content.deserializeJson!EmbeddingResponse();
        return result;
    }

    /// Classifies input text via POST `/moderations`.
    ModerationResponse moderation(in ModerationRequest request) @system
    in (config.apiKey != null && config.apiKey.length > 0)
    in (request.input.length > 0)
    do
    {
        auto http = HTTP();
        setupHttpByConfig(http);
        http.addRequestHeader("Accept", "application/json; charset=utf-8");
        http.addRequestHeader("Content-Type", "application/json");

        auto requestJson = serializeJson(request);
        auto content = cast(char[]) post!ubyte(buildUrl("/moderations"), requestJson, http);

        // import std.stdio;
        // writeln(content);

        auto result = content.deserializeJson!ModerationResponse();
        return result;
    }

    /// Generates audio from input text via POST `/audio/speech`; returns raw audio bytes.
    ubyte[] speech(in SpeechRequest request) @system
    in (config.apiKey != null && config.apiKey.length > 0)
    in (request.model.length
> 0)
    in (request.input.length > 0)
    in (request.voice.length > 0)
    do
    {
        auto http = HTTP();
        setupHttpByConfig(http);
        // The speech endpoint returns raw audio bytes, not JSON.
        http.addRequestHeader("Accept", "application/octet-stream");
        http.addRequestHeader("Content-Type", "application/json");

        auto requestJson = serializeJson(request);
        auto content = post!ubyte(buildUrl("/audio/speech"), requestJson, http);
        return cast(ubyte[]) content;
    }

    /// Transcribes an audio file via POST `/audio/transcriptions` (multipart/form-data).
    /// The file at `request.file` is read from disk and uploaded as the `file` part.
    AudioTextResponse transcription(in TranscriptionRequest request) @system
    in (config.apiKey != null && config.apiKey.length > 0)
    in (request.file.length > 0)
    in (request.model.length > 0)
    do
    {
        import std.array : appender;
        import std.conv : to;
        import std.file : read;
        import std.path : baseName;
        import std.random : uniform;

        auto http = HTTP();
        setupHttpByConfig(http);
        http.addRequestHeader("Accept", "application/json; charset=utf-8");

        // create multipart body
        auto boundary = "--------------------------" ~ to!string(uniform(0, int.max));
        http.addRequestHeader("Content-Type",
            "multipart/form-data; boundary=" ~ boundary);

        auto body = appender!(ubyte[])();

        // Appends a simple text form field.
        void addText(string name, string value)
        {
            body.put(cast(ubyte[])("--" ~ boundary ~ "\r\n"));
            body.put(cast(ubyte[])("Content-Disposition: form-data; name=\"" ~ name ~ "\"\r\n\r\n"));
            body.put(cast(ubyte[]) value);
            body.put(cast(ubyte[]) "\r\n");
        }

        // Appends a binary file form field.
        void addFile(string name, string filename, const(ubyte)[] data)
        {
            body.put(cast(ubyte[])("--" ~ boundary ~ "\r\n"));
            body.put(cast(ubyte[])(
                "Content-Disposition: form-data; name=\"" ~ name ~ "\"; filename=\"" ~ filename ~ "\"\r\n"));
            body.put(cast(ubyte[])("Content-Type: application/octet-stream\r\n\r\n"));
            body.put(data);
            body.put(cast(ubyte[]) "\r\n");
        }

        auto fileData = cast(ubyte[]) read(request.file);
        addFile("file", baseName(request.file), fileData);
        addText("model", request.model);
        if (request.language.length)
            addText("language", request.language);
        if (request.prompt.length)
            addText("prompt", request.prompt);
        if (request.responseFormat.length)
            addText("response_format", request.responseFormat);
        // temperature 0 is omitted; it matches the API default.
        if (request.temperature != 0)
            addText("temperature", to!string(request.temperature));
        // Array-valued multipart parameters use the "[]" suffix per the OpenAI API reference.
        foreach (inc; request.include)
            addText("include[]", inc);
        foreach (t; request.timestampGranularities)
            addText("timestamp_granularities[]", t);
        if (request.stream)
            addText("stream", "true");

        body.put(cast(ubyte[])("--" ~ boundary ~ "--\r\n"));

        auto content = post!ubyte(buildUrl("/audio/transcriptions"), body.data, http);

        auto text = cast(char[]) content;
        // verbose_json responses are flattened to the plain text shape;
        // NOTE(review): verbose-only fields (segments, words) are discarded here.
        if (request.responseFormat == AudioResponseFormatVerboseJson)
        {
            auto verbose = text.deserializeJson!TranscriptionVerboseResponse();
            AudioTextResponse simple;
            simple.text = verbose.text;
            simple.logprobs = null;
            return simple;
        }
        return text.deserializeJson!AudioTextResponse();
    }

    /// Translates an audio file to English via POST `/audio/translations` (multipart/form-data).
    AudioTextResponse translation(in TranslationRequest request) @system
    in (config.apiKey != null && config.apiKey.length > 0)
    in (request.file.length > 0)
    in (request.model.length > 0)
    do
    {
        import std.array : appender;
        import std.conv : to;
        import std.file : read;
        import std.path : baseName;
        import std.random : uniform;

        auto http = HTTP();
        setupHttpByConfig(http);
        http.addRequestHeader("Accept", "application/json; charset=utf-8");

        // create multipart body
        auto boundary = "--------------------------" ~ to!string(uniform(0, int.max));
        http.addRequestHeader("Content-Type",
            "multipart/form-data; boundary=" ~ boundary);

437 | auto body = appender!(ubyte[])(); 438 | 439 | void addText(string name, string value) 440 | { 441 | body.put(cast(ubyte[])("--" ~ boundary ~ "\r\n")); 442 | body.put(cast(ubyte[])("Content-Disposition: form-data; name=\"" ~ name ~ "\"\r\n\r\n")); 443 | body.put(cast(ubyte[]) value); 444 | body.put(cast(ubyte[]) "\r\n"); 445 | } 446 | 447 | void addFile(string name, string filename, const(ubyte)[] data) 448 | { 449 | body.put(cast(ubyte[])("--" ~ boundary ~ "\r\n")); 450 | body.put(cast(ubyte[])( 451 | "Content-Disposition: form-data; name=\"" ~ name ~ "\"; filename=\"" ~ filename ~ "\"\r\n")); 452 | body.put(cast(ubyte[])("Content-Type: application/octet-stream\r\n\r\n")); 453 | body.put(data); 454 | body.put(cast(ubyte[]) "\r\n"); 455 | } 456 | 457 | auto fileData = cast(ubyte[]) read(request.file); 458 | addFile("file", baseName(request.file), fileData); 459 | addText("model", request.model); 460 | if (request.prompt.length) 461 | addText("prompt", request.prompt); 462 | if (request.responseFormat.length) 463 | addText("response_format", request.responseFormat); 464 | if (request.temperature != 0) 465 | addText("temperature", to!string(request.temperature)); 466 | 467 | body.put(cast(ubyte[])("--" ~ boundary ~ "--\r\n")); 468 | 469 | auto content = post!ubyte(buildUrl("/audio/translations"), body.data, http); 470 | 471 | auto text = cast(char[]) content; 472 | return text.deserializeJson!AudioTextResponse(); 473 | } 474 | 475 | private void setupHttpByConfig(scope ref HTTP http) @system 476 | { 477 | import std.algorithm.searching : canFind; 478 | 479 | if (config.isAzure) 480 | { 481 | http.addRequestHeader("api-key", config.apiKey); 482 | } 483 | else 484 | { 485 | http.addRequestHeader("Authorization", "Bearer " ~ config.apiKey); 486 | if (config.organization.length > 0) 487 | { 488 | http.addRequestHeader("OpenAI-Organization", config.organization); 489 | } 490 | } 491 | } 492 | 493 | private string buildUrl(string path) const @safe 494 | { 495 | import 
std.format : format; 496 | import std.string : endsWith; 497 | 498 | string base = config.apiBase; 499 | if (base.endsWith("/")) 500 | base = base[0 .. $ - 1]; 501 | if (config.isAzure) 502 | { 503 | return format("%s/openai/deployments/%s%s?api-version=%s", 504 | base, config.deploymentId, path, config.apiVersion); 505 | } 506 | else 507 | { 508 | return base ~ path; 509 | } 510 | } 511 | 512 | @("buildUrl - openai mode") 513 | unittest 514 | { 515 | auto cfg = new OpenAIClientConfig; 516 | cfg.apiKey = "k"; 517 | auto client = new OpenAIClient(cfg); 518 | assert(client.buildUrl("/models") == "https://api.openai.com/v1/models"); 519 | } 520 | 521 | @("buildUrl - openai mode with trailing slash") 522 | unittest 523 | { 524 | auto cfg = new OpenAIClientConfig; 525 | cfg.apiKey = "k"; 526 | cfg.apiBase = "https://api.openai.com/v1/"; 527 | auto client = new OpenAIClient(cfg); 528 | assert(client.buildUrl("/models") == "https://api.openai.com/v1/models"); 529 | } 530 | 531 | @("buildUrl - azure mode") 532 | unittest 533 | { 534 | auto cfg = new OpenAIClientConfig; 535 | cfg.apiKey = "k"; 536 | cfg.apiBase = "https://westus.api.cognitive.microsoft.com"; 537 | cfg.deploymentId = "dep"; 538 | cfg.apiVersion = "2024-05-01"; 539 | auto client = new OpenAIClient(cfg); 540 | assert(client.buildUrl("/chat/completions") == 541 | "https://westus.api.cognitive.microsoft.com/openai/deployments/dep/chat/completions?api-version=2024-05-01"); 542 | } 543 | 544 | @("buildUrl - azure mode with trailing slash") 545 | unittest 546 | { 547 | auto cfg = new OpenAIClientConfig; 548 | cfg.apiKey = "k"; 549 | cfg.apiBase = "https://westus.api.cognitive.microsoft.com/"; 550 | cfg.deploymentId = "dep"; 551 | cfg.apiVersion = "2024-05-01"; 552 | auto client = new OpenAIClient(cfg); 553 | assert(client.buildUrl("/chat/completions") == 554 | "https://westus.api.cognitive.microsoft.com/openai/deployments/dep/chat/completions?api-version=2024-05-01"); 555 | } 556 | 557 | @("buildUrl transcription - 
openai") 558 | unittest 559 | { 560 | auto cfg = new OpenAIClientConfig; 561 | cfg.apiKey = "k"; 562 | auto client = new OpenAIClient(cfg); 563 | assert(client.buildUrl("/audio/transcriptions") == "https://api.openai.com/v1/audio/transcriptions"); 564 | } 565 | 566 | @("buildUrl transcription - azure") 567 | unittest 568 | { 569 | auto cfg = new OpenAIClientConfig; 570 | cfg.apiKey = "k"; 571 | cfg.apiBase = "https://westus.api.cognitive.microsoft.com"; 572 | cfg.deploymentId = "dep"; 573 | cfg.apiVersion = "2024-05-01"; 574 | auto client = new OpenAIClient(cfg); 575 | assert(client.buildUrl("/audio/transcriptions") == 576 | "https://westus.api.cognitive.microsoft.com/openai/deployments/dep/audio/transcriptions?api-version=2024-05-01"); 577 | } 578 | 579 | @("buildUrl translation - openai") 580 | unittest 581 | { 582 | auto cfg = new OpenAIClientConfig; 583 | cfg.apiKey = "k"; 584 | auto client = new OpenAIClient(cfg); 585 | assert(client.buildUrl("/audio/translations") == 586 | "https://api.openai.com/v1/audio/translations"); 587 | } 588 | 589 | @("buildUrl translation - azure") 590 | unittest 591 | { 592 | auto cfg = new OpenAIClientConfig; 593 | cfg.apiKey = "k"; 594 | cfg.apiBase = "https://westus.api.cognitive.microsoft.com"; 595 | cfg.deploymentId = "dep"; 596 | cfg.apiVersion = "2024-05-01"; 597 | auto client = new OpenAIClient(cfg); 598 | assert(client.buildUrl("/audio/translations") == 599 | "https://westus.api.cognitive.microsoft.com/openai/deployments/dep/audio/translations?api-version=2024-05-01"); 600 | } 601 | 602 | @("buildUrl speech - openai") 603 | unittest 604 | { 605 | auto cfg = new OpenAIClientConfig; 606 | cfg.apiKey = "k"; 607 | auto client = new OpenAIClient(cfg); 608 | assert(client.buildUrl("/audio/speech") == 609 | "https://api.openai.com/v1/audio/speech"); 610 | } 611 | 612 | @("buildUrl speech - azure") 613 | unittest 614 | { 615 | auto cfg = new OpenAIClientConfig; 616 | cfg.apiKey = "k"; 617 | cfg.apiBase = 
"https://westus.api.cognitive.microsoft.com"; 618 | cfg.deploymentId = "dep"; 619 | cfg.apiVersion = "2024-05-01"; 620 | auto client = new OpenAIClient(cfg); 621 | assert(client.buildUrl("/audio/speech") == 622 | "https://westus.api.cognitive.microsoft.com/openai/deployments/dep/audio/speech?api-version=2024-05-01"); 623 | } 624 | } 625 | 626 | @("config from environment - openai mode") 627 | unittest 628 | { 629 | import std.process : environment; 630 | 631 | environment[ENV_OPENAI_API_KEY] = "k"; 632 | scope (exit) 633 | environment.remove(ENV_OPENAI_API_KEY); 634 | environment.remove(ENV_OPENAI_API_BASE); 635 | scope (exit) 636 | environment.remove(ENV_OPENAI_API_BASE); 637 | auto cfg = OpenAIClientConfig.fromEnvironment(); 638 | 639 | assert(!cfg.isAzure); 640 | assert(cfg.apiBase == "https://api.openai.com/v1"); 641 | } 642 | 643 | @("config from environment - azure mode") 644 | unittest 645 | { 646 | import std.process : environment; 647 | 648 | environment[ENV_OPENAI_API_KEY] = "k"; 649 | scope (exit) 650 | environment.remove(ENV_OPENAI_API_KEY); 651 | environment[ENV_OPENAI_API_BASE] = "https://example.api.cognitive.microsoft.com"; 652 | scope (exit) 653 | environment.remove(ENV_OPENAI_API_BASE); 654 | environment[ENV_OPENAI_DEPLOYMENT_ID] = "dep"; 655 | scope (exit) 656 | environment.remove(ENV_OPENAI_DEPLOYMENT_ID); 657 | environment[ENV_OPENAI_API_VERSION] = "2024-05-01"; 658 | scope (exit) 659 | environment.remove(ENV_OPENAI_API_VERSION); 660 | 661 | auto cfg = OpenAIClientConfig.fromEnvironment(); 662 | 663 | assert(cfg.isAzure); 664 | assert(cfg.deploymentId == "dep"); 665 | assert(cfg.apiVersion == "2024-05-01"); 666 | } 667 | 668 | @("azure mode requires deployment id") 669 | unittest 670 | { 671 | import std.process : environment; 672 | import std.exception : assertThrown; 673 | 674 | environment[ENV_OPENAI_API_KEY] = "k"; 675 | scope (exit) 676 | environment.remove(ENV_OPENAI_API_KEY); 677 | environment[ENV_OPENAI_API_BASE] = 
"https://example.api.cognitive.microsoft.com"; 678 | scope (exit) 679 | environment.remove(ENV_OPENAI_API_BASE); 680 | environment.remove(ENV_OPENAI_DEPLOYMENT_ID); 681 | scope (exit) 682 | environment.remove(ENV_OPENAI_DEPLOYMENT_ID); 683 | 684 | assertThrown!Exception(new OpenAIClient()); 685 | } 686 | 687 | @("save & load config file - openai mode") 688 | unittest 689 | { 690 | import std.file; 691 | 692 | auto cfg = new OpenAIClientConfig; 693 | cfg.apiKey = "k"; 694 | cfg.organization = "org"; 695 | 696 | auto tmp = "tmp_cfg.json"; 697 | scope (exit) 698 | if (exists(tmp)) 699 | remove(tmp); 700 | cfg.saveToFile(tmp); 701 | 702 | auto loaded = OpenAIClientConfig.fromFile(tmp); 703 | assert(!loaded.isAzure); 704 | assert(loaded.apiKey == "k"); 705 | assert(loaded.organization == "org"); 706 | } 707 | 708 | @("save & load config file - azure mode") 709 | unittest 710 | { 711 | import std.file; 712 | 713 | auto cfg = new OpenAIClientConfig; 714 | cfg.apiKey = "k"; 715 | cfg.apiBase = "https://example.api.cognitive.microsoft.com"; 716 | cfg.deploymentId = "dep"; 717 | cfg.apiVersion = "2024-05-01"; 718 | 719 | auto tmp = "tmp_cfg.json"; 720 | scope (exit) 721 | if (exists(tmp)) 722 | remove(tmp); 723 | cfg.saveToFile(tmp); 724 | 725 | auto loaded = OpenAIClientConfig.fromFile(tmp); 726 | assert(loaded.isAzure); 727 | assert(loaded.apiKey == "k"); 728 | assert(loaded.apiBase == "https://example.api.cognitive.microsoft.com"); 729 | assert(loaded.deploymentId == "dep"); 730 | assert(loaded.apiVersion == "2024-05-01"); 731 | } 732 | -------------------------------------------------------------------------------- /source/openai/common.d: -------------------------------------------------------------------------------- 1 | /** 2 | OpenAI API Client 3 | */ 4 | module openai.common; 5 | 6 | import mir.algebraic; 7 | import mir.algebraic_alias.json : JsonAlgebraic; 8 | import std.traits; 9 | 10 | @safe: 11 | 12 | /// 13 | alias JsonValue = JsonAlgebraic; 14 | 15 | /// 16 | 
alias StopToken = Algebraic!(typeof(null), string, string[]);

/// Static utilities for function_call
/// Builders return plain `JsonValue` maps mirroring JSON Schema keywords;
/// trailing `_` avoids clashing with D keywords/type names.
struct JsonSchema
{
    // Not instantiable: all members are static builders.
    @disable this();
    @disable this(this);

    /// Object schema with description, required list, and additionalProperties flag.
    static JsonValue object_(string description, JsonValue[string] properties, string[] required, bool additionalProperties)
    {
        import std.algorithm : map;
        import std.array : array;

        return JsonValue([
            "type": JsonValue("object"),
            "description": JsonValue(description),
            "properties": JsonValue(properties),
            "required": JsonValue(required.map!(x => JsonValue(x)).array),
            "additionalProperties": JsonValue(additionalProperties),
        ]);
    }

    /// Object schema with description and required list.
    static JsonValue object_(string description, JsonValue[string] properties, string[] required)
    {
        import std.algorithm : map;
        import std.array : array;

        return JsonValue([
            "type": JsonValue("object"),
            "description": JsonValue(description),
            "properties": JsonValue(properties),
            "required": JsonValue(required.map!(x => JsonValue(x)).array),
        ]);
    }

    /// Object schema with required list and additionalProperties flag.
    static JsonValue object_(JsonValue[string] properties, string[] required, bool additionalProperties)
    {
        import std.algorithm : map;
        import std.array : array;

        return JsonValue([
            "type": JsonValue("object"),
            "properties": JsonValue(properties),
            "required": JsonValue(required.map!(x => JsonValue(x)).array),
            "additionalProperties": JsonValue(additionalProperties),
        ]);
    }

    /// Object schema with required list.
    static JsonValue object_(JsonValue[string] properties, string[] required)
    {
        import std.algorithm : map;
        import std.array : array;

        return JsonValue([
            "type": JsonValue("object"),
            "properties": JsonValue(properties),
            "required": JsonValue(required.map!(x => JsonValue(x)).array),
        ]);
    }

    /// Object schema with description and additionalProperties flag.
    static JsonValue object_(string description, JsonValue[string] properties, bool additionalProperties)
    {
        return JsonValue([
            "type": JsonValue("object"),
            "description": JsonValue(description),
            "properties": JsonValue(properties),
            "additionalProperties": JsonValue(additionalProperties),
        ]);
    }

    /// Object schema with description.
    static JsonValue object_(string description, JsonValue[string] properties)
    {
        return JsonValue([
            "type": JsonValue("object"),
            "description": JsonValue(description),
            "properties": JsonValue(properties),
        ]);
    }

    /// Object schema with additionalProperties flag.
    static JsonValue object_(JsonValue[string] properties, bool additionalProperties)
    {
        return JsonValue([
            "type": JsonValue("object"),
            "properties": JsonValue(properties),
            "additionalProperties": JsonValue(additionalProperties),
        ]);
    }

    /// Object schema with properties only.
    static JsonValue object_(JsonValue[string] properties)
    {
        return JsonValue([
            "type": JsonValue("object"),
            "properties": JsonValue(properties),
        ]);
    }

    /// Boolean schema with description.
    static JsonValue boolean_(string description)
    {
        return JsonValue([
            "type": JsonValue("boolean"),
            "description": JsonValue(description),
        ]);
    }

    /// Boolean schema.
    static JsonValue boolean_()
    {
        return JsonValue([
            "type": JsonValue("boolean"),
        ]);
    }

    /// String schema with description, regex pattern, and length bounds.
    static JsonValue string_(string description, string pattern, ulong minLength, ulong maxLength)
    {
        return JsonValue([
            "type": JsonValue("string"),
            "description": JsonValue(description),
            "pattern": JsonValue(pattern),
            "minLength": JsonValue(minLength),
            "maxLength": JsonValue(maxLength),
        ]);
    }

    /// String schema with description and length bounds.
    static JsonValue string_(string description, ulong minLength, ulong maxLength)
    {
        return JsonValue([
            "type": JsonValue("string"),
            "description": JsonValue(description),
            "minLength": JsonValue(minLength),
            "maxLength": JsonValue(maxLength),
        ]);
    }

    /// String schema with description and regex pattern.
    static JsonValue string_(string description, string pattern)
    {
        return JsonValue([
            "type": JsonValue("string"),
            "description": JsonValue(description),
            "pattern": JsonValue(pattern),
        ]);
    }

    /// String schema with description and enum values.
    static JsonValue string_(string description, string[] enum_)
    {
        import std.algorithm : map;
        import std.array : array;

        return JsonValue([
            "type": JsonValue("string"),
            "description": JsonValue(description),
            "enum": JsonValue(enum_.map!(x => JsonValue(x)).array),
        ]);
    }

    /// String schema with enum values.
    static JsonValue string_(string[] enum_)
    {
        import std.algorithm : map;
        import std.array : array;

        return JsonValue([
            "type": JsonValue("string"),
            "enum": JsonValue(enum_.map!(x => JsonValue(x)).array),
        ]);
    }

    /// String schema with description.
    static JsonValue string_(string description)
    {
        return JsonValue([
            "type": JsonValue("string"),
            "description": JsonValue(description),
        ]);
    }

    /// String schema.
    static JsonValue string_()
    {
        return JsonValue([
            "type": JsonValue("string"),
        ]);
    }

    /// Integer schema with description and inclusive bounds.
    static JsonValue integer_(string description, long minimum, long maximum)
    {
        return JsonValue([
            "type": JsonValue("integer"),
            "description": JsonValue(description),
            "minimum": JsonValue(minimum),
            "maximum": JsonValue(maximum),
        ]);
    }

    /// Integer schema with inclusive bounds.
    static JsonValue integer_(long minimum, long maximum)
    {
        return JsonValue([
            "type": JsonValue("integer"),
            "minimum": JsonValue(minimum),
            "maximum": JsonValue(maximum),
        ]);
    }

    /// Integer schema with description.
    static JsonValue integer_(string description)
    {
        return JsonValue([
            "type": JsonValue("integer"),
            "description": JsonValue(description),
        ]);
    }

    /// Integer schema.
    static JsonValue integer_()
    {
        return JsonValue([
            "type": JsonValue("integer"),
        ]);
    }

    /// Number schema with description and inclusive bounds.
    static JsonValue number_(string description, double minimum, double maximum)
    {
        return JsonValue([
            "type": JsonValue("number"),
            "description": JsonValue(description),
            "minimum": JsonValue(minimum),
            "maximum": JsonValue(maximum),
        ]);
    }

    /// Number schema with inclusive bounds.
    static JsonValue number_(double minimum, double maximum)
    {
        return JsonValue([
            "type": JsonValue("number"),
            "minimum": JsonValue(minimum),
            "maximum": JsonValue(maximum),
        ]);
    }

    /// Number schema with description.
    static JsonValue number_(string description)
    {
        return JsonValue([
            "type": JsonValue("number"),
            "description": JsonValue(description),
        ]);
    }

    /// Number schema.
    static JsonValue number_()
    {
        return JsonValue([
            "type": JsonValue("number"),
        ]);
    }

    /// Array schema with description, item schema, and item-count bounds.
    static JsonValue array_(string description, JsonValue items, ulong minItems, ulong maxItems)
    {
        return JsonValue([
            "type": JsonValue("array"),
            "description": JsonValue(description),
            "items": JsonValue(items),
            "minItems": JsonValue(minItems),
            "maxItems": JsonValue(maxItems),
        ]);
    }

    /// Array schema with item schema and item-count bounds.
    static JsonValue array_(JsonValue items, ulong minItems, ulong maxItems)
    {
        return JsonValue([
            "type": JsonValue("array"),
            "items": JsonValue(items),
            "minItems": JsonValue(minItems),
            "maxItems": JsonValue(maxItems),
        ]);
    }

    /// Array schema with description and item schema.
    static JsonValue array_(string description, JsonValue items)
    {
        return JsonValue([
            "type": JsonValue("array"),
            "description": JsonValue(description),
            "items": JsonValue(items),
        ]);
    }

    /// Array schema with item schema.
    static JsonValue array_(JsonValue items)
    {
        return JsonValue([
            "type": JsonValue("array"),
            "items": JsonValue(items),
        ]);
    }

    /// Array schema with description.
    static JsonValue array_(string description)
    {
        return JsonValue([
            "type": JsonValue("array"),
            "description": JsonValue(description),
        ]);
    }

    /// Array schema.
    static JsonValue array_()
    {
        return JsonValue([
            "type": JsonValue("array"),
        ]);
    }

    /// `oneOf` combinator over the given schemas.
    static JsonValue oneOf(JsonValue[] schemas...)
    in (schemas.length > 0)
    {
        return JsonValue([
            "oneOf": JsonValue(schemas)
        ]);
    }

    /// `anyOf` combinator over the given schemas.
    static JsonValue anyOf(JsonValue[] schemas...)
    in (schemas.length > 0)
    {
        return JsonValue([
            "anyOf": JsonValue(schemas)
        ]);
    }

    /// `allOf` combinator over the given schemas.
    static JsonValue allOf(JsonValue[] schemas...)
    in (schemas.length > 0)
    {
        return JsonValue([
            "allOf": JsonValue(schemas)
        ]);
    }

    /// Emits `{"not": [schemas...]}`.
    /// NOTE(review): JSON Schema's `not` keyword takes a single schema, not an
    /// array — the array form emitted here is pinned by the in-file unittest;
    /// confirm it is intended before changing.
    static JsonValue not(JsonValue[] schemas...)
    in (schemas.length > 0)
    {
        return JsonValue([
            "not": JsonValue(schemas)
        ]);
    }
}

@("simple weather schema")
unittest
{
    /*
    {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA",
            },
            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        },
        "required": ["location"],
    },
    */
    // NOTE(review): "string" is passed as the description for "unit", though the
    // reference JSON above shows no description — confirm intended.
    auto _ = JsonSchema.object_(
        [
            "location": JsonSchema.string_("The city and state, e.g. San Francisco, CA"),
            "unit": JsonSchema.string_("string", ["celsius", "fahrenheit"]),
        ], ["location"]);
}

@("simple weather schema with strict")
unittest
{
    /*
    {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "The city and state, e.g. 
San Francisco, CA",
            },
            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        },
        "required": ["location"],
        "additionalProperties": false,
    },
    */
    auto _ = JsonSchema.object_(
        [
            "location": JsonSchema.string_("The city and state, e.g. San Francisco, CA"),
            "unit": JsonSchema.string_("string", ["celsius", "fahrenheit"]),
        ], ["location"], false);
}

@("nested weather schema")
unittest
{
    /*
    {
        "name": "get_current_weather",
        "description": "Get the current weather",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA",
                },
                "format": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"],
                    "description": "The temperature unit to use. Infer this from the users location.",
                },
            },
            "required": ["location", "format"],
        },
    },
    */
    // Fixed: enum values now match the reference JSON above ("celsius"/"fahrenheit",
    // previously misspelled "celcius"/"farenheit").
    auto _ = JsonSchema.object_(
        [
            "location": JsonSchema.string_("The city and state, e.g. San Francisco, CA"),
            "format": JsonSchema.string_("The temperature unit to use. Infer this from the users location.", [
                "celsius", "fahrenheit"
            ])
        ], ["location", "format"]);
}

@("n-day weather forecast schema") // renamed: was a duplicate of the previous test's name
unittest
{
    /*
    {
        "name": "get_n_day_weather_forecast",
        "description": "Get an N-day weather forecast",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA",
                },
                "format": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"],
                    "description": "The temperature unit to use. Infer this from the users location.",
                },
                "num_days": {
                    "type": "integer",
                    "description": "The number of days to forecast",
                }
            },
            "required": ["location", "format", "num_days"]
        },
    },
    */
    // Fixed: enum values now match the reference JSON above.
    auto _ = JsonSchema.object_(
        [
            "location": JsonSchema.string_("The city and state, e.g. San Francisco, CA"),
            "format": JsonSchema.string_("The temperature unit to use. Infer this from the users location.", [
                "celsius", "fahrenheit"
            ]),
            "num_days": JsonSchema.integer_("The number of days to forecast"),
        ], ["location", "format", "num_days"]);
}

unittest
{
    import mir.ser.json : serializeJson;

    const schema = JsonSchema.oneOf(
        JsonSchema.string_(),
        JsonSchema.integer_(),
    );

    const json = serializeJson(schema);

    assert(json == `{"oneOf":[{"type":"string"},{"type":"integer"}]}`);
}

unittest
{
    import mir.ser.json : serializeJson;

    const schema = JsonSchema.anyOf(
        JsonSchema.string_(),
        JsonSchema.integer_(),
    );

    const json = serializeJson(schema);

    assert(json == `{"anyOf":[{"type":"string"},{"type":"integer"}]}`);
}

unittest
{
    import mir.ser.json : serializeJson;

    const schema = JsonSchema.allOf(
        JsonSchema.string_("starts with 'a'", "^a.*"),
        JsonSchema.string_("ends with 'z'", ".*z$"),
    );

    const json = serializeJson(schema);

    assert(json == `{"allOf":[{"type":"string","description":"starts with 'a'","pattern":"^a.*"},{"type":"string","description":"ends with 'z'","pattern":".*z$"}]}`);
}

unittest
{
    import mir.ser.json : serializeJson;

    const schema = JsonSchema.not(
        JsonSchema.string_(),
        JsonSchema.integer_(),
    );

    const json = serializeJson(schema);

    assert(json ==
`{"not":[{"type":"string"},{"type":"integer"}]}`);
}

/// Returns the first string UDA attached to `T.field`, or null when none exists.
/// Used as the field's "description" in generated schemas.
private string getFieldDescription(T, string field)()
{
    alias attrs = __traits(getAttributes, __traits(getMember, T, field));
    foreach (uda; attrs)
    {
        static if (is(typeof(uda) == string))
        {
            return uda;
        }
    }
    return null;
}

/// True when the aliased symbol is a compile-time string value (e.g. a string UDA).
private enum isStaticString(alias x) = is(typeof(x) == string);

/// Derives a JSON Schema (as a `JsonValue`) from the D type `T` at compile time.
/// Strings/enums map to "string", integrals to "integer", floats to "number",
/// bool to "boolean", arrays to "array" of the element schema; any other type
/// is treated as a struct: each field becomes a property (described by its
/// string UDA, if any), `@serdeRequired` fields populate "required", and
/// `additionalProperties: false` is emitted unless `T` carries
/// `@serdeIgnoreUnexpectedKeys`. A string UDA on `T` itself supplies the
/// object's description when the `description` argument is not given.
JsonValue parseJsonSchema(T)(string description = null)
{
    import std.meta : Filter;

    alias TAttributes = __traits(getAttributes, T);
    alias TAttributesStr = Filter!(isStaticString, TAttributes);
    enum hasStrAttributes = TAttributesStr.length > 0;

    JsonValue schema;

    static if (is(T == string))
    {
        // NOTE(review): `description ?` tests array non-null-ness, so an empty
        // but non-null string would still select the described overload.
        schema = description ? JsonSchema.string_(description) : JsonSchema.string_();
    }
    else static if (is(T == enum))
    {
        string[] members;
        alias enumMembers = EnumMembers!T;
        static if (is(OriginalType!T == string))
        {
            // String-backed enums contribute their VALUES to the enum list.
            static foreach (i, _; enumMembers)
            {
                members ~= enumMembers[i];
            }
        }
        else
        {
            // Other enums contribute their member NAMES.
            static foreach (i, _; enumMembers)
            {
                members ~= __traits(identifier, enumMembers[i]);
            }
        }
        schema = description ? JsonSchema.string_(description, members) : JsonSchema.string_(members);
    }
    else static if (isIntegral!T)
    {
        schema = description ? JsonSchema.integer_(description) : JsonSchema.integer_();
    }
    else static if (isFloatingPoint!T)
    {
        schema = description ? JsonSchema.number_(description) : JsonSchema.number_();
    }
    else static if (is(T == bool))
    {
        schema = description ? JsonSchema.boolean_(description) : JsonSchema.boolean_();
    }
    else static if (is(T == char) || is(T == wchar) || is(T == dchar))
    {
        // Single character types are modeled as strings.
        schema = description ? JsonSchema.string_(description) : JsonSchema.string_();
    }
    else static if (isArray!T)
    {
        import std.range : ElementType;

        // dfmt off
        schema = description
            ? JsonSchema.array_(description, parseJsonSchema!(ElementType!T))
            : JsonSchema.array_(parseJsonSchema!(ElementType!T));
        // dfmt on
    }
    else
    {
        // Aggregate (struct) branch.
        // NOTE(review): any non-matching type (e.g. associative arrays, classes)
        // also falls through to here — confirm that is intended.
        import std.meta : AliasSeq;
        import std.traits : FieldNameTuple, hasUDA;
        import mir.serde : serdeRequired, serdeIgnoreUnexpectedKeys;

        JsonValue[string] properties;
        string[] required;

        static foreach (field; FieldNameTuple!T)
        {
            {
                enum fieldDescription = getFieldDescription!(T, field)();
                properties[field] = parseJsonSchema!(typeof(__traits(getMember, T.init, field)))(fieldDescription);

                static if (hasUDA!(__traits(getMember, T, field), serdeRequired))
                {
                    required ~= field;
                }
            }
        }

        enum allowAdditionalProperties = hasUDA!(T, serdeIgnoreUnexpectedKeys);

        static if (allowAdditionalProperties)
        {
            // No additionalProperties key is emitted in this branch.
            if (description)
            {
                // dfmt off
                schema = required.length > 0
                    ? JsonSchema.object_(description, properties, required)
                    : JsonSchema.object_(description, properties);
                // dfmt on
            }
            else
            {
                static if (hasStrAttributes)
                {
                    // Fall back to the type's own string UDA as the description.
                    // dfmt off
                    schema = required.length > 0
                        ? JsonSchema.object_(TAttributesStr[0], properties, required)
                        : JsonSchema.object_(TAttributesStr[0], properties);
                    // dfmt on
                }
                else
                {
                    // dfmt off
                    schema = required.length > 0
                        ? JsonSchema.object_(properties, required)
                        : JsonSchema.object_(properties);
                    // dfmt on
                }
            }
        }
        else
        {
            // Strict mode: additionalProperties is always false here.
            if (description)
            {
                // dfmt off
                schema = required.length > 0
                    ? JsonSchema.object_(description, properties, required, false)
                    : JsonSchema.object_(description, properties, false);
                // dfmt on
            }
            else
            {
                static if (hasStrAttributes)
                {
                    // dfmt off
                    schema = required.length > 0
                        ? JsonSchema.object_(TAttributesStr[0], properties, required, false)
                        : JsonSchema.object_(TAttributesStr[0], properties, false);
                    // dfmt on
                }
                else
                {
                    // dfmt off
                    schema = required.length > 0
                        ? JsonSchema.object_(properties, required, false)
                        : JsonSchema.object_(properties, false);
                    // dfmt on
                }
            }
        }
    }

    return schema;
}

@("parseJsonSchema builtin primitives")
unittest
{
    import mir.algebraic_alias.json : JsonAlgebraic;

    assert(parseJsonSchema!string() == JsonSchema.string_());
    assert(parseJsonSchema!int() == JsonSchema.integer_());
    assert(parseJsonSchema!long() == JsonSchema.integer_());
    assert(parseJsonSchema!short() == JsonSchema.integer_());
    assert(parseJsonSchema!float() == JsonSchema.number_());
    assert(parseJsonSchema!double() == JsonSchema.number_());
    assert(parseJsonSchema!bool() == JsonSchema.boolean_());
    assert(parseJsonSchema!char() == JsonSchema.string_());
    assert(parseJsonSchema!wchar() == JsonSchema.string_());
    assert(parseJsonSchema!dchar() == JsonSchema.string_());
}

@("parseJsonSchema arrays of builtin types")
unittest
{
    import mir.algebraic_alias.json : JsonAlgebraic;

    assert(parseJsonSchema!(int[])() == JsonSchema.array_(JsonSchema.integer_()));
    assert(parseJsonSchema!(string[])() == JsonSchema.array_(JsonSchema.string_()));
    assert(parseJsonSchema!(bool[])() == JsonSchema.array_(JsonSchema.boolean_()));
    assert(parseJsonSchema!(double[])() == JsonSchema.array_(JsonSchema.number_()));
}

@("parseJsonSchema arrays of structs")
unittest
{
    @("Custom description")
    struct TestStruct
    {
        @("Custom field description 1")
        string name;
        int age;
        @("Custom field description 2")
        bool isCool;
    }

    auto itemSchema = JsonSchema.object_("Custom description", [
        "name": JsonSchema.string_("Custom field description 1"),
        "age": JsonSchema.integer_(),
        "isCool": JsonSchema.boolean_("Custom field description 2"),
    ], false);
    auto schema = JsonSchema.array_(itemSchema);

    auto actual = parseJsonSchema!(TestStruct[])();
    assert(actual == schema, actual.toString() ~ "\n---\n" ~ schema.toString());
}

@("parseJsonSchema structs with builtin types")
unittest
{
    import mir.algebraic_alias.json : JsonAlgebraic;

    struct TestStruct
    {
        string name;
        int age;
        bool isCool;
    }

    auto schema = JsonSchema.object_([
        "name": JsonSchema.string_(),
        "age": JsonSchema.integer_(),
        "isCool": JsonSchema.boolean_(),
    ], false);

    assert(parseJsonSchema!TestStruct() == schema);
}

@("parseJsonSchema structs with nested structs")
unittest
{
    import mir.algebraic_alias.json : JsonAlgebraic;

    @("Address schema")
    struct Address
    {
        string street;
        string city;
        string state;
        string zip;
    }

    struct Person
    {
        string name;
        int age;
        bool isCool;
        Address address;
    }

    auto addressSchema = JsonSchema.object_("Address schema", [
        "street": JsonSchema.string_(),
        "city": JsonSchema.string_(),
        "state": JsonSchema.string_(),
        "zip": JsonSchema.string_(),
    ], false);

    assert(parseJsonSchema!Address() == addressSchema);

    auto addressSchema2 = JsonSchema.object_("Custom description", [
        "street": JsonSchema.string_(),
"city": JsonSchema.string_(), 826 | "state": JsonSchema.string_(), 827 | "zip": JsonSchema.string_(), 828 | ], false); 829 | 830 | assert(parseJsonSchema!Address("Custom description") == addressSchema2); 831 | 832 | auto personSchema = JsonSchema.object_("Person schema", [ 833 | "name": JsonSchema.string_(), 834 | "age": JsonSchema.integer_(), 835 | "isCool": JsonSchema.boolean_(), 836 | "address": addressSchema, 837 | ], false); 838 | 839 | assert(parseJsonSchema!Person("Person schema") == personSchema); 840 | } 841 | 842 | @("parseJsonSchema structs with description field") 843 | unittest 844 | { 845 | import mir.algebraic_alias.json : JsonAlgebraic; 846 | 847 | @("Custom description") 848 | struct TestStruct 849 | { 850 | @("Custom field description 1") 851 | string name; 852 | 853 | int age; 854 | @("Custom field description 2") 855 | bool isCool; 856 | } 857 | 858 | auto schema = JsonSchema.object_("Custom description", [ 859 | "name": JsonSchema.string_("Custom field description 1"), 860 | "age": JsonSchema.integer_(), 861 | "isCool": JsonSchema.boolean_("Custom field description 2"), 862 | ], false); 863 | 864 | auto actual = parseJsonSchema!TestStruct(); 865 | assert(actual == schema, actual.toString()); 866 | } 867 | 868 | @("parseJsonSchema structs with required fields") 869 | unittest 870 | { 871 | import mir.algebraic_alias.json : JsonAlgebraic; 872 | import mir.serde : serdeRequired; 873 | 874 | struct TestStruct 875 | { 876 | @serdeRequired 877 | string name; 878 | @serdeRequired 879 | int age; 880 | bool isCool; 881 | } 882 | 883 | string[] requiredFields = ["name", "age"]; 884 | 885 | auto schema = JsonSchema.object_([ 886 | "name": JsonSchema.string_(), 887 | "age": JsonSchema.integer_(), 888 | "isCool": JsonSchema.boolean_(), 889 | ], requiredFields, false); 890 | 891 | assert(parseJsonSchema!TestStruct() == schema); 892 | } 893 | 894 | @("parseJsonSchema structs with ignore unexpected keys") 895 | unittest 896 | { 897 | import 
mir.algebraic_alias.json : JsonAlgebraic; 898 | import mir.serde : serdeIgnoreUnexpectedKeys; 899 | 900 | @serdeIgnoreUnexpectedKeys 901 | struct TestStruct 902 | { 903 | string name; 904 | int age; 905 | bool isCool; 906 | } 907 | 908 | auto schema = JsonSchema.object_([ 909 | "name": JsonSchema.string_(), 910 | "age": JsonSchema.integer_(), 911 | "isCool": JsonSchema.boolean_(), 912 | ]); 913 | 914 | auto actual = parseJsonSchema!TestStruct(); 915 | assert(actual == schema, actual.toString() ~ "\n---\n" ~ schema.toString()); 916 | } 917 | 918 | @("parseJsonSchema structs with full options") 919 | unittest 920 | { 921 | import mir.algebraic_alias.json : JsonAlgebraic; 922 | import mir.serde : serdeRequired, serdeIgnoreUnexpectedKeys; 923 | 924 | @("Custom description") 925 | @serdeIgnoreUnexpectedKeys 926 | struct TestStruct 927 | { 928 | @("Custom field description 1") 929 | @serdeRequired 930 | string name; 931 | int age; 932 | @("Custom field description 2") 933 | bool isCool; 934 | } 935 | 936 | auto schema = JsonSchema.object_("Custom description", [ 937 | "name": JsonSchema.string_("Custom field description 1"), 938 | "age": JsonSchema.integer_(), 939 | "isCool": JsonSchema.boolean_("Custom field description 2"), 940 | ], ["name"]); 941 | 942 | auto actual = parseJsonSchema!TestStruct(); 943 | assert(actual == schema, actual.toString() ~ "\n---\n" ~ schema.toString()); 944 | } 945 | 946 | @("parseJsonSchema structs from OpenAI API example") 947 | unittest 948 | { 949 | import mir.serde : serdeRequired, serdeIgnoreUnexpectedKeys; 950 | 951 | struct Step 952 | { 953 | @serdeRequired 954 | string explanation; 955 | @serdeRequired 956 | string output; 957 | } 958 | 959 | struct MathResponse 960 | { 961 | @serdeRequired 962 | Step[] steps; 963 | @serdeRequired 964 | string final_answer; 965 | } 966 | 967 | auto stepSchema = JsonSchema.object_([ 968 | "explanation": JsonSchema.string_(), 969 | "output": JsonSchema.string_(), 970 | ], ["explanation", "output"], 
false); 971 | 972 | auto mathResponseSchema = JsonSchema.object_([ 973 | "steps": JsonSchema.array_(stepSchema), 974 | "final_answer": JsonSchema.string_(), 975 | ], ["steps", "final_answer"], false); 976 | 977 | auto actual = parseJsonSchema!MathResponse(); 978 | assert(actual == mathResponseSchema, actual.toString() ~ "\n---\n" ~ mathResponseSchema.toString()); 979 | } 980 | 981 | @("parseJsonSchema with enum") 982 | unittest 983 | { 984 | import mir.algebraic_alias.json : JsonAlgebraic; 985 | import mir.serde : serdeRequired; 986 | 987 | enum TaskState 988 | { 989 | ToDo, 990 | InProgress, 991 | Done, 992 | } 993 | 994 | struct Task 995 | { 996 | @serdeRequired 997 | @("task id") 998 | string id; 999 | 1000 | @serdeRequired 1001 | @("task state") 1002 | TaskState state; 1003 | } 1004 | 1005 | auto schema = JsonSchema.object_([ 1006 | "id": JsonSchema.string_("task id"), 1007 | "state": JsonSchema.string_("task state", ["ToDo", "InProgress", "Done"]), 1008 | ], ["id", "state"], false); 1009 | 1010 | auto actual = parseJsonSchema!Task(); 1011 | 1012 | assert(actual == schema, actual.toString() ~ "\n---\n" ~ schema.toString()); 1013 | } 1014 | 1015 | @("parseJsonSchema with string enum members") 1016 | unittest 1017 | { 1018 | import mir.algebraic_alias.json : JsonAlgebraic; 1019 | import mir.serde : serdeRequired; 1020 | 1021 | enum TaskState 1022 | { 1023 | ToDo = "todo", 1024 | InProgress = "in_progress", 1025 | Done = "done", 1026 | } 1027 | 1028 | struct Task 1029 | { 1030 | @serdeRequired 1031 | @("task id") 1032 | string id; 1033 | 1034 | @serdeRequired 1035 | @("task state") 1036 | TaskState state; 1037 | } 1038 | 1039 | auto schema = JsonSchema.object_([ 1040 | "id": JsonSchema.string_("task id"), 1041 | "state": JsonSchema.string_("task state", ["todo", "in_progress", "done"]), 1042 | ], ["id", "state"], false); 1043 | 1044 | auto actual = parseJsonSchema!Task(); 1045 | 1046 | assert(actual == schema, actual.toString() ~ "\n---\n" ~ schema.toString()); 
}

--------------------------------------------------------------------------------
/source/openai/completion.d:
--------------------------------------------------------------------------------
/**
OpenAI API Completions

Standards: https://platform.openai.com/docs/api-reference/completions
*/
module openai.completion;

import mir.algebraic;
import mir.serde;
import std.math;

import openai.common;

@safe:

/// Request body for the legacy `/v1/completions` endpoint.
///
/// Note: the OpenAI API expects snake_case field names; every multi-word
/// member therefore carries an explicit `@serdeKeys` (previously only
/// `maxTokens` did, so e.g. `topP` was serialized as `"topP"`).
struct CompletionRequest
{
    /// ID of the model to use.
    @serdeIgnoreDefault
    string model;

    /// The prompt to generate a completion for.
    string prompt;

    /// Text appended after the inserted completion.
    @serdeIgnoreDefault
    string suffix = null;

    /// Include log probabilities of the most likely tokens (API key: `logprobs`).
    @serdeIgnoreDefault
    @serdeKeys("logprobs")
    Nullable!int logProbs = null;

    /// Maximum number of tokens to generate (API key: `max_tokens`).
    @serdeIgnoreDefault
    @serdeKeys("max_tokens")
    uint maxTokens = 16;

    /// Sampling temperature.
    @serdeIgnoreDefault
    double temperature = 1;

    /// Nucleus-sampling probability mass (API key: `top_p`).
    @serdeIgnoreDefault
    @serdeKeys("top_p")
    double topP = 1;

    /// Number of completions to generate for each prompt.
    @serdeIgnoreDefault
    uint n = 1;

    /// Stream partial progress as server-sent events.
    @serdeIgnoreDefault
    bool stream = false;

    /// Echo back the prompt in addition to the completion.
    @serdeIgnoreDefault
    bool echo = false;

    /// Sequence(s) at which the API stops generating further tokens.
    @serdeIgnoreDefault
    StopToken stop = null;

    /// Presence penalty (API key: `presence_penalty`); NaN is never serialized.
    @serdeIgnoreDefault
    @serdeKeys("presence_penalty")
    @serdeIgnoreOutIf!isNaN double presencePenalty = 0;

    /// Frequency penalty (API key: `frequency_penalty`).
    @serdeIgnoreDefault
    @serdeKeys("frequency_penalty")
    double frequencyPenalty = 0;

    /// Generate this many candidates server-side and return the best (API key: `best_of`).
    @serdeIgnoreDefault
    @serdeKeys("best_of")
    uint bestOf = 1;
    version (none)
    {
        ///
        @serdeIgnoreDefault
        double[string] logitBias; // TODO test
    }

    /// Stable end-user identifier for abuse monitoring.
    @serdeIgnoreDefault
    string user = null;
}

/// Convenience constructor covering the most common request fields.
///
/// Params:
///     model       = model identifier
///     prompt      = prompt text
///     maxTokens   = maximum tokens to generate
///     temperature = sampling temperature
/// Returns: a `CompletionRequest` with remaining fields at their defaults.
CompletionRequest completionRequest(string model, string prompt, uint maxTokens, double temperature)
{
    auto request = CompletionRequest();
    request.model = model;
    request.prompt = prompt;
    request.maxTokens = maxTokens;
    request.temperature = temperature;
    return request;
}

///
@serdeIgnoreUnexpectedKeys 99 | struct PromptTokensDetails 100 | { 101 | /// 102 | @serdeKeys("cached_tokens") 103 | uint cachedTokens; 104 | 105 | /// 106 | @serdeKeys("audio_tokens") 107 | uint audioTokens; 108 | } 109 | 110 | /// 111 | @serdeIgnoreUnexpectedKeys 112 | struct CompletionTokensDetails 113 | { 114 | /// 115 | @serdeKeys("reasoning_tokens") 116 | uint reasoningTokens; 117 | 118 | /// 119 | @serdeKeys("audio_tokens") 120 | uint audioTokens; 121 | 122 | /// 123 | @serdeKeys("accepted_prediction_tokens") 124 | uint acceptedPredictionTokens; 125 | 126 | /// 127 | @serdeKeys("rejected_prediction_tokens") 128 | uint rejectedPredictionTokens; 129 | } 130 | 131 | /// 132 | @serdeIgnoreUnexpectedKeys 133 | struct CompletionUsage 134 | { 135 | /// 136 | @serdeKeys("prompt_tokens") 137 | uint promptTokens; 138 | 139 | /// 140 | @serdeKeys("completion_tokens") 141 | @serdeOptional 142 | uint completionTokens; 143 | 144 | /// 145 | @serdeKeys("total_tokens") 146 | uint totalTokens; 147 | 148 | /// 149 | @serdeKeys("prompt_tokens_details") 150 | @serdeOptional // for Legacy Completion API 151 | Nullable!PromptTokensDetails promptTokensDetails; 152 | 153 | /// 154 | @serdeKeys("completion_tokens_details") 155 | @serdeOptional // for Legacy Completion API 156 | Nullable!CompletionTokensDetails completionTokensDetails; 157 | } 158 | 159 | /// 160 | struct CompletionChoice 161 | { 162 | /// 163 | string text; 164 | /// 165 | size_t index; 166 | /// 167 | @serdeKeys("logprobs") 168 | Nullable!double logProbs; 169 | /// 170 | @serdeKeys("finish_reason") 171 | string finishReason; 172 | } 173 | 174 | /// 175 | @serdeIgnoreUnexpectedKeys 176 | struct CompletionResponse 177 | { 178 | /// 179 | string id; 180 | 181 | /// 182 | string object; 183 | 184 | /// 185 | ulong created; 186 | 187 | /// 188 | string model; 189 | 190 | /// 191 | CompletionChoice[] choices; 192 | 193 | /// 194 | CompletionUsage usage; 195 | 196 | /// 197 | @serdeKeys("service_tier") 198 | 
@serdeOptional // for Legacy Completion API 199 | string serviceTier; 200 | 201 | /// 202 | @serdeKeys("system_fingerprint") 203 | @serdeOptional // for Legacy Completion API 204 | string systemFingerprint; 205 | } 206 | -------------------------------------------------------------------------------- /source/openai/embedding.d: -------------------------------------------------------------------------------- 1 | /** 2 | OpenAI API Embeddings 3 | 4 | Standards: https://platform.openai.com/docs/api-reference/embeddings 5 | */ 6 | module openai.embedding; 7 | 8 | import mir.algebraic; 9 | import mir.serde; 10 | 11 | import openai.common; 12 | 13 | @safe: 14 | 15 | /// 16 | alias EmbeddingInput = Algebraic!(string, string[]); 17 | 18 | /// 19 | struct EmbeddingRequest 20 | { 21 | /// 22 | EmbeddingInput input; 23 | 24 | /// 25 | @serdeIgnoreDefault 26 | string model; 27 | 28 | /// 29 | @serdeIgnoreDefault 30 | @serdeKeys("encoding_format") 31 | string encodingFormat; 32 | 33 | @serdeIgnoreDefault 34 | uint dimensions; 35 | 36 | /// 37 | @serdeIgnoreDefault 38 | string user; 39 | } 40 | 41 | /// 42 | EmbeddingRequest embeddingRequest(string model, string input) 43 | { 44 | auto request = EmbeddingRequest(); 45 | request.model = model; 46 | request.input = input; 47 | return request; 48 | } 49 | 50 | /// 51 | unittest 52 | { 53 | auto request = embeddingRequest("text-embedding-ada-002", "Hello, D Programming Language!"); 54 | 55 | import mir.ser.json : serializeJson; 56 | 57 | string requestJson = request.serializeJson(); 58 | 59 | assert( 60 | requestJson == `{"input":"Hello, D Programming Language!","model":"text-embedding-ada-002"}`); 61 | } 62 | 63 | /// 64 | EmbeddingRequest embeddingRequest(string model, string input, uint dimensions) 65 | { 66 | auto request = EmbeddingRequest(); 67 | request.model = model; 68 | request.input = input; 69 | request.dimensions = dimensions; 70 | return request; 71 | } 72 | 73 | /// 74 | unittest 75 | { 76 | auto request = 
embeddingRequest("text-embedding-3-small", "Hello, D Programming Language!", 512); 77 | 78 | import mir.ser.json : serializeJson; 79 | 80 | string requestJson = request.serializeJson(); 81 | 82 | assert( 83 | requestJson == `{"input":"Hello, D Programming Language!","model":"text-embedding-3-small","dimensions":512}`); 84 | } 85 | 86 | /// 87 | EmbeddingRequest embeddingRequest(string model, string[] inputs) 88 | { 89 | auto request = EmbeddingRequest(); 90 | request.model = model; 91 | request.input = inputs; 92 | return request; 93 | } 94 | 95 | /// 96 | unittest 97 | { 98 | auto inputs = ["Hello,", "D", "Programming", "Language!"]; 99 | auto request3 = embeddingRequest("text-embedding-ada-003", inputs); 100 | 101 | import mir.ser.json : serializeJson; 102 | 103 | string requestJson3 = request3.serializeJson(); 104 | 105 | assert( 106 | requestJson3 == `{"input":["Hello,","D","Programming","Language!"],"model":"text-embedding-ada-003"}`); 107 | } 108 | 109 | /// 110 | EmbeddingRequest embeddingRequest(string model, string[] inputs, uint dimensions) 111 | { 112 | auto request = EmbeddingRequest(); 113 | request.model = model; 114 | request.input = inputs; 115 | request.dimensions = dimensions; 116 | return request; 117 | } 118 | 119 | /// 120 | unittest 121 | { 122 | auto inputs = ["Hello,", "D", "Programming", "Language!"]; 123 | auto request4 = embeddingRequest("text-embedding-3-small", inputs, 256); 124 | 125 | import mir.ser.json : serializeJson; 126 | 127 | string requestJson = request4.serializeJson(); 128 | 129 | assert( 130 | requestJson == `{"input":["Hello,","D","Programming","Language!"],"model":"text-embedding-3-small","dimensions":256}`); 131 | } 132 | 133 | /// 134 | struct EmbeddingUsage 135 | { 136 | /// 137 | @serdeKeys("prompt_tokens") 138 | uint promptTokens; 139 | 140 | /// 141 | @serdeKeys("total_tokens") 142 | uint totalTokens; 143 | } 144 | 145 | /// 146 | @serdeIgnoreUnexpectedKeys 147 | struct EmbeddingResponse 148 | { 149 | static struct 
Embedding 150 | { 151 | string object; 152 | float[] embedding; 153 | size_t index; 154 | } 155 | 156 | /// 157 | string object; 158 | 159 | /// 160 | Embedding[] data; 161 | 162 | /// 163 | string model; 164 | 165 | /// 166 | EmbeddingUsage usage; 167 | } 168 | -------------------------------------------------------------------------------- /source/openai/models.d: -------------------------------------------------------------------------------- 1 | /** 2 | OpenAI API Models 3 | 4 | Standards: https://platform.openai.com/docs/api-reference/models 5 | 6 | Remarks: 7 | - For deprecations, refer to: https://platform.openai.com/docs/deprecations 8 | - For model details, refer to: https://platform.openai.com/docs/models/ 9 | - If the model is labeled '(Legacy)' or if it's listed under "upgrade" 10 | - Remove the declaration 3 months after the shutdown date 11 | */ 12 | module openai.models; 13 | 14 | import mir.serde; 15 | 16 | @safe: 17 | 18 | // Chat & Completion 19 | // GPT-4o Series 20 | /// Identifier for the `gpt-4o` model. 21 | enum GPT4O = "gpt-4o"; 22 | /// Identifier for the dated `gpt-4o-2024-05-13` snapshot. 23 | enum GPT4O20240513 = "gpt-4o-2024-05-13"; 24 | /// Identifier for the dated `gpt-4o-2024-08-06` snapshot. 25 | enum GPT4O20240806 = "gpt-4o-2024-08-06"; 26 | /// Identifier for the dated `gpt-4o-2024-11-20` snapshot. 27 | enum GPT4O20241120 = "gpt-4o-2024-11-20"; 28 | /// Always points to the most recent GPT‑4o model. 29 | enum ChatGPT4OLatest = "chatgpt-4o-latest"; 30 | 31 | // GPT-4o mini Series 32 | /// Identifier for the `gpt-4o-mini` model. 33 | enum GPT4OMini = "gpt-4o-mini"; 34 | /// Identifier for the dated `gpt-4o-mini-2024-07-18` snapshot. 35 | enum GPT4OMini20240718 = "gpt-4o-mini-2024-07-18"; 36 | /// Use to request the TTS‑optimised variant. 37 | enum GPT4OMiniTTS = "gpt-4o-mini-tts"; 38 | /// Preview model for real‑time usage. 
39 | enum GPT4ORealtimePreview = "gpt-4o-realtime-preview"; 40 | /// Preview model specialised for audio input. 41 | enum GPT4OAudioPreview = "gpt-4o-audio-preview"; 42 | /// Preview model specialised for search. 43 | enum GPT4OSearchPreview = "gpt-4o-search-preview"; 44 | 45 | // o1 and o1-mini Series 46 | /// Identifier for the `o1` model. 47 | enum O1 = "o1"; 48 | /// Dated snapshot `o1-2024-12-17`. 49 | enum O120241217 = "o1-2024-12-17"; 50 | /// Identifier for the `o1-mini` model. 51 | enum O1Mini = "o1-mini"; 52 | /// Dated snapshot `o1-mini-2024-09-12`. 53 | enum O1Mini20240912 = "o1-mini-2024-09-12"; 54 | /// Identifier for the `o1-preview` model. 55 | enum O1Preview = "o1-preview"; 56 | /// Dated snapshot `o1-preview-2024-09-12`. 57 | enum O1Preview20240912 = "o1-preview-2024-09-12"; 58 | 59 | // o4-mini Series 60 | /// Identifier for the `o4-mini` model. 61 | enum O4Mini = "o4-mini"; 62 | /// Dated snapshot `o4-mini-2025-04-16`. 63 | enum O4Mini20250416 = "o4-mini-2025-04-16"; 64 | 65 | // o3 and o3-mini Series 66 | /// Identifier for the `o3` model. 67 | enum O3 = "o3"; 68 | /// Dated snapshot `o3-2025-04-16`. 69 | enum O320250416 = "o3-2025-04-16"; 70 | /// Identifier for the `o3-mini` model. 71 | enum O3Mini = "o3-mini"; 72 | /// Dated snapshot `o3-mini-2025-01-31`. 73 | enum O3Mini20250131 = "o3-mini-2025-01-31"; 74 | 75 | // GPT-4 Turbo Series 76 | /// Identifier for the `gpt-4-turbo` model. 77 | enum GPT4Turbo = "gpt-4-turbo"; 78 | /// Dated snapshot `gpt-4-turbo-2024-04-09`. 79 | enum GPT4Turbo20240409 = "gpt-4-turbo-2024-04-09"; 80 | /// Preview build of GPT‑4 Turbo. 81 | enum GPT4TurboPreview = "gpt-4-turbo-preview"; 82 | 83 | // GPT-4 Vision Series 84 | /// Deprecated: will be removed by 2024‑12‑06. Use `GPT4O` instead. 85 | deprecated("'gpt-4-vision-preview' is to be removed by 2024-12-06. 
Please use 'gpt-4o' instead") 86 | enum GPT4VisionPreview = "gpt-4-vision-preview"; 87 | 88 | // GPT-4-32k Series 89 | /// Deprecated: will be removed by 2025‑06‑06. Use `GPT4O` instead. 90 | deprecated("'gpt-4-32k' is to be removed by 2025-06-06. Please use 'gpt-4o' instead") 91 | enum GPT432K = "gpt-4-32k"; 92 | /// Deprecated: will be removed by 2025‑06‑06. Use `GPT4O` instead. 93 | deprecated("'gpt-4-32k-0314' is to be removed by 2025-06-06. Please use 'gpt-4o' instead") 94 | enum GPT432K0314 = "gpt-4-32k-0314"; 95 | /// Deprecated: will be removed by 2025‑06‑06. Use `GPT4O` instead. 96 | deprecated("'gpt-4-32k-0613' is to be removed by 2025-06-06. Please use 'gpt-4o' instead") 97 | enum GPT432K0613 = "gpt-4-32k-0613"; 98 | 99 | // GPT-4 Series 100 | /// Identifier for the `gpt-4` model. 101 | enum GPT4 = "gpt-4"; 102 | /// Deprecated: removed by 2024‑06‑13. Use `GPT4O` instead. 103 | deprecated("'gpt-4-0314' is to be removed by 2024-06-13. Please use 'gpt-4o' instead") 104 | enum GPT40314 = "gpt-4-0314"; 105 | /// Identifier for the dated `gpt-4-0613` snapshot. 106 | enum GPT40613 = "gpt-4-0613"; 107 | /// Identifier for the preview build `gpt-4-0125-preview`. 108 | enum GPT40125Preview = "gpt-4-0125-preview"; 109 | /// Identifier for the preview build `gpt-4-1106-preview`. 110 | enum GPT41106Preview = "gpt-4-1106-preview"; 111 | 112 | // GPT-4.1 Series 113 | /// Identifier for the experimental `gpt-4.1` model. 114 | enum GPT4Dot1 = "gpt-4.1"; 115 | /// Identifier for `gpt-4.1-mini`. 116 | enum GPT4Dot1Mini = "gpt-4.1-mini"; 117 | /// Identifier for `gpt-4.1-nano`. 118 | enum GPT4Dot1Nano = "gpt-4.1-nano"; 119 | 120 | // GPT-3.5 Turbo Series 121 | /// Identifier for the `gpt-3.5-turbo` model. 122 | enum GPT3Dot5Turbo = "gpt-3.5-turbo"; 123 | /// Instruction‑tuned variant of `gpt-3.5-turbo`. 124 | enum GPT3Dot5TurboInstruct = "gpt-3.5-turbo-instruct"; 125 | /// Dated snapshot `gpt-3.5-turbo-0125`. 
enum GPT350Turbo0125 = "gpt-3.5-turbo-0125";
/// Dated snapshot `gpt-3.5-turbo-1106`.
// NOTE(review): GPT350Turbo0125/GPT350Turbo1106 use "GPT350" rather than the
// "GPT3Dot5" convention used elsewhere; names kept for backward compatibility.
enum GPT350Turbo1106 = "gpt-3.5-turbo-1106";
/// Deprecated: removed by 2024-06-13. Use `GPT3Dot5Turbo` instead.
deprecated("'gpt-3.5-turbo-0301' is to be removed by 2024-06-13. Please use 'gpt-3.5-turbo' instead")
enum GPT3Dot5Turbo0301 = "gpt-3.5-turbo-0301";
/// Deprecated: removed by 2024-09-13. Use `GPT3Dot5Turbo` instead.
deprecated("'gpt-3.5-turbo-0613' is to be removed by 2024-09-13. Please use 'gpt-3.5-turbo' instead")
enum GPT3Dot5Turbo0613 = "gpt-3.5-turbo-0613";

// GPT-3.5 Turbo 16k Series
/// Identifier for the `gpt-3.5-turbo-16k` model.
enum GPT3Dot5Turbo16K = "gpt-3.5-turbo-16k";
/// Deprecated: removed by 2024-09-13. Use `GPT3Dot5Turbo` instead.
deprecated("'gpt-3.5-turbo-16k-0613' is to be removed by 2024-09-13. Please use 'gpt-3.5-turbo' instead")
enum GPT3Dot5Turbo16K0613 = "gpt-3.5-turbo-16k-0613";

/// davinci-instruct-beta
enum DavinciInstructBeta = "davinci-instruct-beta";
/// curie-instruct-beta
enum CurieInstructBeta = "curie-instruct-beta";

// Embedding
/// Identifier for the `text-embedding-3-large` model.
enum TextEmbedding3Large = "text-embedding-3-large";
/// Identifier for the `text-embedding-3-small` model.
enum TextEmbedding3Small = "text-embedding-3-small";
/// Identifier for the `text-embedding-ada-002` model.
enum AdaEmbeddingV2 = "text-embedding-ada-002";

// Moderation
/// Stable moderation model.
enum ModerationTextStable = "text-moderation-stable";
/// Latest moderation model.
enum ModerationTextLatest = "text-moderation-latest";
/// Archived moderation model used for testing.
enum ModerationText007 = "text-moderation-007";

// GPT base
/// Identifier for the `babbage-002` model.
166 | enum Babbage002 = "babbage-002"; 167 | /// Identifier for the `davinci-002` model. 168 | enum Davinci002 = "davinci-002"; 169 | 170 | /// Describes an OpenAI model offering that can be used with the API. 171 | @serdeIgnoreUnexpectedKeys 172 | struct Model 173 | { 174 | /// The model identifier, which can be referenced in the API endpoints. 175 | string id; 176 | 177 | /// The Unix timestamp (in seconds) when the model was created. 178 | ulong created; 179 | 180 | /// The object type, which is always "model". 181 | string object; 182 | 183 | /// The organization that owns the model. 184 | @serdeKeys("owned_by") 185 | string ownedBy; 186 | } 187 | 188 | /// 189 | struct ModelsResponse 190 | { 191 | /// 192 | Model[] data; 193 | /// 194 | string object; 195 | } 196 | 197 | unittest 198 | { 199 | import mir.deser.json; 200 | 201 | const json = `{"object":"list","data":[{"id":"gpt-3.5-turbo","created":0,"object":"model","owned_by":"openai"}]}`; 202 | auto response = deserializeJson!ModelsResponse(json); 203 | 204 | assert(response.object == "list"); 205 | assert(response.data.length == 1); 206 | assert(response.data[0].id == "gpt-3.5-turbo"); 207 | assert(response.data[0].ownedBy == "openai"); 208 | assert(response.data[0].object == "model"); 209 | } 210 | -------------------------------------------------------------------------------- /source/openai/moderation.d: -------------------------------------------------------------------------------- 1 | /** 2 | OpenAI API Moderations 3 | 4 | Standards: https://platform.openai.com/docs/api-reference/moderations 5 | */ 6 | module openai.moderation; 7 | 8 | import mir.serde; 9 | 10 | @safe: 11 | 12 | /// 13 | struct ModerationRequest 14 | { 15 | /// 16 | string input; 17 | 18 | /// 19 | @serdeIgnoreDefault 20 | string model; 21 | } 22 | 23 | /// 24 | ModerationRequest moderationRequest(string input) 25 | { 26 | auto request = ModerationRequest(); 27 | request.input = input; 28 | return request; 29 | } 30 | 31 | /+ 32 | // 
moderation response format
{
    "id": "modr-AB8CjOTu2jiq12hp1AQPfeqFWaORR",
    "model": "text-moderation-007",
    "results": [
        {
            "flagged": true,
            "categories": {
                "sexual": false,
                "hate": false,
                "harassment": true,
                "self-harm": false,
                "sexual/minors": false,
                "hate/threatening": false,
                "violence/graphic": false,
                "self-harm/intent": false,
                "self-harm/instructions": false,
                "harassment/threatening": true,
                "violence": true
            },
            "category_scores": {
                "sexual": 0.000011726012417057063,
                "hate": 0.22706663608551025,
                "harassment": 0.5215635299682617,
                "self-harm": 2.227119921371923e-6,
                "sexual/minors": 7.107352217872176e-8,
                "hate/threatening": 0.023547329008579254,
                "violence/graphic": 0.00003391829886822961,
                "self-harm/intent": 1.646940972932498e-6,
                "self-harm/instructions": 1.1198755256458526e-9,
                "harassment/threatening": 0.5694745779037476,
                "violence": 0.9971134662628174
            }
        }
    ]
}
+/

/// Per-category boolean verdicts of the moderation endpoint.
///
/// `@serdeIgnoreUnexpectedKeys` was previously embedded in a `///` doc comment
/// and therefore never applied; it is now a real attribute so newly added
/// moderation categories do not break deserialization, matching the other
/// response structs in this library.
@serdeIgnoreUnexpectedKeys
struct ModerationCategories
{
    /// "sexual": false
    bool sexual;

    /// "sexual/minors": false
    @serdeKeys("sexual/minors")
    bool sexualMinors;

    /// "hate": false
    bool hate;

    /// "hate/threatening": false
    @serdeKeys("hate/threatening")
    bool hateThreatening;

    /// "harassment": true
    bool harassment;

    /// "harassment/threatening": true
    @serdeKeys("harassment/threatening")
    bool harassmentThreatening;

    /// "self-harm": false
    @serdeKeys("self-harm")
    bool selfHarm;

    /// "self-harm/intent": false
    @serdeKeys("self-harm/intent")
    bool selfHarmIntent;

    /// "self-harm/instructions": false
    @serdeKeys("self-harm/instructions")
    bool selfHarmInstructions;

    /// "violence": true
    bool violence;

    /// "violence/graphic": false
    @serdeKeys("violence/graphic")
    bool violenceGraphic;
}

/// Per-category confidence scores of the moderation endpoint.
///
/// Same fix as `ModerationCategories`: the attribute below was previously a
/// doc comment and thus inert.
@serdeIgnoreUnexpectedKeys
struct ModerationCategoryScores
{
    /// "sexual": 0.000011726012417057063
    float sexual;

    /// "sexual/minors": 7.107352217872176e-8
    @serdeKeys("sexual/minors")
    float sexualMinors;

    /// "hate": 0.22706663608551025
    float hate;

    /// "hate/threatening": 0.023547329008579254
    @serdeKeys("hate/threatening")
    float hateThreatening;

    /// "harassment": 0.5215635299682617
    float harassment;

    /// "harassment/threatening": 0.5694745779037476
    @serdeKeys("harassment/threatening")
    float harassmentThreatening;

    /// "self-harm": 2.227119921371923e-6
    @serdeKeys("self-harm")
    float selfHarm;

    /// "self-harm/intent": 1.646940972932498e-6
    @serdeKeys("self-harm/intent")
    float selfHarmIntent;

    /// "self-harm/instructions": 1.1198755256458526e-9
    @serdeKeys("self-harm/instructions")
    float selfHarmInstructions;

    /// "violence": 0.9971134662628174
    float violence;

    /// "violence/graphic": 0.00003391829886822961
    @serdeKeys("violence/graphic")
    float violenceGraphic;
}

/// Moderation outcome for a single input.
struct ModerationResult
{
    /// Boolean verdict per category.
    ModerationCategories categories;

    /// Confidence score per category.
    @serdeKeys("category_scores")
    ModerationCategoryScores categoryScores;

    /// True if the input was flagged in any category.
    bool flagged;
}

/// Response body of the moderation endpoint.
struct ModerationResponse
{

    /// Unique identifier of the moderation request.
    string id;

    /// The moderation model that produced the results.
    string model;

    /// One result per input.
    ModerationResult[] results;
}

--------------------------------------------------------------------------------
/source/openai/package.d:
--------------------------------------------------------------------------------
/***
OpenAI API Client Library
*/
module openai;

public import openai.clients.openai; 7 | 8 | public import openai.common; 9 | 10 | public import openai.chat; 11 | public import openai.completion; 12 | public import openai.embedding; 13 | public import openai.models; 14 | public import openai.moderation; 15 | public import openai.audio; 16 | --------------------------------------------------------------------------------