├── .github └── ISSUE_TEMPLATE │ ├── bug_report.yml │ ├── config.yml │ └── feature_request.yml ├── .gitignore ├── LICENSE ├── README.md ├── audio.go ├── chat.go ├── client ├── api.go ├── client.go ├── method_string.go ├── methods.go ├── options.go ├── round_robin.go └── transport.go ├── common.go ├── completion.go ├── doc.go ├── embedding.go ├── entity ├── audio.go ├── audio.partial.go ├── chat.go ├── chat.partial.go ├── common.go ├── completion.go ├── edit.go ├── embedding.go ├── error.go ├── file.go ├── fine_tune.go ├── image.go ├── image.partial.go ├── model.go └── moderation.go ├── errors └── errors.go ├── example_test.go ├── file.go ├── fine_tune.go ├── go.mod ├── go.sum ├── image.go ├── models ├── audio.go ├── chat.go ├── completion.go ├── edit.go ├── embedding.go ├── fine_tunes.go └── moderation.go ├── moderation.go ├── patterns └── completion │ ├── completion.go │ └── example_test.go ├── types ├── programming │ └── programming_language.go └── type.go └── utils └── formbuilder.go /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: Bug report 2 | description: Create a report to help us improve 3 | labels: ["bug"] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for taking the time to fill out this bug report! 9 | - type: textarea 10 | id: what-happened 11 | attributes: 12 | label: Describe the bug 13 | description: A clear and concise description of what the bug is, and any additional context. 14 | placeholder: Tell us what you see! 15 | validations: 16 | required: true 17 | - type: textarea 18 | id: repro-steps 19 | attributes: 20 | label: To Reproduce 21 | description: Steps to reproduce the behavior. 22 | placeholder: | 23 | 1. Fetch a '...' 24 | 2. Update the '....' 25 | 3. 
See error 26 | validations: 27 | required: true 28 | - type: textarea 29 | id: code-snippets 30 | attributes: 31 | label: Code snippets 32 | description: If applicable, add code snippets to help explain your problem. 33 | render: JavaScript 34 | validations: 35 | required: false 36 | - type: input 37 | id: os 38 | attributes: 39 | label: OS 40 | placeholder: macOS 41 | validations: 42 | required: true 43 | - type: input 44 | id: language-version 45 | attributes: 46 | label: Go Version 47 | placeholder: Go v1.19 48 | validations: 49 | required: true 50 | - type: input 51 | id: lib-version 52 | attributes: 53 | label: Library version 54 | placeholder: openai v1.0.0 55 | validations: 56 | required: true -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: OpenAI support 4 | url: https://help.openai.com/ 5 | about: | 6 | Please only file issues here that you believe represent actual bugs or feature requests for the OpenAI Node library. 7 | If you're having general trouble with the OpenAI API, please visit our help center to get support. -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature request 2 | description: Suggest an idea for this library 3 | labels: ["feature-request"] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for taking the time to fill out this feature request! 9 | - type: textarea 10 | id: feature 11 | attributes: 12 | label: Describe the feature or improvement you're requesting 13 | description: A clear and concise description of what you want to happen. 
14 | validations: 15 | required: true 16 | - type: textarea 17 | id: context 18 | attributes: 19 | label: Additional context 20 | description: Add any other context about the feature request here. -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # If you prefer the allow list template instead of the deny list, see community template: 2 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore 3 | # 4 | # Binaries for programs and plugins 5 | *.exe 6 | *.exe~ 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Dependency directories (remove the comment below to include it) 18 | # vendor/ 19 | 20 | # Go workspace file 21 | go.work 22 | .idea 23 | _example/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenAi (community-maintained) [![Go Reference](https://pkg.go.dev/badge/github.com/GeniusAI-Platform/openai.svg)](https://pkg.go.dev/github.com/GeniusAI-Platform/openai) 2 | Package openai provides a Go SDK for the OpenAI API.this package supports several models, including GPT-4, GPT-3.5, GPT-3, DALL-E, and audio 3 | models. You can specify the desired model using the `Model` field in the request object. 
4 | 5 | 6 | ## Feature 7 | 8 | - ChatGPT (GPT-3, GPT-3.5, GPT-4) 9 | - DALL·E 2 10 | - Embedding 11 | - Audio 12 | - Fine-Tune 13 | - File 14 | - Moderations 15 | - Completion Patterns 16 | - Multiple API keys support 17 | 18 | ## Install ![Go Version](https://img.shields.io/badge/go%20version-%3E=1.19-61CFDD.svg?style=flat-square) 19 | 20 | ```shell 21 | $ go get -u github.com/GeniusAI-Platform/openai 22 | ``` 23 | 24 | ## Example Completion 25 | 26 | ```go 27 | package main 28 | 29 | import ( 30 | "context" 31 | "github.com/GeniusAI-Platform/openai" 32 | "github.com/GeniusAI-Platform/openai/client" 33 | "github.com/GeniusAI-Platform/openai/entity" 34 | "github.com/GeniusAI-Platform/openai/models" 35 | "log" 36 | "os" 37 | ) 38 | 39 | func main() { 40 | apiKey := os.Getenv("OPENAI_API_KEY") 41 | cli, err := client.New([]string{apiKey}) 42 | if err != nil { 43 | log.Fatalln(err) 44 | } 45 | 46 | c := openai.NewCompletion(cli) 47 | resp, err := c.CreateCompletion(context.Background(), entity.CompletionRequest{ 48 | Model: models.TEXT_DAVINCI_002, 49 | Prompt: "can you explain bubble sort algorithm?", 50 | }) 51 | 52 | if err != nil { 53 | log.Fatalln(err) 54 | } 55 | 56 | log.Println(resp) 57 | } 58 | 59 | ``` 60 | 61 | Example Completion Patterns 62 | 63 | ```go 64 | package main 65 | 66 | import ( 67 | "context" 68 | "github.com/GeniusAI-Platform/openai" 69 | "github.com/GeniusAI-Platform/openai/client" 70 | "github.com/GeniusAI-Platform/openai/patterns/completion" 71 | "github.com/GeniusAI-Platform/openai/types/programming" 72 | "log" 73 | "os" 74 | ) 75 | 76 | var code string = ` 77 | func add(a, b int) int { 78 | return a + b 79 | } 80 | ` 81 | 82 | func main() { 83 | apiKey := os.Getenv("OPENAI_API_KEY") 84 | cli, err := client.New([]string{apiKey}) 85 | if err != nil { 86 | log.Fatalln(err) 87 | } 88 | 89 | c := openai.NewCompletion(cli) 90 | resp, err := c.CreateCompletionFromPattern(context.Background(), completion.ProgrammingLanguageTranslator( 91 | code, 
92 | programming.Go, 93 | programming.Python, 94 | 0, 95 | )) 96 | 97 | if err != nil { 98 | log.Fatalln(err) 99 | } 100 | 101 | log.Println(resp.Choices[0].Text) 102 | } 103 | 104 | ``` 105 | 106 | See more details in [documentation](https://pkg.go.dev/github.com/GeniusAI-Platform/openai). 107 | 108 | ## TODO 109 | - [ ] Stream Support 110 | - [x] Moderation API 111 | - [x] Example API 112 | - [x] Fine-Tune API 113 | - [x] File API 114 | - [ ] Engine API 115 | - [ ] Azure API Support 116 | - [ ] Client, API Unit test 117 | 118 | ## Contributing 119 | 120 | 1. fork project in your GitHub account. 121 | 2. create new branch for new changes. 122 | 3. after change code, send Pull Request. 123 | -------------------------------------------------------------------------------- /audio.go: -------------------------------------------------------------------------------- 1 | package openai 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "github.com/GeniusAI-Platform/openai/client" 7 | "github.com/GeniusAI-Platform/openai/entity" 8 | "github.com/GeniusAI-Platform/openai/utils" 9 | "strconv" 10 | ) 11 | 12 | const ( 13 | createTranscriptionEndpoint = "/audio/transcriptions" 14 | createTranslationEndpoint = "/audio/translations" 15 | ) 16 | 17 | type Audio struct { 18 | client client.Transporter 19 | } 20 | 21 | // NewAudio create audio object to transcription and translation 22 | func NewAudio(client client.Transporter) *Audio { 23 | return &Audio{ 24 | client: client, 25 | } 26 | } 27 | 28 | // CreateTranscription Transcribes audio into the input language 29 | func (a *Audio) CreateTranscription(ctx context.Context, req entity.AudioRequest) (*entity.AudioResponse, error) { 30 | if err := a.client.GetValidator().Struct(req); err != nil { 31 | return nil, err 32 | } 33 | 34 | body, contentType, err := a.createForm(req) 35 | if err != nil { 36 | return nil, err 37 | } 38 | 39 | resp, err := a.client.PostFile(ctx, &client.APIConfig{Path: createTranscriptionEndpoint}, body, 
contentType) 40 | if err != nil { 41 | return nil, err 42 | } 43 | 44 | return responseHandler[*entity.AudioResponse](resp) 45 | } 46 | 47 | // CreateTranslation Translates audio into English 48 | func (a *Audio) CreateTranslation(ctx context.Context, req entity.AudioRequest) (*entity.AudioResponse, error) { 49 | if err := a.client.GetValidator().Struct(req); err != nil { 50 | return nil, err 51 | } 52 | 53 | body, contentType, err := a.createForm(req) 54 | if err != nil { 55 | return nil, err 56 | } 57 | 58 | resp, err := a.client.PostFile(ctx, &client.APIConfig{Path: createTranslationEndpoint}, body, contentType) 59 | if err != nil { 60 | return nil, err 61 | } 62 | 63 | return responseHandler[*entity.AudioResponse](resp) 64 | } 65 | 66 | func (a *Audio) createForm(req entity.AudioRequest) (*bytes.Buffer, string, error) { 67 | body := new(bytes.Buffer) 68 | fb := utils.NewFormBuilder(body) 69 | 70 | if err := fb.CreateFormFile("file", req.File); err != nil { 71 | return nil, "", err 72 | } 73 | 74 | if err := fb.WriteField("model", req.Model.String()); err != nil { 75 | return nil, "", err 76 | } 77 | 78 | if err := fb.WriteField("prompt", req.Prompt); err != nil { 79 | return nil, "", err 80 | } 81 | 82 | if err := fb.WriteField("temperature", strconv.FormatFloat(float64(req.Temperature), 'E', -1, 64)); err != nil { 83 | return nil, "", err 84 | } 85 | 86 | if err := fb.WriteField("language", req.Language); err != nil { 87 | return nil, "", err 88 | } 89 | 90 | if err := fb.WriteField("response_format", req.ResponseFormat.String()); err != nil { 91 | return nil, "", err 92 | } 93 | 94 | if err := fb.Close(); err != nil { 95 | return nil, "", err 96 | } 97 | 98 | return body, fb.FormDataContentType(), nil 99 | } 100 | -------------------------------------------------------------------------------- /chat.go: -------------------------------------------------------------------------------- 1 | package openai 2 | 3 | import ( 4 | "context" 5 | 
"github.com/GeniusAI-Platform/openai/client" 6 | "github.com/GeniusAI-Platform/openai/entity" 7 | ) 8 | 9 | const ( 10 | chatCompletionEndpoint = "/chat/completions" 11 | ) 12 | 13 | type ChatCompletion struct { 14 | client client.Transporter 15 | } 16 | 17 | // NewChat create chat completion object to create chat with chatgpt 18 | func NewChat(client client.Transporter) *ChatCompletion { 19 | return &ChatCompletion{ 20 | client: client, 21 | } 22 | } 23 | 24 | // CreateChatCompletion Creates a completion for the provided prompt and parameters 25 | func (c *ChatCompletion) CreateChatCompletion(ctx context.Context, req entity.ChatRequest) (*entity.ChatResponse, error) { 26 | if err := c.client.GetValidator().Struct(req); err != nil { 27 | return nil, err 28 | } 29 | 30 | resp, err := c.client.Post(ctx, &client.APIConfig{Path: chatCompletionEndpoint}, req) 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | return responseHandler[*entity.ChatResponse](resp) 36 | } 37 | -------------------------------------------------------------------------------- /client/api.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // APIConfig standard external api path address 4 | type APIConfig struct { 5 | Path string 6 | Query map[string]string // Query use for http get request for set query params example.com/x?q1=v1&q2=v2 7 | Headers map[string]string // Headers set http header request 8 | } 9 | -------------------------------------------------------------------------------- /client/client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "github.com/GeniusAI-Platform/openai/errors" 9 | "github.com/go-playground/validator/v10" 10 | "golang.org/x/time/rate" 11 | "net/http" 12 | "net/url" 13 | "runtime" 14 | "time" 15 | ) 16 | 17 | var _ Transporter = (*Client)(nil) 18 | 19 | const ( 20 | 
_default_base_url = "https://api.openai.com/v1" 21 | _defaultEmptyMessagesLimit uint = 300 22 | ) 23 | 24 | type Client struct { 25 | httpClient *http.Client 26 | validator *validator.Validate 27 | rate *rate.Limiter 28 | 29 | roundRobin changer 30 | 31 | timeout time.Duration 32 | concurrent int 33 | 34 | baseURL string 35 | organizationID string 36 | emptyMessagesLimit uint 37 | } 38 | 39 | type Response struct { 40 | resp *http.Response 41 | } 42 | 43 | type Transporter interface { 44 | GetClient() *http.Client 45 | GetValidator() *validator.Validate 46 | GetOrganizationID() string 47 | Get(ctx context.Context, apiConfig *APIConfig) (*Response, error) 48 | Post(ctx context.Context, apiConfig *APIConfig, apiRequest any) (*Response, error) 49 | PostFile(ctx context.Context, apiConfig *APIConfig, body *bytes.Buffer, contentType string) (*Response, error) 50 | Delete(ctx context.Context, apiConfig *APIConfig) (*Response, error) 51 | } 52 | 53 | // New create openai client 54 | func New(apiKeys []string, opts ...Option) (Transporter, error) { 55 | if len(apiKeys) == 0 { 56 | return nil, errors.ErrAPIKeyIsEmpty 57 | } 58 | 59 | client := &Client{ 60 | validator: validator.New(), 61 | baseURL: _default_base_url, 62 | emptyMessagesLimit: _defaultEmptyMessagesLimit, 63 | roundRobin: newRoundRobin(apiKeys...), 64 | } 65 | 66 | for _, opt := range opts { 67 | opt(client) 68 | } 69 | 70 | if client.httpClient == nil { 71 | client.httpClient = client.client() 72 | } 73 | 74 | return client, nil 75 | } 76 | 77 | func (c *Client) GetClient() *http.Client { 78 | return c.httpClient 79 | } 80 | 81 | func (c *Client) GetValidator() *validator.Validate { 82 | return c.validator 83 | } 84 | 85 | func (c *Client) GetOrganizationID() string { 86 | return c.organizationID 87 | } 88 | 89 | // Get do get request and return response 90 | func (c *Client) Get(ctx context.Context, apiConfig *APIConfig) (*Response, error) { 91 | if err := c.awaitRateLimiter(ctx); err != nil { 92 | return 
nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 93 | } 94 | 95 | req, err := http.NewRequestWithContext(ctx, GET.String(), c.baseURL+apiConfig.Path, nil) 96 | if err != nil { 97 | return nil, errors.New(http.StatusInternalServerError, "", err.Error(), "", "") 98 | } 99 | 100 | if len(apiConfig.Headers) != 0 { 101 | for k, v := range apiConfig.Headers { 102 | req.Header.Add(k, v) 103 | } 104 | } 105 | 106 | req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.getAPIKey())) 107 | req.Header.Set("Content-Type", "application/json") 108 | 109 | if len(apiConfig.Query) != 0 { 110 | req.URL.RawQuery = c.queryBuilder(apiConfig.Query) 111 | } 112 | 113 | if c.organizationID != "" { 114 | req.Header.Set("OpenAI-Organization", c.organizationID) 115 | } 116 | 117 | resp, err := c.do(ctx, req) 118 | if err != nil { 119 | return nil, errors.New(http.StatusInternalServerError, "", err.Error(), "", "") 120 | } 121 | 122 | return &Response{resp}, nil 123 | } 124 | 125 | // Post do post request and return response 126 | func (c *Client) Post(ctx context.Context, apiConfig *APIConfig, apiRequest any) (*Response, error) { 127 | if err := c.awaitRateLimiter(ctx); err != nil { 128 | return nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 129 | } 130 | 131 | body, err := json.Marshal(apiRequest) 132 | if err != nil { 133 | return nil, errors.New(http.StatusInternalServerError, "", err.Error(), "", "") 134 | } 135 | 136 | url, err := url.JoinPath(c.baseURL, apiConfig.Path) 137 | if err != nil { 138 | return nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 139 | } 140 | 141 | req, err := http.NewRequestWithContext(ctx, POST.String(), url, bytes.NewBuffer(body)) 142 | if err != nil { 143 | return nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 144 | } 145 | 146 | if len(apiConfig.Headers) != 0 { 147 | for k, v := range apiConfig.Headers { 148 | req.Header.Add(k, v) 149 | } 150 | } 151 | 152 | 
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.getAPIKey())) 153 | req.Header.Set("Content-Type", "application/json") 154 | 155 | if len(apiConfig.Query) != 0 { 156 | req.URL.RawQuery = c.queryBuilder(apiConfig.Query) 157 | } 158 | 159 | if c.organizationID != "" { 160 | req.Header.Set("OpenAI-Organization", c.organizationID) 161 | } 162 | 163 | resp, err := c.do(ctx, req) 164 | if err != nil { 165 | return nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 166 | } 167 | 168 | return &Response{resp}, nil 169 | } 170 | 171 | func (c *Client) PostFile(ctx context.Context, apiConfig *APIConfig, body *bytes.Buffer, contentType string) (*Response, error) { 172 | if err := c.awaitRateLimiter(ctx); err != nil { 173 | return nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 174 | } 175 | 176 | url, err := url.JoinPath(c.baseURL, apiConfig.Path) 177 | if err != nil { 178 | return nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 179 | } 180 | 181 | req, err := http.NewRequestWithContext(ctx, POST.String(), url, body) 182 | if err != nil { 183 | return nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 184 | } 185 | 186 | if len(apiConfig.Headers) != 0 { 187 | for k, v := range apiConfig.Headers { 188 | req.Header.Add(k, v) 189 | } 190 | } 191 | 192 | req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.getAPIKey())) 193 | req.Header.Set("Content-Type", contentType) 194 | req.Header.Set("Accept", "application/json; charset=utf-8") 195 | 196 | if len(apiConfig.Query) != 0 { 197 | req.URL.RawQuery = c.queryBuilder(apiConfig.Query) 198 | } 199 | 200 | if c.organizationID != "" { 201 | req.Header.Set("OpenAI-Organization", c.organizationID) 202 | } 203 | 204 | resp, err := c.do(ctx, req) 205 | if err != nil { 206 | return nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 207 | } 208 | 209 | return &Response{resp}, nil 210 | } 211 | 212 | func (c *Client) Delete(ctx 
context.Context, apiConfig *APIConfig) (*Response, error) { 213 | if err := c.awaitRateLimiter(ctx); err != nil { 214 | return nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 215 | } 216 | 217 | url, err := url.JoinPath(c.baseURL, apiConfig.Path) 218 | if err != nil { 219 | return nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 220 | } 221 | 222 | req, err := http.NewRequestWithContext(ctx, DELETE.String(), url, nil) 223 | if err != nil { 224 | return nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 225 | } 226 | 227 | if len(apiConfig.Headers) != 0 { 228 | for k, v := range apiConfig.Headers { 229 | req.Header.Add(k, v) 230 | } 231 | } 232 | 233 | req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.getAPIKey())) 234 | req.Header.Set("Content-Type", "application/json") 235 | 236 | if len(apiConfig.Query) != 0 { 237 | req.URL.RawQuery = c.queryBuilder(apiConfig.Query) 238 | } 239 | 240 | if c.organizationID != "" { 241 | req.Header.Set("OpenAI-Organization", c.organizationID) 242 | } 243 | 244 | resp, err := c.do(ctx, req) 245 | if err != nil { 246 | return nil, errors.New(http.StatusTooManyRequests, "", err.Error(), "", "") 247 | } 248 | 249 | return &Response{resp}, nil 250 | } 251 | 252 | func (c *Client) Stream(ctx context.Context, apiConfig *APIConfig, method Method, apiRequest any) (*Response, error) { 253 | if err := c.awaitRateLimiter(ctx); err != nil { 254 | return nil, err 255 | } 256 | 257 | body, err := json.Marshal(apiRequest) 258 | if err != nil { 259 | return nil, err 260 | } 261 | 262 | url, err := url.JoinPath(c.baseURL, apiConfig.Path) 263 | if err != nil { 264 | return nil, err 265 | } 266 | 267 | req, err := http.NewRequest(method.String(), url, bytes.NewBuffer(body)) 268 | if err != nil { 269 | return nil, err 270 | } 271 | 272 | req.Header.Set("Content-Type", "application/json") 273 | req.Header.Set("Accept", "text/event-stream") 274 | req.Header.Set("Cache-Control", 
"no-cache") 275 | req.Header.Set("Connection", "keep-alive") 276 | 277 | if c.organizationID != "" { 278 | req.Header.Set("OpenAI-Organization", c.organizationID) 279 | } 280 | 281 | resp, err := c.do(ctx, req) 282 | if err != nil { 283 | return nil, err 284 | } 285 | 286 | return &Response{resp}, nil 287 | } 288 | 289 | // GetJSON decode response body to your response 290 | func (r *Response) GetJSON(response any) error { 291 | defer r.resp.Body.Close() 292 | if err := json.NewDecoder(r.resp.Body).Decode(response); err != nil { 293 | return errors.New(http.StatusInternalServerError, "", errors.ErrFailedToUnmarshalJSON.Error(), "", "") 294 | } 295 | return nil 296 | } 297 | 298 | // GetHttpResponse return http response 299 | func (r *Response) GetHttpResponse() *http.Response { 300 | return r.resp 301 | } 302 | 303 | func (c *Client) do(ctx context.Context, req *http.Request) (*http.Response, error) { 304 | client := c.httpClient 305 | if client == nil { 306 | client = http.DefaultClient 307 | } 308 | 309 | return client.Do(req.WithContext(ctx)) 310 | } 311 | 312 | func (c *Client) awaitRateLimiter(ctx context.Context) error { 313 | if c.rate == nil { 314 | return nil 315 | } 316 | return c.rate.Wait(ctx) 317 | } 318 | 319 | func (c *Client) queryBuilder(params map[string]string) string { 320 | query := url.Values{} 321 | for k, v := range params { 322 | query[k] = []string{v} 323 | } 324 | return query.Encode() 325 | } 326 | 327 | func (c *Client) concurrency() int { 328 | if c.concurrent > 0 { 329 | return c.concurrent 330 | } 331 | return runtime.NumCPU() 332 | } 333 | 334 | func (c *Client) getAPIKey() string { 335 | return c.roundRobin.Next() 336 | } 337 | 338 | func (c *Client) client() *http.Client { 339 | return &http.Client{ 340 | Timeout: c.timeout, 341 | Transport: &http.Transport{ 342 | MaxIdleConnsPerHost: c.concurrency(), 343 | }, 344 | } 345 | } 346 | -------------------------------------------------------------------------------- 
/client/method_string.go: -------------------------------------------------------------------------------- 1 | // Code generated by "stringer -type=Method"; DO NOT EDIT. 2 | 3 | package client 4 | 5 | import "strconv" 6 | 7 | func _() { 8 | // An "invalid array index" compiler error signifies that the constant values have changed. 9 | // Re-run the stringer command to generate them again. 10 | var x [1]struct{} 11 | _ = x[POST-1] 12 | _ = x[GET-2] 13 | _ = x[PUT-3] 14 | _ = x[PATCH-4] 15 | _ = x[DELETE-5] 16 | } 17 | 18 | const _Method_name = "POSTGETPUTPATCHDELETE" 19 | 20 | var _Method_index = [...]uint8{0, 4, 7, 10, 15, 21} 21 | 22 | func (i Method) String() string { 23 | i -= 1 24 | if i < 0 || i >= Method(len(_Method_index)-1) { 25 | return "Method(" + strconv.FormatInt(int64(i+1), 10) + ")" 26 | } 27 | return _Method_name[_Method_index[i]:_Method_index[i+1]] 28 | } 29 | -------------------------------------------------------------------------------- /client/methods.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | //go:generate stringer -type=Method 4 | 5 | // Method is http method for request 6 | type Method int 7 | 8 | const ( 9 | POST Method = iota + 1 10 | GET 11 | PUT 12 | PATCH 13 | DELETE 14 | ) 15 | -------------------------------------------------------------------------------- /client/options.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "golang.org/x/time/rate" 5 | "net/http" 6 | "time" 7 | ) 8 | 9 | type Option func(client *Client) 10 | 11 | // WithCustomClient use custom http httpClient instead default httpClient 12 | func WithCustomClient(client *http.Client) Option { 13 | return func(c *Client) { 14 | if _, ok := client.Transport.(*transport); !ok { 15 | t := client.Transport 16 | if t != nil { 17 | client.Transport = &transport{Base: t} 18 | } else { 19 | client.Transport = &transport{Base: 
http.DefaultTransport} 20 | } 21 | } 22 | c.httpClient = client 23 | } 24 | } 25 | 26 | // WithRateLimit make rate limit for example every time 5 * time.Second for 50 request 27 | func WithRateLimit(every time.Duration, requestPerTime int) Option { 28 | return func(client *Client) { 29 | client.rate = rate.NewLimiter(rate.Every(every), requestPerTime) 30 | } 31 | } 32 | 33 | func WithCustomBaseURL(baseURL string) Option { 34 | return func(c *Client) { 35 | c.baseURL = baseURL 36 | } 37 | } 38 | 39 | func WithOrganizationID(orgID string) Option { 40 | return func(c *Client) { 41 | c.organizationID = orgID 42 | } 43 | } 44 | 45 | func SetEmptyMessageLimit(limit uint) Option { 46 | return func(c *Client) { 47 | c.emptyMessagesLimit = limit 48 | } 49 | } 50 | 51 | // Concurrency changes Client's concurrency level. 52 | func Concurrency(n int) Option { 53 | return func(c *Client) { c.concurrent = n } 54 | } 55 | 56 | // Timeout changes Client's timeout per request. 57 | func Timeout(d time.Duration) Option { 58 | return func(c *Client) { c.timeout = d } 59 | } 60 | -------------------------------------------------------------------------------- /client/round_robin.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | ) 7 | 8 | // changer is an interface for representing round-robin balancing API key usage. 9 | type changer interface { 10 | Next() string 11 | } 12 | 13 | type roundRobin struct { 14 | apiKeys []*string 15 | next uint32 16 | mu sync.RWMutex 17 | } 18 | 19 | // newRoundRobin returns a new instance of roundRobin. 
20 | func newRoundRobin(keys ...string) changer { 21 | apiKeys := make([]*string, len(keys)) 22 | for i, key := range keys { 23 | newKey := key 24 | apiKeys[i] = &newKey 25 | } 26 | return &roundRobin{ 27 | apiKeys: apiKeys, 28 | } 29 | } 30 | 31 | func (r *roundRobin) Next() string { 32 | r.mu.RLock() 33 | defer r.mu.RUnlock() 34 | n := atomic.AddUint32(&r.next, 1) 35 | return *r.apiKeys[(int(n)-1)%len(r.apiKeys)] 36 | } 37 | -------------------------------------------------------------------------------- /client/transport.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | ) 7 | 8 | const userAgent = "Go-http-httpClient/1.1" 9 | 10 | type transport struct { 11 | Base http.RoundTripper 12 | } 13 | 14 | // RoundTrip appends userAgent existing User-Agent header and performs the request 15 | // via t.Base. 16 | func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { 17 | req = cloneRequest(req) 18 | ua := req.Header.Get("User-Agent") 19 | if ua == "" { 20 | ua = userAgent 21 | } else { 22 | ua = fmt.Sprintf("%s;%s", ua, userAgent) 23 | } 24 | req.Header.Set("User-Agent", ua) 25 | return t.Base.RoundTrip(req) 26 | } 27 | 28 | // cloneRequest returns a clone of the provided *http.Request. 29 | // The clone is a shallow copy of the struct and its Headers map. 
30 | func cloneRequest(r *http.Request) *http.Request { 31 | // shallow copy of the struct 32 | r2 := new(http.Request) 33 | *r2 = *r 34 | // deep copy of the Headers 35 | r2.Header = make(http.Header) 36 | for k, s := range r.Header { 37 | r2.Header[k] = s 38 | } 39 | return r2 40 | } 41 | -------------------------------------------------------------------------------- /common.go: -------------------------------------------------------------------------------- 1 | package openai 2 | 3 | import ( 4 | "github.com/GeniusAI-Platform/openai/client" 5 | "github.com/GeniusAI-Platform/openai/entity" 6 | "github.com/GeniusAI-Platform/openai/errors" 7 | "net/http" 8 | "reflect" 9 | ) 10 | 11 | func responseHandler[T any](resp *client.Response) (response T, err error) { 12 | errResp := new(entity.ErrorResponse) 13 | m, ok := reflect.New(reflect.TypeOf(response).Elem()).Interface().(T) 14 | if !ok { 15 | return response, errors.New(http.StatusInternalServerError, "", "response type is invalid", "", "") 16 | } 17 | if resp.GetHttpResponse().StatusCode != http.StatusOK { 18 | if err = resp.GetJSON(errResp); err != nil { 19 | return response, err 20 | } 21 | errResp.HttpCode = resp.GetHttpResponse().StatusCode 22 | return response, errResp 23 | } 24 | 25 | if err = resp.GetJSON(m); err != nil { 26 | return response, err 27 | } 28 | 29 | return m, nil 30 | } 31 | -------------------------------------------------------------------------------- /completion.go: -------------------------------------------------------------------------------- 1 | package openai 2 | 3 | import ( 4 | "context" 5 | "github.com/GeniusAI-Platform/openai/client" 6 | "github.com/GeniusAI-Platform/openai/entity" 7 | "github.com/GeniusAI-Platform/openai/patterns/completion" 8 | ) 9 | 10 | const ( 11 | createCompletionEndpoint = "/completions" 12 | ) 13 | 14 | type Completion struct { 15 | client client.Transporter 16 | } 17 | 18 | // NewCompletion create Completion object to text completion using davinci 19 | 
func NewCompletion(client client.Transporter) *Completion { 20 | return &Completion{ 21 | client: client, 22 | } 23 | } 24 | 25 | // CreateCompletion Creates a job that fine-tunes a specified model from a given dataset. 26 | // Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 27 | func (c *Completion) CreateCompletion(ctx context.Context, req entity.CompletionRequest) (*entity.CompletionResponse, error) { 28 | return c.request(ctx, req) 29 | } 30 | 31 | // CreateCompletionFromPattern create a completion using specific patterns 32 | func (c *Completion) CreateCompletionFromPattern(ctx context.Context, pattern completion.CompletionPattern) (*entity.CompletionResponse, error) { 33 | return c.request(ctx, pattern()) 34 | } 35 | 36 | func (c *Completion) request(ctx context.Context, req entity.CompletionRequest) (*entity.CompletionResponse, error) { 37 | if err := c.client.GetValidator().Struct(req); err != nil { 38 | return nil, err 39 | } 40 | 41 | resp, err := c.client.Post(ctx, &client.APIConfig{Path: createCompletionEndpoint}, req) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | return responseHandler[*entity.CompletionResponse](resp) 47 | } 48 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package openai provides a Go SDK for the OpenAI API.this package supports several models, including GPT-4, GPT-3.5, GPT-3, DALL-E, and audio 3 | models. You can specify the desired model using the `Model` field in the request object. 4 | 5 | # Usage 6 | 7 | To use this SDK, you will first need to obtain an API key from the OpenAI website. 8 | You can then create a client object using the `New` function: 9 | 10 | client := client.New(apiKey) 11 | 12 | The client object provides methods for making requests to the various endpoints of 13 | the OpenAI API. 
For example, to generate text using the GPT-3.5 or GPT-4 model, you can use 14 | the `CreateChatCompletion` method: 15 | 16 | c := openai.NewChat(cli) 17 | resp, err := c.CreateChatCompletion(context.Background(), entity.ChatRequest{ 18 | Model: models.GPT35_TURBO, 19 | Messages: []entity.ChatMessage{ 20 | { 21 | Role: entity.USER, 22 | Content: "Hello", 23 | }, 24 | }, 25 | }) 26 | 27 | In addition to generating text and images, this package also supports fine-tuning 28 | models and generating embeddings. For example, to fine-tune a GPT-3 model, you can 29 | use the `CreateFineTune` method: 30 | 31 | c := openai.NewFineTune(cli) 32 | resp, err := c.CreateFineTune(context.Background(), entity.FineTuneRequest{}) 33 | 34 | For more information about the available methods and request/response objects, see 35 | the documentation for the `Client` type and the various endpoint types. 36 | 37 | # Authentication 38 | 39 | Requests to the OpenAI API must include an API key in the `Authorization` header. 40 | You can pass this key to the client constructor, or you can set the `OPENAI_API_KEY` 41 | environment variable to automatically use it: 42 | 43 | os.Setenv("OPENAI_API_KEY", apiKey) 44 | client := client.New(os.GetEnv("OPENAI_API_KEY")) 45 | 46 | # Concurrency 47 | 48 | The client methods are safe to use concurrently from multiple goroutines. 49 | 50 | # Errors 51 | 52 | Any errors returned by the client methods will be of type `openai.Error`. This type 53 | provides access to the raw HTTP response, as well as any JSON error response that 54 | was returned by the API. For more information, see the documentation for the `Error` 55 | type. 56 | 57 | # Endpoint Types 58 | 59 | The package defines types for each of the endpoints in the OpenAI API. These types 60 | provide a convenient way to construct requests and parse responses for each endpoint. 61 | For more information, see the documentation for each endpoint type. 
62 | 63 | # Examples 64 | 65 | The `_examples` directory in the package source contains several examples of how to 66 | use the SDK to perform various tasks with the OpenAI API. These examples can serve as 67 | a starting point for your own usage of the SDK. 68 | */ 69 | package openai 70 | -------------------------------------------------------------------------------- /embedding.go: -------------------------------------------------------------------------------- 1 | package openai 2 | 3 | import ( 4 | "context" 5 | "github.com/GeniusAI-Platform/openai/client" 6 | "github.com/GeniusAI-Platform/openai/entity" 7 | ) 8 | 9 | const ( 10 | createEmbeddingEndpoint = "/embeddings" 11 | ) 12 | 13 | type Embedding struct { 14 | client client.Transporter 15 | } 16 | 17 | // NewEmbedding create embedding object to create embeddings 18 | func NewEmbedding(client client.Transporter) *Embedding { 19 | return &Embedding{ 20 | client: client, 21 | } 22 | } 23 | 24 | // CreateEmbedding Creates an embedding vector representing the input text 25 | func (e *Embedding) CreateEmbedding(ctx context.Context, req entity.EmbeddingRequest) (*entity.EmbeddingResponse, error) { 26 | if err := e.client.GetValidator().Struct(req); err != nil { 27 | return nil, err 28 | } 29 | 30 | resp, err := e.client.Post(ctx, &client.APIConfig{Path: createEmbeddingEndpoint}, req) 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | return responseHandler[*entity.EmbeddingResponse](resp) 36 | } 37 | -------------------------------------------------------------------------------- /entity/audio.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | import ( 4 | "github.com/GeniusAI-Platform/openai/models" 5 | "os" 6 | ) 7 | 8 | type AudioRequest struct { 9 | Model models.Audio `json:"model" validate:"required"` 10 | // File The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm 11 | File *os.File `json:"file" 
validate:"required"` 12 | // Prompt An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language 13 | Prompt string `json:"prompt,omitempty"` 14 | // Temperature The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values 15 | //like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase 16 | //the temperature until certain thresholds are hit 17 | Temperature float32 `json:"temperature,omitempty"` 18 | // Language The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency, learn more: 19 | // https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes 20 | Language string `json:"language,omitempty"` 21 | // ResponseFormat The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt 22 | ResponseFormat AudioFormat `json:"responseFormat,omitempty"` 23 | } 24 | 25 | type AudioResponse struct { 26 | Text string `json:"text"` 27 | } 28 | -------------------------------------------------------------------------------- /entity/audio.partial.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | import "encoding/json" 4 | 5 | type AudioFormat uint8 6 | 7 | const ( 8 | AudioJSONFormat AudioFormat = iota 9 | AudioSRTFormat 10 | AudioVTTFormat 11 | AudioTextFormat 12 | AudioVerboseJSONFormat 13 | ) 14 | 15 | func (a AudioFormat) String() string { 16 | switch a { 17 | case AudioJSONFormat: 18 | return "json" 19 | case AudioSRTFormat: 20 | return "srt" 21 | case AudioVTTFormat: 22 | return "vtt" 23 | case AudioTextFormat: 24 | return "text" 25 | case AudioVerboseJSONFormat: 26 | return "verbose_json" 27 | default: 28 | return "json" 29 | } 30 | } 31 | 32 | func (a AudioFormat) MarshalJSON() ([]byte, error) { 33 | return json.Marshal(a.String()) 
34 | } 35 | 36 | func (a AudioFormat) UnmarshalJSON(data []byte) error { 37 | var s string 38 | if err := json.Unmarshal(data, &s); err != nil { 39 | return err 40 | } 41 | return nil 42 | } 43 | -------------------------------------------------------------------------------- /entity/chat.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | import "github.com/GeniusAI-Platform/openai/models" 4 | 5 | type ChatRequest struct { 6 | Model models.Chat `json:"model" validate:"required"` 7 | // Messages A list of messages describing the conversation so far 8 | Messages []ChatMessage `json:"messages"` 9 | // MaxTokens The maximum number of tokens to generate in the completion 10 | // The token count of your prompt plus max_tokens cannot exceed the model's 11 | // context length. Most models have a context length of 2048 tokens 12 | // (except for the newest models, which support 4096). 13 | MaxTokens int `json:"max_tokens,omitempty"` 14 | // Temperature What sampling temperature to use, between 0 and 2. 15 | //Higher values like 0.8 will make the output more random, while 16 | //lower values like 0.2 will make it more focused and deterministic 17 | Temperature float32 `json:"temperature,omitempty"` 18 | // TopP An alternative to sampling with temperature, called nucleus 19 | //sampling, where the model considers the results of the tokens with 20 | //top_p probability mass. So 0.1 means only the tokens comprising the 21 | //top 10% probability mass are considered 22 | TopP float32 `json:"top_p,omitempty"` 23 | // N How many completions to generate for each prompt 24 | N int `json:"n,omitempty"` 25 | 26 | //TODO: required implement Stream support 27 | // Stream bool `json:"stream,omitempty"` 28 | 29 | // Stop Up to 4 sequences where the API will stop generating further tokens. 
30 | //The returned text will not contain the stop sequence 31 | Stop []string `json:"stop,omitempty"` 32 | // PresencePenalty Number between -2.0 and 2.0. Positive values penalize 33 | //new tokens based on whether they appear in the text so far, 34 | //increasing the model's likelihood to talk about new topics 35 | PresencePenalty float32 `json:"presence_penalty,omitempty"` 36 | // FrequencyPenalty Number between -2.0 and 2.0. Positive values penalize new tokens 37 | //based on their existing frequency in the text so far, 38 | //decreasing the model's likelihood to repeat the same line verbatim 39 | FrequencyPenalty float32 `json:"frequency_penalty,omitempty"` 40 | // LogitBias Modify the likelihood of specified tokens appearing in the completion 41 | LogitBias map[string]int `json:"logit_bias,omitempty"` 42 | // User A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 43 | User string `json:"user,omitempty"` 44 | } 45 | 46 | type ChatResponse struct { 47 | ID string `json:"id"` 48 | Object string `json:"object"` 49 | Created int64 `json:"created"` 50 | Model string `json:"model"` 51 | Choices []ChatChoice `json:"choices"` 52 | Usage TokenUsage `json:"usage"` 53 | } 54 | 55 | type ChatChoice struct { 56 | Index int `json:"index"` 57 | Message ChatMessage `json:"message"` 58 | FinishReason string `json:"finish_reason"` 59 | } 60 | 61 | type ChatMessage struct { 62 | // Role The role of the author of this message. One of system, user, or assistant 63 | Role Role `json:"role" validate:"required"` 64 | // Content The contents of the message 65 | Content string `json:"content" validate:"required"` 66 | // Name The name of the author of this message. 
May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters 67 | Name string `json:"name,omitempty"` 68 | } 69 | -------------------------------------------------------------------------------- /entity/chat.partial.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | import ( 4 | "encoding/json" 5 | ) 6 | 7 | type Role uint8 8 | 9 | const ( 10 | SYSTEM = iota 11 | USER 12 | ASSISTANT 13 | ) 14 | 15 | func (r Role) String() string { 16 | switch r { 17 | case SYSTEM: 18 | return "system" 19 | case USER: 20 | return "user" 21 | case ASSISTANT: 22 | return "assistant" 23 | default: 24 | return "assistant" 25 | } 26 | } 27 | 28 | func (r Role) MarshalJSON() ([]byte, error) { 29 | return json.Marshal(r.String()) 30 | } 31 | 32 | func (r Role) UnmarshalJSON(data []byte) error { 33 | var s string 34 | if err := json.Unmarshal(data, &s); err != nil { 35 | return err 36 | } 37 | return nil 38 | } 39 | -------------------------------------------------------------------------------- /entity/common.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | type TokenUsage struct { 4 | PromptTokens int `json:"prompt_tokens"` 5 | CompletionTokens int `json:"completion_tokens"` 6 | TotalTokens int `json:"total_tokens"` 7 | } 8 | -------------------------------------------------------------------------------- /entity/completion.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | import "github.com/GeniusAI-Platform/openai/models" 4 | 5 | type CompletionRequest struct { 6 | Model models.Completion `json:"model" validate:"required"` 7 | // Prompt he prompt(s) to generate completions for, encoded as a string, 8 | // array of strings, array of tokens, or array of token arrays 9 | Prompt any `json:"prompt,omitempty" validate:"required"` 10 | // Suffix The suffix that comes after a completion of 
inserted text 11 | Suffix string `json:"suffix,omitempty"` 12 | // MaxTokens The maximum number of tokens to generate in the completion 13 | // The token count of your prompt plus max_tokens cannot exceed the model's 14 | // context length. Most models have a context length of 2048 tokens 15 | // (except for the newest models, which support 4096). 16 | MaxTokens int `json:"max_tokens,omitempty"` 17 | // Temperature What sampling temperature to use, between 0 and 2. 18 | //Higher values like 0.8 will make the output more random, while 19 | //lower values like 0.2 will make it more focused and deterministic 20 | Temperature float32 `json:"temperature,omitempty"` 21 | // TopP An alternative to sampling with temperature, called nucleus 22 | //sampling, where the model considers the results of the tokens with 23 | //top_p probability mass. So 0.1 means only the tokens comprising the 24 | //top 10% probability mass are considered 25 | TopP float32 `json:"top_p,omitempty"` 26 | // N How many completions to generate for each prompt 27 | N int `json:"n,omitempty"` 28 | 29 | //TODO: required implement Stream support 30 | // Stream bool `json:"stream,omitempty"` 31 | 32 | // LogProbs Include the log probabilities on the logprobs most likely 33 | //tokens, as well the chosen tokens. For example, if logprobs is 5, 34 | //the API will return a list of the 5 most likely tokens. 35 | //The API will always return the logprob of the sampled token, 36 | //so there may be up to logprobs+1 elements in the response 37 | LogProbs int `json:"logprobs,omitempty"` 38 | // Echo Echo back the prompt in addition to the completion 39 | Echo bool `json:"echo,omitempty"` 40 | // Stop Up to 4 sequences where the API will stop generating further tokens. 41 | //The returned text will not contain the stop sequence 42 | Stop []string `json:"stop,omitempty"` 43 | // PresencePenalty Number between -2.0 and 2.0. 
Positive values penalize 44 | //new tokens based on whether they appear in the text so far, 45 | //increasing the model's likelihood to talk about new topics 46 | PresencePenalty float32 `json:"presence_penalty,omitempty"` 47 | // FrequencyPenalty Number between -2.0 and 2.0. Positive values penalize new tokens 48 | //based on their existing frequency in the text so far, 49 | //decreasing the model's likelihood to repeat the same line verbatim 50 | FrequencyPenalty float32 `json:"frequency_penalty,omitempty"` 51 | // BestOf Generates best_of completions server-side and returns the "best" 52 | //(the one with the highest log probability per token). Results 53 | //cannot be streamed 54 | BestOf int `json:"best_of,omitempty"` 55 | // LogitBias Modify the likelihood of specified tokens appearing in the completion 56 | LogitBias map[string]int `json:"logit_bias,omitempty"` 57 | // User A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 58 | User string `json:"user,omitempty"` 59 | } 60 | 61 | type CompletionResponse struct { 62 | ID string `json:"id"` 63 | Object string `json:"object"` 64 | Created int64 `json:"created"` 65 | Model string `json:"model"` 66 | Choices []CompletionChoice `json:"choices"` 67 | Usage TokenUsage `json:"usage"` 68 | } 69 | 70 | type CompletionChoice struct { 71 | Text string `json:"text"` 72 | Index int `json:"index"` 73 | FinishReason string `json:"finish_reason"` 74 | LogProbs LogprobResult `json:"logprobs"` 75 | } 76 | 77 | type LogprobResult struct { 78 | Tokens []string `json:"tokens"` 79 | TokenLogprobs []float32 `json:"token_logprobs"` 80 | TopLogprobs []map[string]float32 `json:"top_logprobs"` 81 | TextOffset []int `json:"text_offset"` 82 | } 83 | -------------------------------------------------------------------------------- /entity/edit.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | import 
"github.com/GeniusAI-Platform/openai/models" 4 | 5 | type EditsRequest struct { 6 | Model models.Edit `json:"model" validate:"required"` 7 | // Input The input text to use as a starting point for the edit 8 | Input string `json:"input,omitempty"` 9 | // Instruction The instruction that tells the model how to edit the prompt 10 | Instruction string `json:"instruction" validate:"required"` 11 | // N How many edits to generate for the input and instruction 12 | N int `json:"n,omitempty"` 13 | // Temperature What sampling temperature to use, between 0 and 2. 14 | //Higher values like 0.8 will make the output more random, while 15 | //lower values like 0.2 will make it more focused and deterministic 16 | Temperature float32 `json:"temperature,omitempty"` 17 | // TopP An alternative to sampling with temperature, called nucleus 18 | //sampling, where the model considers the results of the tokens with 19 | //top_p probability mass. So 0.1 means only the tokens comprising the 20 | //top 10% probability mass are considered 21 | TopP float32 `json:"top_p,omitempty"` 22 | } 23 | 24 | type EditsResponse struct { 25 | Object string `json:"object"` 26 | Created int64 `json:"created"` 27 | Usage TokenUsage `json:"usage"` 28 | Choices []EditsChoice `json:"choices"` 29 | } 30 | 31 | type EditsChoice struct { 32 | Text string `json:"text"` 33 | Index int `json:"index"` 34 | } 35 | -------------------------------------------------------------------------------- /entity/embedding.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | import "github.com/GeniusAI-Platform/openai/models" 4 | 5 | // EmbeddingRequest is the input to a Create embeddings request. 6 | type EmbeddingRequest struct { 7 | Model models.Embedding `json:"model" validate:"required"` 8 | // Input is a slice of strings for which you want to generate an EmbeddingData vector. 9 | // Each input must not exceed 2048 tokens in length. 
10 | // OpenAPI suggests replacing newlines (\n) in your input with a single space, as they 11 | // have observed inferior results when newlines are present. 12 | // E.g. 13 | // "The food was delicious and the waiter..." 14 | Input []string `json:"input" validate:"required"` 15 | // User A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. 16 | User string `json:"user"` 17 | } 18 | 19 | // EmbeddingResponse is the response from a Create embeddings request. 20 | type EmbeddingResponse struct { 21 | Model models.Embedding `json:"model"` 22 | Object string `json:"object"` 23 | Data []EmbeddingData `json:"data"` 24 | Usage TokenUsage `json:"usage"` 25 | } 26 | 27 | type EmbeddingData struct { 28 | Object string `json:"object"` 29 | Embedding []float32 `json:"embedding"` 30 | Index int `json:"index"` 31 | } 32 | -------------------------------------------------------------------------------- /entity/error.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | type ErrorResponse struct { 4 | HttpCode int 5 | Err *ErrorPayload `json:"error"` 6 | } 7 | 8 | type ErrorPayload struct { 9 | Message string `json:"message"` 10 | Type string `json:"type"` 11 | Param any `json:"param"` 12 | Code string `json:"code"` 13 | } 14 | 15 | func (e *ErrorResponse) Error() string { 16 | return e.Err.Message 17 | } 18 | -------------------------------------------------------------------------------- /entity/file.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | import "os" 4 | 5 | type FileUploadRequest struct { 6 | // File Name of the JSON Lines file to be uploaded, 7 | // If the purpose is set to "fine-tune", each line is a JSON record with "prompt" and "completion" fields 8 | //representing your training examples: https://platform.openai.com/docs/guides/fine-tuning/prepare-training-data 9 | File *os.File `json:"file" 
validate:"required"` 10 | // Purpose The intended purpose of the uploaded documents, Use "fine-tune" for Fine-tuning. This allows us to validate the format of the uploaded file 11 | Purpose string `json:"purpose" validate:"required"` 12 | } 13 | 14 | type FileResponse struct { 15 | ID string `json:"id"` 16 | Bytes int `json:"bytes"` 17 | FileName string `json:"filename"` 18 | Object string `json:"object"` 19 | Owner string `json:"owner"` 20 | Purpose string `json:"purpose"` 21 | CreatedAt int64 `json:"created_at"` 22 | } 23 | 24 | type FilesListResponse struct { 25 | Files []FileResponse `json:"data"` 26 | } 27 | -------------------------------------------------------------------------------- /entity/fine_tune.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | import "github.com/GeniusAI-Platform/openai/models" 4 | 5 | type FineTuneRequest struct { 6 | Model models.FineTunes `json:"model,omitempty"` 7 | // TrainingFile The ID of an uploaded file that contains training data 8 | TrainingFile string `json:"training_file" validate:"required"` 9 | // ValidationFile The ID of an uploaded file that contains validation data 10 | ValidationFile string `json:"validation_file,omitempty"` 11 | // Epochs The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset 12 | Epochs int `json:"n_epochs,omitempty"` 13 | // BatchSize The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass 14 | BatchSize int `json:"batch_size,omitempty"` 15 | // LearningRateMultiplier The learning rate multiplier to use for training. The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value 16 | //By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final batch_size (larger learning rates tend to perform better with larger batch sizes). 
We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results 17 | LearningRateMultiplier float32 `json:"learning_rate_multiplier,omitempty"` 18 | // PromptLossWeight The weight to use for loss on the prompt tokens. This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), and can add a stabilizing effect to training when completions are short 19 | PromptLossWeight float32 `json:"prompt_loss_weight,omitempty"` 20 | // ComputeClassificationMetrics The number of classes in a classification task 21 | ComputeClassificationMetrics bool `json:"compute_classification_metrics,omitempty"` 22 | // ClassificationClasses The number of classes in a classification task 23 | ClassificationClasses int `json:"classification_n_classes,omitempty"` 24 | // ClassificationPositiveClass The positive class in binary classification 25 | ClassificationPositiveClass string `json:"classification_positive_class,omitempty"` 26 | // ClassificationBetas If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score is a generalization of F-1 score. This is only used for binary classification. 27 | //With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall. 28 | ClassificationBetas []float32 `json:"classification_betas,omitempty"` 29 | // Suffix A string of up to 40 characters that will be added to your fine-tuned model name. 
30 | Suffix string `json:"suffix,omitempty"` 31 | } 32 | 33 | type FineTuneResponse struct { 34 | ID string `json:"id"` 35 | Object string `json:"object"` 36 | Model string `json:"model"` 37 | CreatedAt int64 `json:"created_at"` 38 | FineTuneEventList []FineTuneEvent `json:"events,omitempty"` 39 | FineTunedModel string `json:"fine_tuned_model"` 40 | HyperParams FineTuneHyperParams `json:"hyperparams"` 41 | OrganizationID string `json:"organization_id"` 42 | ResultFiles []FileResponse `json:"result_files"` 43 | Status string `json:"status"` 44 | ValidationFiles []FileResponse `json:"validation_files"` 45 | TrainingFiles []FileResponse `json:"training_files"` 46 | UpdatedAt int64 `json:"updated_at"` 47 | } 48 | 49 | type FineTuneEvent struct { 50 | Object string `json:"object"` 51 | CreatedAt int64 `json:"created_at"` 52 | Level string `json:"level"` 53 | Message string `json:"message"` 54 | } 55 | 56 | type FineTuneHyperParams struct { 57 | BatchSize int `json:"batch_size"` 58 | LearningRateMultiplier float64 `json:"learning_rate_multiplier"` 59 | Epochs int `json:"n_epochs"` 60 | PromptLossWeight float64 `json:"prompt_loss_weight"` 61 | } 62 | 63 | type FineTuneList struct { 64 | Object string `json:"object"` 65 | Data []FineTuneResponse `json:"data"` 66 | } 67 | type FineTuneEventList struct { 68 | Object string `json:"object"` 69 | Data []FineTuneEvent `json:"data"` 70 | } 71 | 72 | type FineTuneDeleteResponse struct { 73 | ID string `json:"id"` 74 | Object string `json:"object"` 75 | Deleted bool `json:"deleted"` 76 | } 77 | -------------------------------------------------------------------------------- /entity/image.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | import "os" 4 | 5 | type ImageRequest struct { 6 | // Prompt A text description of the desired image(s). 
The maximum length is 1000 characters 7 | Prompt string `json:"prompt" validate:"required"` 8 | // N The number of images to generate. Must be between 1 and 10 9 | N int `json:"n,omitempty"` 10 | // Size The size of the generated images. Must be one of ImageSize256x256, ImageSize512x512, or ImageSize1024x1024 11 | Size ImageSize `json:"size,omitempty"` 12 | // ResponseFormat The format in which the generated images are returned. Must be one of ImageResponseFormatURL or ImageResponseFormatB64JSON 13 | ResponseFormat ImageResponseFormat `json:"response_format,omitempty"` 14 | // User A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse 15 | User string `json:"user,omitempty"` 16 | } 17 | 18 | type ImageEditRequest struct { 19 | // Image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, 20 | //image must have transparency, which will be used as the mask 21 | Image *os.File `json:"image" validate:"required"` 22 | // Mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image 23 | //should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image 24 | Mask *os.File `json:"mask,omitempty"` 25 | // Prompt A text description of the desired image(s). The maximum length is 1000 characters 26 | Prompt string `json:"prompt" validate:"required"` 27 | // N The number of images to generate. Must be between 1 and 10 28 | N int `json:"n,omitempty"` 29 | // Size The size of the generated images. Must be one of ImageSize256x256, ImageSize512x512, or ImageSize1024x1024 30 | Size ImageSize `json:"size,omitempty"` 31 | // ResponseFormat The format in which the generated images are returned. 
Must be one of ImageResponseFormatURL or ImageResponseFormatB64JSON 32 | ResponseFormat ImageResponseFormat `json:"response_format,omitempty"` 33 | // User A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse 34 | User string `json:"user,omitempty"` 35 | } 36 | 37 | type ImageVariationRequest struct { 38 | // Image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square 39 | Image *os.File `json:"image" validate:"required"` 40 | // N The number of images to generate. Must be between 1 and 10 41 | N int `json:"n,omitempty"` 42 | // Size The size of the generated images. Must be one of ImageSize256x256, ImageSize512x512, or ImageSize1024x1024 43 | Size ImageSize `json:"size,omitempty"` 44 | // ResponseFormat The format in which the generated images are returned. Must be one of ImageResponseFormatURL or ImageResponseFormatB64JSON 45 | ResponseFormat ImageResponseFormat `json:"response_format,omitempty"` 46 | // User A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse 47 | User string `json:"user,omitempty"` 48 | } 49 | 50 | type ImageResponse struct { 51 | Created int64 `json:"created,omitempty"` 52 | Data []ImageResponseDataInner `json:"data,omitempty"` 53 | } 54 | 55 | type ImageResponseDataInner struct { 56 | URL string `json:"url,omitempty"` 57 | B64JSON string `json:"b64_json,omitempty"` 58 | } 59 | -------------------------------------------------------------------------------- /entity/image.partial.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | import "encoding/json" 4 | 5 | type ( 6 | ImageSize uint8 7 | ImageResponseFormat uint8 8 | ) 9 | 10 | const ( 11 | ImageSize256x256 ImageSize = iota 12 | ImageSize512x512 13 | ImageSize1024x1024 14 | ) 15 | 16 | func (i ImageSize) String() string { 17 | switch i { 18 | case ImageSize256x256: 19 | return "256x256" 20 | 
case ImageSize512x512: 21 | return "512x512" 22 | case ImageSize1024x1024: 23 | return "1024x1024" 24 | default: 25 | return "512x512" 26 | } 27 | } 28 | 29 | func (i ImageSize) MarshalJSON() ([]byte, error) { 30 | return json.Marshal(i.String()) 31 | } 32 | 33 | func (i ImageSize) UnmarshalJSON(data []byte) error { 34 | var s string 35 | if err := json.Unmarshal(data, &s); err != nil { 36 | return err 37 | } 38 | return nil 39 | } 40 | 41 | const ( 42 | ImageResponseFormatURL ImageResponseFormat = iota 43 | ImageResponseFormatB64JSON 44 | ) 45 | 46 | func (i ImageResponseFormat) String() string { 47 | switch i { 48 | case ImageResponseFormatURL: 49 | return "url" 50 | case ImageResponseFormatB64JSON: 51 | return "b64_json" 52 | default: 53 | return "url" 54 | } 55 | } 56 | 57 | func (i ImageResponseFormat) MarshalJSON() ([]byte, error) { 58 | return json.Marshal(i.String()) 59 | } 60 | 61 | func (i ImageResponseFormat) UnmarshalJSON(data []byte) error { 62 | var s string 63 | if err := json.Unmarshal(data, &s); err != nil { 64 | return err 65 | } 66 | return nil 67 | } 68 | -------------------------------------------------------------------------------- /entity/model.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | type ModelsResponse struct { 4 | Object string `json:"object"` 5 | Data []Models `json:"data"` 6 | } 7 | 8 | type Models struct { 9 | ID string `json:"id"` 10 | Object string `json:"object"` 11 | OwnedBy string `json:"owned_by"` 12 | Permission []ModelsPermission `json:"permission"` 13 | Root string `json:"root"` 14 | Parent any `json:"parent"` 15 | } 16 | 17 | type ModelsPermission struct { 18 | ID string `json:"id"` 19 | Object string `json:"object"` 20 | Created int64 `json:"created"` 21 | AllowCreateEngine bool `json:"allow_create_engine"` 22 | AllowSampling bool `json:"allow_sampling"` 23 | AllowLogprobs bool `json:"allow_logprobs"` 24 | AllowSearchIndices bool 
`json:"allow_search_indices"` 25 | AllowView bool `json:"allow_view"` 26 | AllowFineTuning bool `json:"allow_fine_tuning"` 27 | Organization string `json:"organization"` 28 | Group any `json:"group"` 29 | IsBlocking bool `json:"is_blocking"` 30 | } 31 | -------------------------------------------------------------------------------- /entity/moderation.go: -------------------------------------------------------------------------------- 1 | package entity 2 | 3 | import "github.com/GeniusAI-Platform/openai/models" 4 | 5 | type ModerationRequest struct { 6 | // Model Two content moderation models are available: models.TEXT_MODERATION_STABLE and models.TEXT_MODERATION_LATEST 7 | Model models.Moderation `json:"model,omitempty"` 8 | // Input The input text to classify 9 | Input any `json:"input" validate:"required"` 10 | } 11 | 12 | type ModerationResponse struct { 13 | ID string `json:"id"` 14 | Model string `json:"model"` 15 | Results []Result `json:"results"` 16 | } 17 | 18 | type Result struct { 19 | Categories ResultCategories `json:"categories"` 20 | CategoryScores ResultCategoryScores `json:"category_scores"` 21 | Flagged bool `json:"flagged"` 22 | } 23 | 24 | type ResultCategories struct { 25 | Hate bool `json:"hate"` 26 | HateThreatening bool `json:"hate/threatening"` 27 | SelfHarm bool `json:"self-harm"` 28 | Sexual bool `json:"sexual"` 29 | SexualMinors bool `json:"sexual/minors"` 30 | Violence bool `json:"violence"` 31 | ViolenceGraphic bool `json:"violence/graphic"` 32 | } 33 | 34 | type ResultCategoryScores struct { 35 | Hate float32 `json:"hate"` 36 | HateThreatening float32 `json:"hate/threatening"` 37 | SelfHarm float32 `json:"self-harm"` 38 | Sexual float32 `json:"sexual"` 39 | SexualMinors float32 `json:"sexual/minors"` 40 | Violence float32 `json:"violence"` 41 | ViolenceGraphic float32 `json:"violence/graphic"` 42 | } 43 | -------------------------------------------------------------------------------- /errors/errors.go: 
-------------------------------------------------------------------------------- 1 | package errors 2 | 3 | import ( 4 | "errors" 5 | "github.com/GeniusAI-Platform/openai/entity" 6 | ) 7 | 8 | var ( 9 | ErrTooManyEmptyStreamMessages = errors.New("stream has sent too many empty messages") 10 | ErrChatCompletionInvalidModel = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") 11 | ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletionStream") 12 | ErrFailedToUnmarshalJSON = errors.New("failed to unmarshal json response") 13 | ErrAPIKeyIsEmpty = errors.New("api key is empty") 14 | ErrFileIsInvalidFormat = errors.New("file format is invalid, please create jsonl file and check training example: https://platform.openai.com/docs/guides/fine-tuning/prepare-training-data") 15 | ) 16 | 17 | func New(httpCode int, providerCode string, message string, messageType string, param any) *entity.ErrorResponse { 18 | return &entity.ErrorResponse{ 19 | HttpCode: httpCode, 20 | Err: &entity.ErrorPayload{ 21 | Message: message, 22 | Type: messageType, 23 | Param: param, 24 | Code: providerCode, 25 | }, 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /example_test.go: -------------------------------------------------------------------------------- 1 | package openai 2 | 3 | import ( 4 | "context" 5 | "github.com/GeniusAI-Platform/openai/client" 6 | "github.com/GeniusAI-Platform/openai/entity" 7 | "github.com/GeniusAI-Platform/openai/models" 8 | "log" 9 | "os" 10 | ) 11 | 12 | func ExampleNewChat() { 13 | apiKey := os.Getenv("OPENAI_API_KEY") 14 | cli, err := client.New([]string{apiKey}) 15 | if err != nil { 16 | log.Fatalln(err) 17 | } 18 | 19 | c := NewChat(cli) 20 | resp, err := c.CreateChatCompletion(context.Background(), entity.ChatRequest{ 21 | Model: models.GPT35_TURBO, 22 | Messages: 
[]entity.ChatMessage{ 23 | { 24 | Role: entity.USER, 25 | Content: "Hello!!", 26 | }, 27 | }, 28 | }) 29 | 30 | if err != nil { 31 | log.Fatalln(err) 32 | } 33 | 34 | log.Println(resp) 35 | } 36 | 37 | func ExampleNewCompletion() { 38 | apiKey := os.Getenv("OPENAI_API_KEY") 39 | cli, err := client.New([]string{apiKey}) 40 | if err != nil { 41 | log.Fatalln(err) 42 | } 43 | 44 | c := NewCompletion(cli) 45 | resp, err := c.CreateCompletion(context.Background(), entity.CompletionRequest{ 46 | Model: models.TEXT_DAVINCI_002, 47 | Prompt: "Golang history", 48 | }) 49 | 50 | if err != nil { 51 | log.Fatalln(err) 52 | } 53 | 54 | log.Println(resp) 55 | } 56 | 57 | func ExampleNewImage() { 58 | apiKey := os.Getenv("OPENAI_API_KEY") 59 | cli, err := client.New([]string{apiKey}) 60 | if err != nil { 61 | log.Fatalln(err) 62 | } 63 | 64 | c := NewImage(cli) 65 | resp, err := c.CreateImage(context.Background(), entity.ImageRequest{ 66 | Prompt: "Create a gopher baby", 67 | }) 68 | 69 | if err != nil { 70 | log.Fatalln(err) 71 | } 72 | 73 | log.Println(resp) 74 | } 75 | 76 | func ExampleNewAudio() { 77 | apiKey := os.Getenv("OPENAI_API_KEY") 78 | cli, err := client.New([]string{apiKey}) 79 | if err != nil { 80 | log.Fatalln(err) 81 | } 82 | 83 | f, err := os.Open("./testdata/file.mp3") 84 | if err != nil { 85 | log.Fatalln(err) 86 | } 87 | 88 | c := NewAudio(cli) 89 | resp, err := c.CreateTranscription(context.Background(), entity.AudioRequest{ 90 | Model: models.WHISPER_1, 91 | File: f, 92 | Language: "fa", 93 | }) 94 | 95 | if err != nil { 96 | log.Fatalln(err) 97 | } 98 | 99 | log.Println(resp) 100 | } 101 | 102 | func ExampleNewEmbedding() { 103 | apiKey := os.Getenv("OPENAI_API_KEY") 104 | cli, err := client.New([]string{apiKey}) 105 | if err != nil { 106 | log.Fatalln(err) 107 | } 108 | 109 | c := NewEmbedding(cli) 110 | resp, err := c.CreateEmbedding(context.Background(), entity.EmbeddingRequest{ 111 | Model: models.TEXT_EMBEDDING_ADA_002, 112 | Input: []string{"example 
input"}, 113 | }) 114 | 115 | if err != nil { 116 | log.Fatalln(err) 117 | } 118 | 119 | log.Println(resp) 120 | } 121 | 122 | func ExampleNewFile() { 123 | apiKey := os.Getenv("OPENAI_API_KEY") 124 | cli, err := client.New([]string{apiKey}) 125 | if err != nil { 126 | log.Fatalln(err) 127 | } 128 | 129 | f, err := os.Open("./testdata/file.jsonl") 130 | if err != nil { 131 | log.Fatalln(err) 132 | } 133 | 134 | c := NewFile(cli) 135 | resp, err := c.UploadFile(context.Background(), entity.FileUploadRequest{ 136 | File: f, 137 | Purpose: "fine-tune", 138 | }) 139 | 140 | if err != nil { 141 | log.Fatalln(err) 142 | } 143 | 144 | log.Println(resp) 145 | } 146 | 147 | func ExampleNewFineTune() { 148 | apiKey := os.Getenv("OPENAI_API_KEY") 149 | cli, err := client.New([]string{apiKey}) 150 | if err != nil { 151 | log.Fatalln(err) 152 | } 153 | 154 | c := NewFineTune(cli) 155 | resp, err := c.CreateFineTune(context.Background(), entity.FineTuneRequest{ 156 | Model: models.DAVINCI, 157 | TrainingFile: "file-xyz", 158 | }) 159 | 160 | if err != nil { 161 | log.Fatalln(err) 162 | } 163 | 164 | log.Println(resp) 165 | } 166 | 167 | func ExampleNewModeration() { 168 | apiKey := os.Getenv("OPENAI_API_KEY") 169 | cli, err := client.New([]string{apiKey}) 170 | if err != nil { 171 | log.Fatalln(err) 172 | } 173 | 174 | c := NewModeration(cli) 175 | resp, err := c.CreateModeration(context.Background(), entity.ModerationRequest{ 176 | Input: "I want to kill them.", 177 | }) 178 | 179 | if err != nil { 180 | log.Fatalln(err) 181 | } 182 | 183 | log.Println(resp) 184 | } 185 | -------------------------------------------------------------------------------- /file.go: -------------------------------------------------------------------------------- 1 | package openai 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "github.com/GeniusAI-Platform/openai/client" 8 | "github.com/GeniusAI-Platform/openai/entity" 9 | "github.com/GeniusAI-Platform/openai/errors" 10 | 
"github.com/GeniusAI-Platform/openai/types" 11 | "github.com/GeniusAI-Platform/openai/utils" 12 | "net/http" 13 | "path/filepath" 14 | ) 15 | 16 | const ( 17 | fileEndpoint = "/files" 18 | fileDynamicEndpoint = "/files/%s" 19 | fileDynamicWithContentEndpoint = "/files/%s/content" 20 | ) 21 | 22 | type File struct { 23 | client client.Transporter 24 | } 25 | 26 | // NewFile create File object to manage file (upload, retrieve, list) 27 | func NewFile(client client.Transporter) *File { 28 | return &File{ 29 | client: client, 30 | } 31 | } 32 | 33 | // ListFile Returns a list of files that belong to the user's organization 34 | func (f *File) ListFile(ctx context.Context) (*entity.FilesListResponse, error) { 35 | resp, err := f.client.Get(ctx, &client.APIConfig{Path: fileEndpoint}) 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | return responseHandler[*entity.FilesListResponse](resp) 41 | } 42 | 43 | // UploadFile Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. 
Please contact us if you need to increase the storage limit 44 | func (f *File) UploadFile(ctx context.Context, req entity.FileUploadRequest) (*entity.FileResponse, error) { 45 | if err := f.client.GetValidator().Struct(req); err != nil { 46 | return nil, err 47 | } 48 | 49 | body := new(bytes.Buffer) 50 | fb := utils.NewFormBuilder(body) 51 | 52 | if filepath.Ext(req.File.Name()) != ".jsonl" { 53 | return nil, errors.ErrFileIsInvalidFormat 54 | } 55 | 56 | if err := fb.CreateFormFile("file", req.File); err != nil { 57 | return nil, err 58 | } 59 | 60 | if err := fb.WriteField("purpose", req.Purpose); err != nil { 61 | return nil, err 62 | } 63 | 64 | if err := fb.Close(); err != nil { 65 | return nil, err 66 | } 67 | 68 | resp, err := f.client.PostFile(ctx, &client.APIConfig{Path: fileEndpoint}, body, fb.FormDataContentType()) 69 | if err != nil { 70 | return nil, err 71 | } 72 | 73 | return responseHandler[*entity.FileResponse](resp) 74 | } 75 | 76 | // RetrieveFile Returns information about a specific file or file content 77 | func (f *File) RetrieveFile(ctx context.Context, fileID types.ID, content bool) (*entity.FileResponse, error) { 78 | if fileID.IsEmpty() { 79 | return nil, errors.New(http.StatusBadRequest, "", "fileID is empty", "", "") 80 | } 81 | 82 | path := fmt.Sprintf(fileDynamicEndpoint, fileID) 83 | 84 | if content { 85 | path = fmt.Sprintf(fileDynamicWithContentEndpoint, fileID) 86 | } 87 | 88 | resp, err := f.client.Get(ctx, &client.APIConfig{Path: path}) 89 | if err != nil { 90 | return nil, err 91 | } 92 | 93 | return responseHandler[*entity.FileResponse](resp) 94 | } 95 | -------------------------------------------------------------------------------- /fine_tune.go: -------------------------------------------------------------------------------- 1 | package openai 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/GeniusAI-Platform/openai/client" 7 | "github.com/GeniusAI-Platform/openai/entity" 8 | 
"github.com/GeniusAI-Platform/openai/errors" 9 | "github.com/GeniusAI-Platform/openai/types" 10 | "net/http" 11 | ) 12 | 13 | const ( 14 | fineTuneEndpoint = "/fine-tunes" 15 | fineTuneDynamicEndpoint = "/fine-tunes/%s" 16 | fineTuneCancelEndpoint = "/fine-tunes/%s/cancel" 17 | fineTuneEventEndpoint = "/fine-tunes/%s/events" 18 | deleteFineTuneModelEndpoint = "/models/%s" 19 | ) 20 | 21 | type FineTune struct { 22 | client client.Transporter 23 | } 24 | 25 | func NewFineTune(client client.Transporter) *FineTune { 26 | return &FineTune{ 27 | client: client, 28 | } 29 | } 30 | 31 | // CreateFineTune Creates a job that fine-tunes a specified model from a given dataset 32 | func (f *FineTune) CreateFineTune(ctx context.Context, req entity.FineTuneRequest) (*entity.FineTuneResponse, error) { 33 | if err := f.client.GetValidator().Struct(req); err != nil { 34 | return nil, err 35 | } 36 | 37 | resp, err := f.client.Post(ctx, &client.APIConfig{Path: fineTuneEndpoint}, req) 38 | if err != nil { 39 | return nil, err 40 | } 41 | 42 | return responseHandler[*entity.FineTuneResponse](resp) 43 | } 44 | 45 | // ListFineTunes List your organization's fine-tuning jobs 46 | func (f *FineTune) ListFineTunes(ctx context.Context) (*entity.FineTuneList, error) { 47 | resp, err := f.client.Get(ctx, &client.APIConfig{Path: fineTuneEndpoint}) 48 | if err != nil { 49 | return nil, err 50 | } 51 | 52 | return responseHandler[*entity.FineTuneList](resp) 53 | } 54 | 55 | // RetrieveFineTune Gets info about the fine-tune job 56 | func (f *FineTune) RetrieveFineTune(ctx context.Context, fineTuneID types.ID) (*entity.FineTuneResponse, error) { 57 | if fineTuneID.IsEmpty() { 58 | return nil, errors.New(http.StatusBadRequest, "", "fineTuneID is empty", "", "") 59 | } 60 | 61 | resp, err := f.client.Get(ctx, &client.APIConfig{Path: fmt.Sprintf(fineTuneDynamicEndpoint, fineTuneID)}) 62 | if err != nil { 63 | return nil, err 64 | } 65 | 66 | return responseHandler[*entity.FineTuneResponse](resp) 67 | 
} 68 | 69 | // CancelFineTune Immediately cancel a fine-tune job 70 | func (f *FineTune) CancelFineTune(ctx context.Context, fineTuneID types.ID) (*entity.FineTuneResponse, error) { 71 | if fineTuneID.IsEmpty() { 72 | return nil, errors.New(http.StatusBadRequest, "", "fineTuneID is empty", "", "") 73 | } 74 | 75 | resp, err := f.client.Post(ctx, &client.APIConfig{Path: fmt.Sprintf(fineTuneCancelEndpoint, fineTuneID)}, nil) 76 | if err != nil { 77 | return nil, err 78 | } 79 | 80 | return responseHandler[*entity.FineTuneResponse](resp) 81 | } 82 | 83 | // ListFineTuneEvent Get fine-grained status updates for a fine-tune job 84 | func (f *FineTune) ListFineTuneEvent(ctx context.Context, fineTuneID types.ID) (*entity.FineTuneEventList, error) { 85 | if fineTuneID.IsEmpty() { 86 | return nil, errors.New(http.StatusBadRequest, "", "fineTuneID is empty", "", "") 87 | } 88 | 89 | resp, err := f.client.Get(ctx, &client.APIConfig{Path: fmt.Sprintf(fineTuneEventEndpoint, fineTuneID)}) 90 | if err != nil { 91 | return nil, err 92 | } 93 | 94 | return responseHandler[*entity.FineTuneEventList](resp) 95 | } 96 | 97 | // DeleteFineTuneModel Delete a fine-tuned model. 
You must have the Owner role in your organization 98 | func (f *FineTune) DeleteFineTuneModel(ctx context.Context, model string) (*entity.FineTuneDeleteResponse, error) { 99 | if len(model) == 0 { 100 | return nil, errors.New(http.StatusBadRequest, "", "model is empty", "", "") 101 | } 102 | 103 | resp, err := f.client.Delete(ctx, &client.APIConfig{Path: fmt.Sprintf(deleteFineTuneModelEndpoint, model)}) 104 | if err != nil { 105 | return nil, err 106 | } 107 | 108 | return responseHandler[*entity.FineTuneDeleteResponse](resp) 109 | } 110 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/GeniusAI-Platform/openai 2 | 3 | go 1.19 4 | 5 | require ( 6 | github.com/go-playground/validator/v10 v10.12.0 7 | github.com/stretchr/testify v1.8.2 8 | golang.org/x/time v0.3.0 9 | ) 10 | 11 | require ( 12 | github.com/davecgh/go-spew v1.1.1 // indirect 13 | github.com/go-playground/locales v0.14.1 // indirect 14 | github.com/go-playground/universal-translator v0.18.1 // indirect 15 | github.com/leodido/go-urn v1.2.2 // indirect 16 | github.com/pmezard/go-difflib v1.0.0 // indirect 17 | golang.org/x/crypto v0.7.0 // indirect 18 | golang.org/x/sys v0.6.0 // indirect 19 | golang.org/x/text v0.8.0 // indirect 20 | gopkg.in/yaml.v3 v3.0.1 // indirect 21 | ) 22 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= 5 | github.com/go-playground/locales v0.14.1 
h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= 6 | github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= 7 | github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= 8 | github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= 9 | github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI= 10 | github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA= 11 | github.com/leodido/go-urn v1.2.2 h1:7z68G0FCGvDk646jz1AelTYNYWrTNm0bEcFAo147wt4= 12 | github.com/leodido/go-urn v1.2.2/go.mod h1:kUaIbLZWttglzwNuG0pgsh5vuV6u2YcGBYz1hIPjtOQ= 13 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 14 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 15 | github.com/rwtodd/Go.Sed v0.0.0-20210816025313-55464686f9ef/go.mod h1:8AEUvGVi2uQ5b24BIhcr0GCcpd/RNAFWaN2CJFrWIIQ= 16 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 17 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 18 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 19 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 20 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 21 | github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= 22 | github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 23 | golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= 24 | golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= 25 | golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= 26 | golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 27 | golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= 28 | golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 29 | golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= 30 | golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 31 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 32 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 33 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 34 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 35 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 36 | -------------------------------------------------------------------------------- /image.go: -------------------------------------------------------------------------------- 1 | package openai 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "github.com/GeniusAI-Platform/openai/client" 7 | "github.com/GeniusAI-Platform/openai/entity" 8 | "github.com/GeniusAI-Platform/openai/utils" 9 | "strconv" 10 | ) 11 | 12 | const ( 13 | createImageEndpoint = "/images/generations" 14 | editImageEndpoint = "/images/edits" 15 | createImageVariationEndpoint = "/images/variations" 16 | ) 17 | 18 | type Image struct { 19 | client client.Transporter 20 | } 21 | 22 | // NewImage create Image object to create, edit image or image variation using DALL·E 2 23 | func NewImage(client client.Transporter) *Image { 24 | return &Image{ 25 | client: client, 26 | } 27 | } 28 | 29 | // CreateImage Creates an image given a prompt 30 | func (i *Image) CreateImage(ctx context.Context, req entity.ImageRequest) (*entity.ImageResponse, error) { 31 | if err := i.client.GetValidator().Struct(req); err != nil { 32 | return nil, err 33 | } 34 | 35 | resp, 
err := i.client.Post(ctx, &client.APIConfig{Path: createImageEndpoint}, req) 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | return responseHandler[*entity.ImageResponse](resp) 41 | } 42 | 43 | // ImageEdit Creates an edited or extended image given an original image and a prompt 44 | func (i *Image) ImageEdit(ctx context.Context, req entity.ImageEditRequest) (*entity.ImageResponse, error) { 45 | if err := i.client.GetValidator().Struct(req); err != nil { 46 | return nil, err 47 | } 48 | 49 | body := new(bytes.Buffer) 50 | fb := utils.NewFormBuilder(body) 51 | 52 | if err := fb.CreateFormFile("image", req.Image); err != nil { 53 | return nil, err 54 | } 55 | 56 | if req.Mask != nil { 57 | if err := fb.CreateFormFile("mask", req.Mask); err != nil { 58 | return nil, err 59 | } 60 | } 61 | 62 | if err := fb.WriteField("prompt", req.Prompt); err != nil { 63 | return nil, err 64 | } 65 | 66 | if req.N == 0 { 67 | req.N = 1 68 | } 69 | 70 | if err := fb.WriteField("n", strconv.Itoa(req.N)); err != nil { 71 | return nil, err 72 | } 73 | 74 | if err := fb.WriteField("size", req.Size.String()); err != nil { 75 | return nil, err 76 | } 77 | 78 | if err := fb.WriteField("response_format", req.ResponseFormat.String()); err != nil { 79 | return nil, err 80 | } 81 | 82 | if err := fb.WriteField("user", req.User); err != nil { 83 | return nil, err 84 | } 85 | 86 | if err := fb.Close(); err != nil { 87 | return nil, err 88 | } 89 | 90 | resp, err := i.client.PostFile(ctx, &client.APIConfig{Path: editImageEndpoint}, body, fb.FormDataContentType()) 91 | if err != nil { 92 | return nil, err 93 | } 94 | 95 | return responseHandler[*entity.ImageResponse](resp) 96 | } 97 | 98 | // CreateImageVariation Creates a variation of a given image 99 | func (i *Image) CreateImageVariation(ctx context.Context, req entity.ImageVariationRequest) (*entity.ImageResponse, error) { 100 | if err := i.client.GetValidator().Struct(req); err != nil { 101 | return nil, err 102 | } 103 | 104 | body 
:= new(bytes.Buffer) 105 | fb := utils.NewFormBuilder(body) 106 | 107 | if err := fb.CreateFormFile("image", req.Image); err != nil { 108 | return nil, err 109 | } 110 | 111 | if req.N == 0 { 112 | req.N = 1 113 | } 114 | 115 | if err := fb.WriteField("n", strconv.Itoa(req.N)); err != nil { 116 | return nil, err 117 | } 118 | 119 | if err := fb.WriteField("size", req.Size.String()); err != nil { 120 | return nil, err 121 | } 122 | 123 | if err := fb.WriteField("response_format", req.ResponseFormat.String()); err != nil { 124 | return nil, err 125 | } 126 | 127 | if err := fb.WriteField("user", req.User); err != nil { 128 | return nil, err 129 | } 130 | 131 | if err := fb.Close(); err != nil { 132 | return nil, err 133 | } 134 | 135 | resp, err := i.client.PostFile(ctx, &client.APIConfig{Path: createImageVariationEndpoint}, body, fb.FormDataContentType()) 136 | if err != nil { 137 | return nil, err 138 | } 139 | 140 | return responseHandler[*entity.ImageResponse](resp) 141 | } 142 | -------------------------------------------------------------------------------- /models/audio.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import "encoding/json" 4 | 5 | type Audio uint8 6 | 7 | const ( 8 | // WHISPER_1 Whisper is a general-purpose speech recognition model. 
It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification 9 | WHISPER_1 = iota + 1 10 | ) 11 | 12 | func (a Audio) String() string { 13 | switch a { 14 | case WHISPER_1: 15 | return "whisper-1" 16 | default: 17 | return "" 18 | } 19 | } 20 | 21 | func (a Audio) MarshalJSON() ([]byte, error) { 22 | return json.Marshal(a.String()) 23 | } 24 | 25 | func (a Audio) UnmarshalJSON(data []byte) error { 26 | var s string 27 | if err := json.Unmarshal(data, &s); err != nil { 28 | return err 29 | } 30 | return nil 31 | } 32 | -------------------------------------------------------------------------------- /models/chat.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import "encoding/json" 4 | 5 | type Chat uint8 6 | 7 | const ( 8 | // GPT4 (GPT-4 model) More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat. Will be updated with our latest model iteration. 9 | // training data: Up to Sep 2021 10 | GPT4 Chat = iota + 1 11 | // GPT4_0314 (GPT-4 model) Snapshot of gpt-4 from March 14th 2023. Unlike gpt-4, this model will not receive updates, and will be deprecated 3 months after a new version is released. 12 | // training data: Up to Sep 2021 13 | GPT4_0314 14 | // GPT4_32K (GPT-4 model) Same capabilities as the base gpt-4 mode but with 4x the context length. Will be updated with our latest model iteration. 15 | // training data: Up to Sep 2021 16 | GPT4_32K 17 | // GPT4_32K_0314 (GPT-4 model) Snapshot of gpt-4-32 from March 14th 2023. Unlike gpt-4-32k, this model will not receive updates, and will be deprecated 3 months after a new version is released. 18 | // training data: Up to Sep 2021 19 | GPT4_32K_0314 20 | 21 | // GPT35_TURBO (GPT-3.5 model) Most capable GPT-3.5 model and optimized for chat at 1/10th the cost of text-davinci-003. 
Will be updated with our latest model iteration. 22 | // training data: Up to Sep 2021 23 | GPT35_TURBO 24 | // GPT35_TURBO_0301 (GPT-3.5 model) Snapshot of gpt-3.5-turbo from March 1st 2023. Unlike gpt-3.5-turbo, this model will not receive updates, and will be deprecated 3 months after a new version is released. 25 | // training data: Up to Sep 2021 26 | GPT35_TURBO_0301 27 | ) 28 | 29 | func (c Chat) String() string { 30 | switch c { 31 | case GPT4: 32 | return "gpt-4" 33 | case GPT4_0314: 34 | return "gpt-4-0314" 35 | case GPT4_32K: 36 | return "gpt-4-32k" 37 | case GPT4_32K_0314: 38 | return "gpt-4-32k-0314" 39 | case GPT35_TURBO: 40 | return "gpt-3.5-turbo" 41 | case GPT35_TURBO_0301: 42 | return "gpt-3.5-turbo-0301" 43 | default: 44 | return "" 45 | } 46 | } 47 | 48 | func (c Chat) MarshalJSON() ([]byte, error) { 49 | return json.Marshal(c.String()) 50 | } 51 | 52 | func (c Chat) UnmarshalJSON(data []byte) error { 53 | var s string 54 | if err := json.Unmarshal(data, &s); err != nil { 55 | return err 56 | } 57 | return nil 58 | } 59 | -------------------------------------------------------------------------------- /models/completion.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import "encoding/json" 4 | 5 | type Completion uint8 6 | 7 | const ( 8 | // TEXT_DAVINCI_003 (GPT-3.5 model) Can do any language task with better quality, longer output, and consistent instruction-following than the curie, babbage, or ada models. Also supports inserting completions within text. 9 | // training data: Up to Jun 2021 10 | TEXT_DAVINCI_003 Completion = iota + 1 11 | // TEXT_DAVINCI_002 (GPT-3.5 model) Similar capabilities to text-davinci-003 but trained with supervised fine-tuning instead of reinforcement learning. 12 | // training data: Up to Jun 2021 13 | TEXT_DAVINCI_002 14 | 15 | // TEXT_CURIE_001 (GPT-3 model) Very capable, faster and lower cost than Davinci. 
16 | // training data: Up to Oct 2019 17 | TEXT_CURIE_001 18 | // TEXT_BABBAGE_001 (GPT-3 model) Capable of straightforward tasks, very fast, and lower cost. 19 | // training data: Up to Oct 2019 20 | TEXT_BABBAGE_001 21 | // TEXT_ADA_001 (GPT-3 model) Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost. 22 | // training data: Up to Oct 2019 23 | TEXT_ADA_001 24 | 25 | // CODE_DAVINCI_002 (GPT-3.5 model) Optimized for code-completion tasks 26 | CODE_DAVINCI_002 27 | // CODE_DAVINCI_001 (GPT-3 model) Earlier version of code-davinci-002 28 | CODE_DAVINCI_001 29 | // CODE_CUSHMAN_002 (GPT-3 model) Almost as capable as Davinci Codex, but slightly faster. This speed advantage may make it preferable for real-time applications 30 | CODE_CUSHMAN_002 31 | // CODE_CUSHMAN_001 (GPT-3 model) Earlier version of code-cushman-002 32 | CODE_CUSHMAN_001 33 | ) 34 | 35 | func (c Completion) String() string { 36 | switch c { 37 | case TEXT_DAVINCI_003: 38 | return "text-davinci-003" 39 | case TEXT_DAVINCI_002: 40 | return "text-davinci-002" 41 | case TEXT_CURIE_001: 42 | return "text-curie-001" 43 | case TEXT_BABBAGE_001: 44 | return "text-babbage-001" 45 | case TEXT_ADA_001: 46 | return "text-ada-001" 47 | case CODE_DAVINCI_002: 48 | return "code-davinci-002" 49 | case CODE_DAVINCI_001: 50 | return "code-davinci-002" 51 | case CODE_CUSHMAN_002: 52 | return "code-cushman-002" 53 | case CODE_CUSHMAN_001: 54 | return "code-cushman-001" 55 | default: 56 | return "" 57 | } 58 | } 59 | 60 | func (c Completion) MarshalJSON() ([]byte, error) { 61 | return json.Marshal(c.String()) 62 | } 63 | 64 | func (c Completion) UnmarshalJSON(data []byte) error { 65 | var s string 66 | if err := json.Unmarshal(data, &s); err != nil { 67 | return err 68 | } 69 | return nil 70 | } 71 | -------------------------------------------------------------------------------- /models/edit.go: -------------------------------------------------------------------------------- 1 | 
package models 2 | 3 | import "encoding/json" 4 | 5 | type Edit uint8 6 | 7 | const ( 8 | TEXT_DAVINCI_EDIT_001 Edit = iota + 1 9 | CODE_DAVINCI_EDIT_001 10 | ) 11 | 12 | func (e Edit) String() string { 13 | switch e { 14 | case TEXT_DAVINCI_EDIT_001: 15 | return "text-davinci-edit-001" 16 | case CODE_DAVINCI_EDIT_001: 17 | return "code-davinci-edit-001" 18 | default: 19 | return "" 20 | } 21 | } 22 | 23 | func (e Edit) MarshalJSON() ([]byte, error) { 24 | return json.Marshal(e.String()) 25 | } 26 | 27 | func (e Edit) UnmarshalJSON(data []byte) error { 28 | var s string 29 | if err := json.Unmarshal(data, &s); err != nil { 30 | return err 31 | } 32 | return nil 33 | } 34 | -------------------------------------------------------------------------------- /models/embedding.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import "encoding/json" 4 | 5 | type Embedding uint8 6 | 7 | const ( 8 | // TEXT_EMBEDDING_ADA_002 is a designed to replace the previous 16 first-generation embedding models at a fraction of the cost. 
9 | TEXT_EMBEDDING_ADA_002 Embedding = iota + 1 10 | TEXT_SEARCH_ADA_DOC_001 11 | ) 12 | 13 | func (e Embedding) String() string { 14 | switch e { 15 | case TEXT_EMBEDDING_ADA_002: 16 | return "text-embedding-ada-002" 17 | case TEXT_SEARCH_ADA_DOC_001: 18 | return "text-search-ada-doc-001" 19 | default: 20 | return "" 21 | } 22 | } 23 | 24 | func (e Embedding) MarshalJSON() ([]byte, error) { 25 | return json.Marshal(e.String()) 26 | } 27 | 28 | func (e Embedding) UnmarshalJSON(data []byte) error { 29 | var s string 30 | if err := json.Unmarshal(data, &s); err != nil { 31 | return err 32 | } 33 | return nil 34 | } 35 | -------------------------------------------------------------------------------- /models/fine_tunes.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import "encoding/json" 4 | 5 | type FineTunes uint8 6 | 7 | const ( 8 | // DAVINCI (GPT-3 model) Most capable GPT-3 model. Can do any task the other models can do, often with higher quality. 9 | // training data: Up to Oct 2019 10 | DAVINCI FineTunes = iota + 1 11 | // CURIE (GPT-3 model) Very capable, but faster and lower cost than Davinci. 12 | // training data: Up to Oct 2019 13 | CURIE 14 | // BABBAGE (GPT-3 model) Capable of straightforward tasks, very fast, and lower cost. 15 | // training data: Up to Oct 2019 16 | BABBAGE 17 | // ADA (GPT-3 model) Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost. 
18 | // training data: Up to Oct 2019 19 | ADA 20 | ) 21 | 22 | func (f FineTunes) String() string { 23 | switch f { 24 | case DAVINCI: 25 | return "davinci" 26 | case CURIE: 27 | return "curie" 28 | case BABBAGE: 29 | return "babbage" 30 | case ADA: 31 | return "ada" 32 | default: 33 | return "" 34 | } 35 | } 36 | 37 | func (f FineTunes) MarshalJSON() ([]byte, error) { 38 | return json.Marshal(f.String()) 39 | } 40 | 41 | func (f FineTunes) UnmarshalJSON(data []byte) error { 42 | var s string 43 | if err := json.Unmarshal(data, &s); err != nil { 44 | return err 45 | } 46 | return nil 47 | } 48 | -------------------------------------------------------------------------------- /models/moderation.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import "encoding/json" 4 | 5 | type Moderation uint8 6 | 7 | const ( 8 | // TEXT_MODERATION_STABLE Almost as capable as the latest model, but slightly older 9 | TEXT_MODERATION_STABLE Moderation = iota + 1 10 | // TEXT_MODERATION_LATEST Most capable moderation model. 
 Accuracy will be slightly higher than the stable model 11 | TEXT_MODERATION_LATEST 12 | ) 13 | 14 | func (m Moderation) String() string { 15 | switch m { 16 | case TEXT_MODERATION_STABLE: 17 | return "text-moderation-stable" 18 | case TEXT_MODERATION_LATEST: 19 | return "text-moderation-latest" 20 | default: 21 | return "" 22 | } 23 | } 24 | 25 | func (m Moderation) MarshalJSON() ([]byte, error) { 26 | return json.Marshal(m.String()) 27 | } 28 | 29 | func (m Moderation) UnmarshalJSON(data []byte) error { 30 | var s string 31 | if err := json.Unmarshal(data, &s); err != nil { 32 | return err 33 | } 34 | return nil 35 | } 36 | -------------------------------------------------------------------------------- /moderation.go: -------------------------------------------------------------------------------- 1 | package openai 2 | 3 | import ( 4 | "context" 5 | "github.com/GeniusAI-Platform/openai/client" 6 | "github.com/GeniusAI-Platform/openai/entity" 7 | ) 8 | 9 | const ( 10 | moderationEndpoint = "/moderations" 11 | ) 12 | 13 | type Moderation struct { 14 | client client.Transporter 15 | } 16 | 17 | func NewModeration(client client.Transporter) *Moderation { 18 | return &Moderation{ 19 | client: client, 20 | } 21 | } 22 | 23 | // CreateModeration Classifies if text violates OpenAI's Content Policy 24 | func (m *Moderation) CreateModeration(ctx context.Context, req entity.ModerationRequest) (*entity.ModerationResponse, error) { 25 | if err := m.client.GetValidator().Struct(req); err != nil { 26 | return nil, err 27 | } 28 | 29 | resp, err := m.client.Post(ctx, &client.APIConfig{Path: moderationEndpoint}, req) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | return responseHandler[*entity.ModerationResponse](resp) 35 | } 36 | -------------------------------------------------------------------------------- /patterns/completion/completion.go: -------------------------------------------------------------------------------- 1 | package completion 2 | 3 | import ( 4 
| "fmt" 5 | "github.com/GeniusAI-Platform/openai/entity" 6 | "github.com/GeniusAI-Platform/openai/models" 7 | "github.com/GeniusAI-Platform/openai/types/programming" 8 | ) 9 | 10 | type CompletionPattern func() entity.CompletionRequest 11 | 12 | // ProgrammingLanguageTranslator convert programming language code from language to language (go > python) 13 | func ProgrammingLanguageTranslator(code string, languageFrom, languageTo programming.ProgrammingLanguage, maxTokens int) CompletionPattern { 14 | style := ` 15 | ##### Translate this function from %s into %s 16 | ### %s 17 | 18 | %s 19 | 20 | ### %s 21 | ` 22 | prompt := fmt.Sprintf(style, languageFrom, languageTo, languageFrom, code, languageTo) 23 | 24 | return func() entity.CompletionRequest { 25 | return requestBuilder( 26 | models.TEXT_DAVINCI_003, 27 | prompt, 28 | 1.0, 29 | 0, 30 | 0.0, 31 | 0.0, 32 | 0.0, 33 | maxTokens, 34 | []string{"###"}..., 35 | ) 36 | } 37 | } 38 | 39 | // ProgrammingBugFixer find bug in your programming code with specific language 40 | func ProgrammingBugFixer(code string, language programming.ProgrammingLanguage, maxTokens int) CompletionPattern { 41 | style := ` 42 | ##### Fix bugs in the below function %s language 43 | 44 | ### Buggy %s 45 | 46 | %s 47 | 48 | ### Fixed %s 49 | ` 50 | prompt := fmt.Sprintf(style, language, language, code, language) 51 | 52 | return func() entity.CompletionRequest { 53 | return requestBuilder( 54 | models.TEXT_DAVINCI_003, 55 | prompt, 56 | 1.0, 57 | 0, 58 | 0.0, 59 | 0.0, 60 | 0.0, 61 | maxTokens, 62 | []string{"###"}..., 63 | ) 64 | } 65 | } 66 | 67 | // ProgrammingAlgorithmOptimizer improve performance your algorithm function 68 | func ProgrammingAlgorithmOptimizer(code string, language programming.ProgrammingLanguage, maxTokens int) CompletionPattern { 69 | style := ` 70 | ##### Improve performance in the below function 71 | 72 | ### Performance %s 73 | %s 74 | 75 | ### Improved %s 76 | ` 77 | prompt := fmt.Sprintf(style, language, code, 
language) 78 | 79 | return func() entity.CompletionRequest { 80 | return requestBuilder( 81 | models.TEXT_DAVINCI_003, 82 | prompt, 83 | 1.0, 84 | 0.7, 85 | 0.0, 86 | 0.0, 87 | 1, 88 | maxTokens, 89 | []string{"###"}..., 90 | ) 91 | } 92 | } 93 | 94 | // TextToCommand create command using explained text 95 | func TextToCommand(text string, maxTokens int) CompletionPattern { 96 | style := ` 97 | Convert this text to a programmatic command: 98 | 99 | %s 100 | ` 101 | prompt := fmt.Sprintf(style, text) 102 | 103 | return func() entity.CompletionRequest { 104 | return requestBuilder( 105 | models.TEXT_DAVINCI_003, 106 | prompt, 107 | 1.0, 108 | 0, 109 | 0.0, 110 | 0.2, 111 | 0.0, 112 | maxTokens, 113 | []string{`\n`}..., 114 | ) 115 | } 116 | } 117 | 118 | // GrammarCorrection check your english text grammar and do correction 119 | func GrammarCorrection(text string, maxTokens int) CompletionPattern { 120 | style := ` 121 | Correct this to standard English: 122 | 123 | %s 124 | ` 125 | prompt := fmt.Sprintf(style, text) 126 | 127 | return func() entity.CompletionRequest { 128 | return requestBuilder( 129 | models.TEXT_DAVINCI_003, 130 | prompt, 131 | 1.0, 132 | 0, 133 | 0.0, 134 | 0.0, 135 | 0.0, 136 | maxTokens, 137 | ) 138 | } 139 | } 140 | 141 | func requestBuilder(model models.Completion, prompt any, topP, temperature, frequencyPenalty, presencePenalty float32, bestOf, maxTokens int, stop ...string) entity.CompletionRequest { 142 | req := entity.CompletionRequest{ 143 | Model: model, 144 | Prompt: prompt, 145 | TopP: topP, 146 | Temperature: temperature, 147 | FrequencyPenalty: frequencyPenalty, 148 | PresencePenalty: presencePenalty, 149 | BestOf: bestOf, 150 | Stop: stop, 151 | } 152 | 153 | if maxTokens != 0 { 154 | req.MaxTokens = maxTokens 155 | } 156 | 157 | return req 158 | } 159 | -------------------------------------------------------------------------------- /patterns/completion/example_test.go: 
-------------------------------------------------------------------------------- 1 | package completion 2 | 3 | import ( 4 | "context" 5 | "github.com/GeniusAI-Platform/openai" 6 | "github.com/GeniusAI-Platform/openai/client" 7 | "github.com/GeniusAI-Platform/openai/types/programming" 8 | "log" 9 | "os" 10 | ) 11 | 12 | func ExampleProgrammingLanguageTranslator() { 13 | var code string = ` 14 | func add(a, b int) int { 15 | return a + b 16 | } 17 | ` 18 | 19 | apiKey := os.Getenv("OPENAI_API_KEY") 20 | cli, err := client.New([]string{apiKey}) 21 | if err != nil { 22 | log.Fatalln(err) 23 | } 24 | 25 | c := openai.NewCompletion(cli) 26 | resp, err := c.CreateCompletionFromPattern(context.Background(), ProgrammingLanguageTranslator( 27 | code, 28 | programming.Go, 29 | programming.Python, 30 | 0, 31 | )) 32 | 33 | if err != nil { 34 | log.Fatalln(err) 35 | } 36 | 37 | log.Println(resp.Choices[0].Text) 38 | } 39 | 40 | func ExampleTextToCommand() { 41 | var text string = ` 42 | create nginx pod with kubectl and 5 replica 43 | ` 44 | 45 | apiKey := os.Getenv("OPENAI_API_KEY") 46 | cli, err := client.New([]string{apiKey}) 47 | if err != nil { 48 | log.Fatalln(err) 49 | } 50 | 51 | c := openai.NewCompletion(cli) 52 | resp, err := c.CreateCompletionFromPattern(context.Background(), TextToCommand( 53 | text, 54 | 0, 55 | )) 56 | 57 | if err != nil { 58 | log.Fatalln(err) 59 | } 60 | 61 | log.Println(resp.Choices[0].Text) 62 | } 63 | 64 | func ExampleProgrammingBugFixer() { 65 | var code string = ` 66 | func add(a, b int) string { 67 | return a + b 68 | } 69 | ` 70 | 71 | apiKey := os.Getenv("OPENAI_API_KEY") 72 | cli, err := client.New([]string{apiKey}) 73 | if err != nil { 74 | log.Fatalln(err) 75 | } 76 | 77 | c := openai.NewCompletion(cli) 78 | resp, err := c.CreateCompletionFromPattern(context.Background(), ProgrammingBugFixer( 79 | code, 80 | programming.Go, 81 | 0, 82 | )) 83 | 84 | if err != nil { 85 | log.Fatalln(err) 86 | } 87 | 88 | 
log.Println(resp.Choices[0].Text) 89 | } 90 | 91 | func ExampleGrammarCorrection() { 92 | var text string = ` 93 | Helo w0rld! how are to you? 94 | ` 95 | 96 | apiKey := os.Getenv("OPENAI_API_KEY") 97 | cli, err := client.New([]string{apiKey}) 98 | if err != nil { 99 | log.Fatalln(err) 100 | } 101 | 102 | c := openai.NewCompletion(cli) 103 | resp, err := c.CreateCompletionFromPattern(context.Background(), GrammarCorrection( 104 | text, 105 | 0, 106 | )) 107 | 108 | if err != nil { 109 | log.Fatalln(err) 110 | } 111 | 112 | log.Println(resp.Choices[0].Text) 113 | } 114 | 115 | func ExampleProgrammingAlgorithmOptimizer() { 116 | var code string = ` 117 | func BubbleSort(array[] int)[]int { 118 | for i:=0; i< len(array)-1; i++ { 119 | for j:=0; j < len(array)-i-1; j++ { 120 | if (array[j] > array[j+1]) { 121 | array[j], array[j+1] = array[j+1], array[j] 122 | } 123 | } 124 | } 125 | return array 126 | } 127 | ` 128 | 129 | apiKey := os.Getenv("OPENAI_API_KEY") 130 | cli, err := client.New([]string{apiKey}) 131 | if err != nil { 132 | log.Fatalln(err) 133 | } 134 | 135 | c := openai.NewCompletion(cli) 136 | resp, err := c.CreateCompletionFromPattern(context.Background(), ProgrammingAlgorithmOptimizer( 137 | code, 138 | programming.Go, 139 | 0, 140 | )) 141 | 142 | if err != nil { 143 | log.Fatalln(err) 144 | } 145 | 146 | log.Println(resp.Choices[0].Text) 147 | } 148 | -------------------------------------------------------------------------------- /types/programming/programming_language.go: -------------------------------------------------------------------------------- 1 | package programming 2 | 3 | type ProgrammingLanguage string 4 | 5 | const ( 6 | Go ProgrammingLanguage = "go" 7 | Python ProgrammingLanguage = "python" 8 | JS ProgrammingLanguage = "javascript" 9 | ) 10 | -------------------------------------------------------------------------------- /types/type.go: -------------------------------------------------------------------------------- 1 | package types 
2 | 3 | type ( 4 | ID string 5 | ) 6 | 7 | func (i ID) IsEmpty() bool { 8 | if len(i) == 0 { 9 | return true 10 | } 11 | return false 12 | } 13 | -------------------------------------------------------------------------------- /utils/formbuilder.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "io" 5 | "mime/multipart" 6 | "os" 7 | ) 8 | 9 | type FormBuilder interface { 10 | CreateFormFile(fieldName string, file *os.File) error 11 | WriteField(fieldName, value string) error 12 | Close() error 13 | FormDataContentType() string 14 | } 15 | 16 | type Form struct { 17 | writer *multipart.Writer 18 | } 19 | 20 | func NewFormBuilder(body io.Writer) *Form { 21 | return &Form{ 22 | writer: multipart.NewWriter(body), 23 | } 24 | } 25 | 26 | func (f *Form) CreateFormFile(fieldName string, file *os.File) error { 27 | fieldWriter, err := f.writer.CreateFormFile(fieldName, file.Name()) 28 | if err != nil { 29 | return err 30 | } 31 | 32 | _, err = io.Copy(fieldWriter, file) 33 | if err != nil { 34 | return err 35 | } 36 | return nil 37 | } 38 | 39 | func (f *Form) WriteField(fieldName, value string) error { 40 | return f.writer.WriteField(fieldName, value) 41 | } 42 | 43 | func (f *Form) Close() error { 44 | return f.writer.Close() 45 | } 46 | 47 | func (f *Form) FormDataContentType() string { 48 | return f.writer.FormDataContentType() 49 | } 50 | --------------------------------------------------------------------------------