├── .gitignore ├── testdata ├── train.jsonl ├── baby-sea-otter.png ├── finetune-delete.json ├── notfound.json ├── image-create.json ├── chat-completion.json ├── edits-create.json ├── completion.json ├── finetuning-cancel.json ├── finetuning-create.json ├── finetuning-retrieve.json ├── finetune-list.json ├── text-davinci-003.json ├── finetuning-event-list.json ├── moderation-create.json ├── finetune-create.json ├── finetune-events-list.json ├── finetune-retrieve.json └── models_list.json ├── go.mod ├── request.go ├── go.sum ├── edits_test.go ├── response.go ├── moderation_test.go ├── object.go ├── embedding_test.go ├── completion_test.go ├── embedding.go ├── client_test.go ├── edits.go ├── .github ├── workflows │ ├── api.yml │ ├── go.yml │ └── codeql-analysis.yml └── ISSUE_TEMPLATE │ └── bug_report.yml ├── models_test.go ├── error.go ├── LICENSE ├── moderation.go ├── finetuning_test.go ├── chatgpt ├── README.md ├── example_test.go └── chatgpt.go ├── file.go ├── functioncall ├── invoke.go ├── functioncall.go └── all_test.go ├── finetuning.go ├── chat_test.go ├── finetune_test.go ├── file_test.go ├── models.go ├── image_test.go ├── finetune.go ├── client.go ├── image.go ├── completion.go ├── README.md ├── testapp └── main.go ├── chat.go ├── all_test.go └── endpoints.go /.gitignore: -------------------------------------------------------------------------------- 1 | vendor 2 | -------------------------------------------------------------------------------- /testdata/train.jsonl: -------------------------------------------------------------------------------- 1 | {} 2 | {} 3 | {} -------------------------------------------------------------------------------- /testdata/baby-sea-otter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/otiai10/openaigo/HEAD/testdata/baby-sea-otter.png -------------------------------------------------------------------------------- /go.mod: 
package openaigo

import "bytes"

// MultipartFormDataRequestBody is implemented by request bodies that must be
// sent as multipart/form-data (e.g. file uploads) instead of JSON.
// ToMultipartFormData returns the encoded body, its Content-Type header value
// (including the boundary), and any encoding error.
type MultipartFormDataRequestBody interface {
	ToMultipartFormData() (*bytes.Buffer, string, error)
}
9 | } 10 | ] 11 | } -------------------------------------------------------------------------------- /testdata/chat-completion.json: -------------------------------------------------------------------------------- 1 | {"id":"chatcmpl-123","object":"chat.completion","created":1677652288,"choices":[{"index":0,"message":{"role":"assistant","content":"\n\nHello there, how may I assist you today?"},"finish_reason":"stop"}],"usage":{"prompt_tokens":9,"completion_tokens":12,"total_tokens":21}} -------------------------------------------------------------------------------- /testdata/edits-create.json: -------------------------------------------------------------------------------- 1 | { 2 | "object": "edit", 3 | "created": 1589478378, 4 | "choices": [ 5 | { 6 | "text": "What day of the week is it?", 7 | "index": 0 8 | } 9 | ], 10 | "usage": { 11 | "prompt_tokens": 25, 12 | "completion_tokens": 32, 13 | "total_tokens": 57 14 | } 15 | } -------------------------------------------------------------------------------- /edits_test.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | import ( 4 | "testing" 5 | 6 | . 
package openaigo

// CompletionChoice is one generated alternative in a completion response.
type CompletionChoice struct {
	// Text is the generated completion text.
	Text string `json:"text"`
	// Index is the position of this choice within the response.
	Index int `json:"index"`
	// LogProbs: NOTE(review) the API returns null (see testdata/completion.json)
	// or an object for "logprobs"; an int decodes null as 0 but would fail on
	// an object payload — TODO confirm intended typing.
	LogProbs int `json:"logprobs"`
	// FinishReason explains why generation stopped (e.g. "stop", "length").
	FinishReason string `json:"finish_reason"`
}

// Usage reports token consumption for a single API call.
type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}
// ObjectType identifies the "object" discriminator field returned by the
// OpenAI API (e.g. "model", "list", "fine-tune").
type ObjectType string

const (
	OTModel           ObjectType = "model"
	OTModelPermission ObjectType = "model_permission"
	OTList            ObjectType = "list"
	OTEdit            ObjectType = "edit"
	OTTextCompletion  ObjectType = "text_completion"
	// OTEmbedding is the canonical constant for the "embedding" object type.
	OTEmbedding ObjectType = "embedding"
	// Deprecated: OTEEmbedding is a misspelling kept for backward
	// compatibility; use OTEmbedding instead.
	OTEEmbedding  = OTEmbedding
	OTFile        ObjectType = "file"
	OTFineTune    ObjectType = "fine-tune"
	OTFineTuneEvent ObjectType = "fine-tune-event"
)
"github.com/otiai10/mint" 7 | ) 8 | 9 | func TestClient_CreateEmbedding(t *testing.T) { 10 | client := NewClient("") 11 | client.BaseURL = mockserver.URL 12 | res, err := client.CreateEmbedding(nil, EmbeddingCreateRequestBody{ 13 | Model: "text-similarity-babbage-001", 14 | Input: []string{"The food was delicious and the waiter..."}, 15 | }) 16 | Expect(t, err).ToBe(nil) 17 | Expect(t, res).TypeOf("openaigo.EmbeddingCreateResponse") 18 | } 19 | -------------------------------------------------------------------------------- /testdata/completion.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "cmpl-6LxNg3sbNoryG6yulQ0GnOTRQY7Q8", 3 | "object": "text_completion", 4 | "created": 1670691168, 5 | "model": "text-davinci-003", 6 | "choices": [ 7 | { 8 | "text": "\n\n今は元気です。", 9 | "index": 0, 10 | "logprobs": null, 11 | "finish_reason": "stop" 12 | } 13 | ], 14 | "usage": { 15 | "prompt_tokens": 13, 16 | "completion_tokens": 12, 17 | "total_tokens": 25 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /completion_test.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | import ( 4 | "testing" 5 | 6 | . 
package openaigo

// EmbeddingCreateRequestBody is the request payload for the
// "Create embeddings" endpoint.
type EmbeddingCreateRequestBody struct {
	// Model is the ID of the embedding model to use (required).
	Model string `json:"model"`
	// Input is the list of texts to embed.
	Input []string `json:"input"`
	// User is an optional end-user identifier for abuse monitoring.
	User string `json:"user,omitempty"`
}

// EmbeddingCreateResponse is the response payload: one EmbeddingData
// per input string, plus token usage.
type EmbeddingCreateResponse struct {
	Object string          `json:"object"`
	Data   []EmbeddingData `json:"data"`
	Usage  Usage           `json:"usage"`
}

// EmbeddingData holds the embedding vector for a single input,
// with Index mapping it back to the request's Input slice.
type EmbeddingData struct {
	Object    string    `json:"object"`
	Embedding []float32 `json:"embedding"`
	Index     int       `json:"index"`
}
package openaigo

// EditCreateRequestBody is the request payload for the (legacy) Edits API:
// the model rewrites Input according to Instruction.
type EditCreateRequestBody struct {
	// Model is the ID of the model to use (required).
	Model string `json:"model"`
	// Instruction tells the model how to edit the input (required).
	Instruction string `json:"instruction"`
	// Input is the text to edit; defaults to empty when omitted.
	Input string `json:"input,omitempty"`
	// N is how many edit candidates to generate.
	N int `json:"n,omitempty"`
	// Temperature controls sampling randomness (0–2).
	Temperature float32 `json:"temperature,omitempty"`
	// TopP is the nucleus-sampling probability mass.
	TopP float32 `json:"top_p,omitempty"`
}

// EditCreateResponse is the response payload for the Edits API.
type EditCreateResponse struct {
	Object  ObjectType         `json:"object"`
	Created int64              `json:"created"`
	Choices []CompletionChoice `json:"choices"`
	Usage   Usage              `json:"usage"`
}
/testdata/finetuning-create.json: -------------------------------------------------------------------------------- 1 | { 2 | "object": "fine_tuning.job", 3 | "id": "ft-zRdUkP4QeZqeYjDcQL0wwam1", 4 | "model": "davinci-002", 5 | "created_at": 1692661014, 6 | "finished_at": 1692661190, 7 | "fine_tuned_model": "ft:davinci-002:my-org:custom_suffix:7q8mpxmy", 8 | "organization_id": "org-123", 9 | "result_files": [ 10 | "file-abc123" 11 | ], 12 | "status": "succeeded", 13 | "validation_file": null, 14 | "training_file": "file-abc123", 15 | "hyperparameters": { 16 | "n_epochs": 4 17 | }, 18 | "trained_tokens": 5768 19 | } 20 | -------------------------------------------------------------------------------- /testdata/finetuning-retrieve.json: -------------------------------------------------------------------------------- 1 | { 2 | "object": "fine_tuning.job", 3 | "id": "ft-zRdUkP4QeZqeYjDcQL0wwam1", 4 | "model": "davinci-002", 5 | "created_at": 1692661014, 6 | "finished_at": 1692661190, 7 | "fine_tuned_model": "ft:davinci-002:my-org:custom_suffix:7q8mpxmy", 8 | "organization_id": "org-123", 9 | "result_files": [ 10 | "file-abc123" 11 | ], 12 | "status": "succeeded", 13 | "validation_file": null, 14 | "training_file": "file-abc123", 15 | "hyperparameters": { 16 | "n_epochs": 4 17 | }, 18 | "trained_tokens": 5768 19 | } 20 | -------------------------------------------------------------------------------- /testdata/finetune-list.json: -------------------------------------------------------------------------------- 1 | { 2 | "object": "list", 3 | "data": [ 4 | { 5 | "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", 6 | "object": "fine-tune", 7 | "model": "curie", 8 | "created_at": 1614807352, 9 | "fine_tuned_model": null, 10 | "hyperparams": { 11 | "batch_size": 4, 12 | "learning_rate_multiplier": 0.1, 13 | "n_epochs": 4, 14 | "prompt_loss_weight": 0.1 15 | }, 16 | "organization_id": "org-...", 17 | "result_files": [], 18 | "status": "pending", 19 | "validation_files": [], 20 | 
"training_files": [], 21 | "updated_at": 1614807352 22 | } 23 | ] 24 | } -------------------------------------------------------------------------------- /.github/workflows/api.yml: -------------------------------------------------------------------------------- 1 | name: App Test over API 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | schedule: 7 | - cron: '0 10 * * 2' 8 | 9 | jobs: 10 | testapp: 11 | name: Test over API with testapp 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Set up Go 15 | uses: actions/setup-go@v5 16 | with: 17 | go-version: 1.19 18 | - name: Check out code into the Go module directory 19 | uses: actions/checkout@v4 20 | - name: Get dependencies 21 | run: go get -v -t -d ./... 22 | - name: Test 23 | run: go run ./testapp/main.go 24 | env: 25 | OPENAI_API_KEY: ${{ secrets.OPENAI_APIKEY }} 26 | -------------------------------------------------------------------------------- /testdata/text-davinci-003.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "text-davinci-003", 3 | "object": "model", 4 | "created": 1669599635, 5 | "owned_by": "openai-internal", 6 | "permission": [ 7 | { 8 | "id": "modelperm-NFSavcOn3jzqNG8maspoV7WA", 9 | "object": "model_permission", 10 | "created": 1670634735, 11 | "allow_create_engine": false, 12 | "allow_sampling": true, 13 | "allow_logprobs": true, 14 | "allow_search_indices": false, 15 | "allow_view": true, 16 | "allow_fine_tuning": false, 17 | "organization": "*", 18 | "group": null, 19 | "is_blocking": false 20 | } 21 | ], 22 | "root": "text-davinci-003", 23 | "parent": null 24 | } 25 | -------------------------------------------------------------------------------- /testdata/finetuning-event-list.json: -------------------------------------------------------------------------------- 1 | { 2 | "object": "list", 3 | "data": [ 4 | { 5 | "object": "fine_tuning.job.event", 6 | "id": "ft-event-ddTJfwuMVpfLXseO0Am0Gqjm", 7 | "created_at": 1692407401, 8 | 
package openaigo

import (
	"context"
	"testing"

	. "github.com/otiai10/mint"
)

// TestClient_ListModels exercises ListModels against the shared mock server
// (mockserver is presumably started in all_test.go — defined elsewhere).
func TestClient_ListModels(t *testing.T) {
	client := NewClient("")
	client.BaseURL = mockserver.URL
	res, err := client.ListModels(nil)
	Expect(t, err).ToBe(nil)
	Expect(t, res).TypeOf("openaigo.ModelsListResponse")
}

// TestClient_RetrieveModel covers both the success path and the case where
// the server returns HTTP 200 with a body that is not valid JSON.
func TestClient_RetrieveModel(t *testing.T) {
	client := NewClient("")
	client.BaseURL = mockserver.URL
	res, err := client.RetrieveModel(nil, "text-davinci-003")
	Expect(t, err).ToBe(nil)
	Expect(t, res).TypeOf("openaigo.ModelRetrieveResponse")

	// Decoding must fail even on a 200 response when the body is broken JSON.
	_, err = client.RetrieveModel(context.TODO(), "200-but-invalidjson")
	Expect(t, err).Not().ToBe(nil)
}
package openaigo

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// APIErrorType classifies errors returned by the OpenAI API
// (the "type" field of the error payload).
type APIErrorType string

const (
	ErrorInsufficientQuota APIErrorType = "insufficient_quota"
	ErrorInvalidRequest    APIErrorType = "invalid_request_error"
)

// APIError represents the error object embedded in OpenAI API error
// responses, augmented with the HTTP status of the response.
type APIError struct {
	Message string       `json:"message"`
	Type    APIErrorType `json:"type"`
	Param   interface{}  `json:"param"` // TODO: typing
	Code    interface{}  `json:"code"`  // TODO: typing

	// Status and StatusCode are copied from the HTTP response and are not
	// part of the JSON payload.
	Status     string
	StatusCode int
}

// Error implements the error interface.
func (err APIError) Error() string {
	return fmt.Sprintf("openai API error: %v: %v (param: %v, code: %v)", err.Type, err.Message, err.Param, err.Code)
}

// apiError decodes a non-2xx HTTP response body ({"error": {...}}) into an
// APIError. The HTTP status is pre-filled so it survives even if some fields
// are absent from the body. Note: the caller is responsible for closing
// res.Body.
func (client *Client) apiError(res *http.Response) error {
	errbody := struct {
		Error APIError `json:"error"`
	}{APIError{Status: res.Status, StatusCode: res.StatusCode}}
	if err := json.NewDecoder(res.Body).Decode(&errbody); err != nil {
		return fmt.Errorf("failed to decode error body: %v", err)
	}
	return errbody.Error
}
Waiting for jobs ahead to complete. Queue number: 0." 12 | } 13 | ], 14 | "fine_tuned_model": null, 15 | "hyperparams": { 16 | "batch_size": 4, 17 | "learning_rate_multiplier": 0.1, 18 | "n_epochs": 4, 19 | "prompt_loss_weight": 0.1 20 | }, 21 | "organization_id": "org-...", 22 | "result_files": [], 23 | "status": "pending", 24 | "validation_files": [], 25 | "training_files": [ 26 | { 27 | "id": "file-XGinujblHPwGLSztz8cPS8XY", 28 | "object": "file", 29 | "bytes": 1547276, 30 | "created_at": 1610062281, 31 | "filename": "my-data-train.jsonl", 32 | "purpose": "fine-tune-train" 33 | } 34 | ], 35 | "updated_at": 1614807352 36 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2022 otiai10 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /testdata/finetune-events-list.json: -------------------------------------------------------------------------------- 1 | { 2 | "object": "list", 3 | "data": [ 4 | { 5 | "object": "fine-tune-event", 6 | "created_at": 1614807352, 7 | "level": "info", 8 | "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." 9 | }, 10 | { 11 | "object": "fine-tune-event", 12 | "created_at": 1614807356, 13 | "level": "info", 14 | "message": "Job started." 15 | }, 16 | { 17 | "object": "fine-tune-event", 18 | "created_at": 1614807861, 19 | "level": "info", 20 | "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." 21 | }, 22 | { 23 | "object": "fine-tune-event", 24 | "created_at": 1614807864, 25 | "level": "info", 26 | "message": "Uploaded result files: file-QQm6ZpqdNwAaVC3aSz5sWwLT." 27 | }, 28 | { 29 | "object": "fine-tune-event", 30 | "created_at": 1614807864, 31 | "level": "info", 32 | "message": "Job succeeded." 
package openaigo

// ModerationCreateRequestBody is the request payload for the Moderations API.
type ModerationCreateRequestBody struct {
	// Input is the text to classify (required).
	Input string `json:"input"`
	// Model optionally pins a moderation model version.
	Model string `json:"model,omitempty"`
}

// ModerationCreateResponse is the response payload for the Moderations API.
type ModerationCreateResponse struct {
	ID      string           `json:"id"`
	Model   string           `json:"model"`
	Results []ModerationData `json:"results"`
}

// ModerationData holds per-category boolean flags and confidence scores
// for a single moderated input.
type ModerationData struct {
	// Categories flags which policy categories the input violates.
	Categories struct {
		Hate            bool `json:"hate"`
		HateThreatening bool `json:"hate/threatening"`
		SelfHarm        bool `json:"self-harm"`
		Sexual          bool `json:"sexual"`
		SexualMinors    bool `json:"sexual/minors"`
		Violence        bool `json:"violence"`
		ViolenceGraphic bool `json:"violence/graphic"`
	} `json:"categories"`
	// CategoryScores gives the model's confidence per category.
	CategoryScores struct {
		Hate            float32 `json:"hate"`
		HateThreatening float32 `json:"hate/threatening"`
		SelfHarm        float32 `json:"self-harm"`
		Sexual          float32 `json:"sexual"`
		SexualMinors    float32 `json:"sexual/minors"`
		Violence        float32 `json:"violence"`
		ViolenceGraphic float32 `json:"violence/graphic"`
	} `json:"category_scores"`
}
"github.com/otiai10/mint" 8 | ) 9 | 10 | func TestClient_CreateFineTuning(t *testing.T) { 11 | client := NewClient("") 12 | client.BaseURL = mockserver.URL 13 | res, err := client.CreateFineTuning(context.TODO(), FineTuningCreateRequestBody{ 14 | TrainingFile: "file-XGinujblHPwGLSztz8cPS8XY", 15 | }) 16 | Expect(t, err).ToBe(nil) 17 | Expect(t, res).TypeOf("openaigo.FineTuningJob") 18 | } 19 | 20 | func TestClient_RetrieveFineTuning(t *testing.T) { 21 | client := NewClient("") 22 | client.BaseURL = mockserver.URL 23 | res, err := client.RetrieveFineTuning(context.TODO(), "abcdefghi") 24 | Expect(t, err).ToBe(nil) 25 | Expect(t, res).TypeOf("openaigo.FineTuningJob") 26 | } 27 | 28 | func TestClient_CancelFineTuning(t *testing.T) { 29 | client := NewClient("") 30 | client.BaseURL = mockserver.URL 31 | res, err := client.CancelFineTuning(context.TODO(), "abcdefghi") 32 | Expect(t, err).ToBe(nil) 33 | Expect(t, res).TypeOf("openaigo.FineTuningJob") 34 | } 35 | 36 | func TestClient_ListFineTuningEvents(t *testing.T) { 37 | client := NewClient("") 38 | client.BaseURL = mockserver.URL 39 | res, err := client.ListFineTuningEvents(context.TODO(), "abcdefghi") 40 | Expect(t, err).ToBe(nil) 41 | Expect(t, res).TypeOf("openaigo.FineTuningListEventsResponse") 42 | } 43 | -------------------------------------------------------------------------------- /chatgpt/README.md: -------------------------------------------------------------------------------- 1 | # ChatGPT: Chat Completions API Client 2 | 3 | This is a usable example of `github.com/otiai10/openaigo`, especially for [`Chat Completions`](https://platform.openai.com/docs/guides/gpt/chat-completions-api) API. 
4 | 5 | ```go 6 | package main 7 | 8 | import ( 9 | "github.com/otiai10/openaigo/chatgpt" 10 | fc "github.com/otiai10/openaigo/functioncall" 11 | ) 12 | 13 | func main() { 14 | ai := chatgpt.New(token, "gpt-4-0613") 15 | 16 | ai.Functions = fc.Funcs{ 17 | "get_user_location": {/* */}, 18 | "get_current_date": {/* */}, 19 | "get_weather": {/* */}, 20 | } 21 | 22 | conversation, err := ai.Chat(ctx, []chatgpt.Message{ 23 | chatgpt.User("Should I take my umbrella tomorrow?"), 24 | }) 25 | // AI calls necessary functions sequentially, 26 | // and finally reply to the user's question. 27 | } 28 | ``` 29 | This conversation will look like this: 30 | 31 | 0. User asked "Should I take my umbrella tomorrow?" 32 | 1. Assistant wanted to call "get_user_location" 33 | 2. Function replied "Tokyo" to the assistant 34 | 3. Assistant wanted to call "get_current_date" 35 | 4. Function replied "20230707" to the assistant 36 | 5. Assistant wanted to call "get_weather" with ["Tokyo","20230707"] 37 | 6. Function replied "sunny" 38 | 7. Assistant replied "No you don't need to" to the user 39 | 40 | and step 1~6 are done automatically. 
// FileListResponse is the response payload for the "List files" endpoint.
type FileListResponse struct {
	Object string     `json:"object"`
	Data   []FileData `json:"data"`
}

// FileData describes a single file stored with the OpenAI API.
type FileData struct {
	ID        string `json:"id"`
	Object    string `json:"object"`
	Bytes     int64  `json:"bytes"`
	CreatedAt int64  `json:"created_at"`
	Filename  string `json:"filename"`
	// BUGFIX: tag was misspelled "purpuse", so Purpose was never decoded
	// from API responses.
	Purpose string `json:"purpose"`
}

// FileUploadRequestBody is the request payload for the "Upload file"
// endpoint. It is sent as multipart/form-data, not JSON.
type FileUploadRequestBody struct {
	// File is the content to upload (required).
	File io.Reader
	// Purpose is the intended use of the file (e.g. "fine-tune").
	Purpose string
}

// ToMultipartFormData encodes the body as multipart/form-data and returns
// the encoded buffer, the Content-Type header value (including boundary),
// and any error. It implements MultipartFormDataRequestBody.
func (body FileUploadRequestBody) ToMultipartFormData() (*bytes.Buffer, string, error) {
	if body.File == nil {
		return nil, "", fmt.Errorf("body.File must not be nil")
	}
	buf := bytes.NewBuffer(nil)
	w := multipart.NewWriter(buf)
	filew, err := w.CreateFormFile("file", "file.jsonl")
	if err != nil {
		return nil, "", err
	}
	if _, err := io.Copy(filew, body.File); err != nil {
		return nil, "", err
	}
	// Previously the WriteField error was ignored and the writer was closed
	// via defer without checking the error; Close writes the terminating
	// boundary, so it must succeed before the buffer is handed to the caller.
	if err := w.WriteField("purpose", body.Purpose); err != nil {
		return nil, "", err
	}
	if err := w.Close(); err != nil {
		return nil, "", err
	}
	return buf, w.FormDataContentType(), nil
}

// FileUploadResponse is the response payload for the "Upload file" endpoint.
type FileUploadResponse FileData

// FileDeleteResponse is the response payload for the "Delete file" endpoint.
type FileDeleteResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
}

// FileRetrieveResponse is the response payload for the "Retrieve file" endpoint.
type FileRetrieveResponse FileData
package functioncall

import (
	"encoding/json"
	"fmt"
	"reflect"
)

// Invocation is a request (typically parsed from an OpenAI function_call
// message) to invoke a registered function by name with named arguments.
type Invocation interface {
	Name() string
	Args() map[string]any
}

// Call invokes the named function and returns its result serialized as JSON.
// Any failure (unknown function, marshal error, ...) is returned as a plain
// string so it can be fed back to the model as the function's output.
func (funcs Funcs) Call(invocation Invocation) string {
	b, err := json.Marshal(funcs.invoke(invocation))
	if err != nil {
		return err.Error()
	}
	return string(b)
}

// invoke looks up the function by name and calls it via reflection.
// On any validation failure it returns a descriptive string instead of
// calling, so errors surface to the model rather than panicking here.
func (funcs Funcs) invoke(invocation Invocation) any {
	f, ok := funcs[invocation.Name()]
	if !ok {
		return fmt.Sprintf("function not found: `%s`", invocation.Name())
	}
	v := reflect.ValueOf(f.Value)
	if !v.IsValid() || v.IsZero() {
		return fmt.Sprintf("function is invalid: %s", invocation.Name())
	}
	if v.Kind() != reflect.Func {
		return fmt.Sprintf("function is not a function: %s", invocation.Name())
	}
	// Arity must match exactly; extra or missing named args are rejected.
	if v.Type().NumIn() != len(invocation.Args()) {
		return fmt.Sprintf("function argument length mismatch: %s", invocation.Name())
	}
	// Call the function with given arguments by using `reflect` package.
	// Arguments are ordered by f.Parameters, not by map iteration order;
	// missing names fall back to the zero value of the parameter type.
	// NOTE(review): reflect.Call panics if an argument's dynamic type does
	// not match the parameter type (e.g. JSON numbers decode as float64) —
	// TODO confirm callers coerce types before invoking.
	args := invocation.Args()
	params := []reflect.Value{}
	for i, p := range f.Parameters {
		if arg, ok := args[p.Name]; ok {
			params = append(params, reflect.ValueOf(arg))
		} else {
			params = append(params, reflect.Zero(v.Type().In(i)))
		}
	}
	rets := []any{}
	for _, r := range v.Call(params) {
		rets = append(rets, r.Interface())
	}
	return rets
}
33 | render: Go 34 | validations: 35 | required: false 36 | - type: input 37 | id: os 38 | attributes: 39 | label: OS 40 | placeholder: macOS 41 | validations: 42 | required: true 43 | - type: input 44 | id: language-version 45 | attributes: 46 | label: Go version 47 | placeholder: Go 1.18 48 | validations: 49 | required: true 50 | - type: input 51 | id: lib-version 52 | attributes: 53 | label: Library version 54 | placeholder: openai v3.0.1 55 | validations: 56 | required: true 57 | 58 | -------------------------------------------------------------------------------- /finetuning.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | type FineTuningJob struct { 4 | ID string `json:"id"` 5 | Object string `json:"object"` 6 | CreatedAt int64 `json:"created_at"` 7 | FinishedAt int64 `json:"finished_at"` 8 | Model string `json:"model"` 9 | FineTunedModel string `json:"fine_tuned_model,omitempty"` 10 | OrganizationID string `json:"organization_id"` 11 | Status string `json:"status"` 12 | Hyperparameters Hyperparameters `json:"hyperparameters"` 13 | TrainingFile string `json:"training_file"` 14 | ValidationFile string `json:"validation_file,omitempty"` 15 | ResultFiles []string `json:"result_files"` 16 | TrainedTokens int `json:"trained_tokens"` 17 | } 18 | 19 | type Hyperparameters struct { 20 | Epochs int `json:"n_epochs"` 21 | } 22 | 23 | type FineTuningCreateRequestBody struct { 24 | TrainingFile string `json:"training_file"` 25 | ValidationFile string `json:"validation_file,omitempty"` 26 | Model string `json:"model,omitempty"` 27 | Hyperparameters *Hyperparameters `json:"hyperparameters,omitempty"` 28 | Suffix string `json:"suffix,omitempty"` 29 | } 30 | 31 | type FineTuningListEventsResponse struct { 32 | Object string `json:"object"` 33 | Data []FineTuningEvent `json:"data"` 34 | HasMore bool `json:"has_more"` 35 | } 36 | 37 | type FineTuningEvent struct { 38 | Object string `json:"object"` 39 | ID
string `json:"id"` 40 | CreatedAt int `json:"created_at"` 41 | Level string `json:"level"` 42 | Message string `json:"message"` 43 | Data any `json:"data"` 44 | Type string `json:"type"` 45 | } 46 | -------------------------------------------------------------------------------- /chat_test.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | import ( 4 | "sync" 5 | "testing" 6 | 7 | . "github.com/otiai10/mint" 8 | ) 9 | 10 | func TestClient_ChatCompletion(t *testing.T) { 11 | client := NewClient("") 12 | client.BaseURL = mockserver.URL 13 | res, err := client.ChatCompletion(nil, ChatCompletionRequestBody{ 14 | Model: GPT3_5Turbo, 15 | }) 16 | Expect(t, err).ToBe(nil) 17 | Expect(t, res).TypeOf("openaigo.ChatCompletionResponse") 18 | } 19 | 20 | func TestClient_ChatCompletion_Stream(t *testing.T) { 21 | client := NewClient("") 22 | client.BaseURL = mockserver.URL 23 | wg := sync.WaitGroup{} 24 | wg.Add(2) 25 | res, err := client.ChatCompletion(nil, ChatCompletionRequestBody{ 26 | Model: GPT3_5Turbo, 27 | Stream: true, 28 | StreamCallback: func(res ChatCompletionResponse, done bool, err error) { 29 | Expect(t, err).ToBe(nil) 30 | wg.Done() 31 | }, 32 | }) 33 | Expect(t, err).ToBe(nil) 34 | Expect(t, res).TypeOf("openaigo.ChatCompletionResponse") 35 | wg.Wait() 36 | } 37 | 38 | func TestClient_ChatCompletion_FunctionCall(t *testing.T) { 39 | client := NewClient("") 40 | client.BaseURL = mockserver.URL 41 | res, err := client.Chat(nil, ChatRequest{ 42 | Model: GPT3_5Turbo, 43 | Messages: []Message{ 44 | { 45 | Role: "user", Content: "Hello, I'm John.", 46 | }, 47 | }, 48 | Functions: Functions{ 49 | { 50 | Name: "test_method", 51 | Parameters: Parameters{ 52 | Type: "object", 53 | Properties: map[string]map[string]any{ 54 | "arg_0": { 55 | "type": "string", 56 | "description": "This is a test", 57 | }, 58 | }, 59 | Required: []string{"arg_0"}, 60 | }, 61 | }, 62 | }, 63 | FunctionCall: "auto", 64 | }) 65 | 
Expect(t, err).ToBe(nil) 66 | Expect(t, res).TypeOf("openaigo.ChatCompletionResponse") 67 | } 68 | -------------------------------------------------------------------------------- /finetune_test.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/otiai10/mint" 7 | ) 8 | 9 | func TestClient_CreateFineTune(t *testing.T) { 10 | client := NewClient("") 11 | client.BaseURL = mockserver.URL 12 | res, err := client.CreateFineTune(nil, FineTuneCreateRequestBody{ 13 | TrainingFile: "file-XGinujblHPwGLSztz8cPS8XY", 14 | }) 15 | Expect(t, err).ToBe(nil) 16 | Expect(t, res).TypeOf("openaigo.FineTuneCreateResponse") 17 | } 18 | 19 | func TestClient_ListFineTunes(t *testing.T) { 20 | client := NewClient("") 21 | client.BaseURL = mockserver.URL 22 | res, err := client.ListFineTunes(nil) 23 | Expect(t, err).ToBe(nil) 24 | Expect(t, res).TypeOf("openaigo.FineTuneListResponse") 25 | } 26 | 27 | func TestClient_RetrieveFineTune(t *testing.T) { 28 | client := NewClient("") 29 | client.BaseURL = mockserver.URL 30 | res, err := client.RetrieveFineTune(nil, "abcdefghi") 31 | Expect(t, err).ToBe(nil) 32 | Expect(t, res).TypeOf("openaigo.FineTuneRetrieveResponse") 33 | } 34 | 35 | func TestClient_CancelFineTune(t *testing.T) { 36 | client := NewClient("") 37 | client.BaseURL = mockserver.URL 38 | res, err := client.CancelFineTune(nil, "abcdefghi") 39 | Expect(t, err).ToBe(nil) 40 | Expect(t, res).TypeOf("openaigo.FineTuneCancelResponse") 41 | } 42 | 43 | func TestClient_ListFineTuneEvents(t *testing.T) { 44 | client := NewClient("") 45 | client.BaseURL = mockserver.URL 46 | res, err := client.ListFineTuneEvents(nil, "abcdefghi") 47 | Expect(t, err).ToBe(nil) 48 | Expect(t, res).TypeOf("openaigo.FineTuneListEventsResponse") 49 | } 50 | 51 | func TestClient_DeleteFineTune(t *testing.T) { 52 | client := NewClient("") 53 | client.BaseURL = mockserver.URL 54 | res, err := 
client.DeleteFineTuneModel(nil, "abcdefg") 55 | Expect(t, err).ToBe(nil) 56 | Expect(t, res).TypeOf("openaigo.FineTuneDeleteModelResponse") 57 | } 58 | -------------------------------------------------------------------------------- /file_test.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "testing" 7 | 8 | . "github.com/otiai10/mint" 9 | ) 10 | 11 | func TestClient_UploadFile(t *testing.T) { 12 | client := NewClient("") 13 | client.BaseURL = mockserver.URL 14 | _, err := client.UploadFile(nil, FileUploadRequestBody{}) 15 | Expect(t, err).Not().ToBe(nil) 16 | f, _ := os.Open("./testdata/train.jsonl") 17 | res, err := client.UploadFile(nil, FileUploadRequestBody{File: f}) 18 | Expect(t, err).ToBe(nil) 19 | Expect(t, res).TypeOf("openaigo.FileUploadResponse") 20 | } 21 | 22 | func TestClient_ListFiles(t *testing.T) { 23 | client := NewClient("") 24 | client.BaseURL = mockserver.URL 25 | res, err := client.ListFiles(nil) 26 | Expect(t, err).ToBe(nil) 27 | Expect(t, res).TypeOf("openaigo.FileListResponse") 28 | } 29 | 30 | func TestClient_RetrieveFileContent(t *testing.T) { 31 | client := NewClient("") 32 | client.BaseURL = mockserver.URL 33 | res, err := client.RetrieveFileContent(context.TODO(), "abcdefg") 34 | Expect(t, err).ToBe(nil) 35 | Expect(t, res).TypeOf("*http.bodyEOFSignal") 36 | res.Close() 37 | 38 | _, err = client.RetrieveFileContent(context.TODO(), "notfound") 39 | Expect(t, err).Not().ToBe(nil) 40 | } 41 | 42 | func TestClient_RetrieveFile(t *testing.T) { 43 | client := NewClient("") 44 | client.BaseURL = mockserver.URL 45 | res, err := client.RetrieveFile(nil, "abcdefg") 46 | Expect(t, err).ToBe(nil) 47 | Expect(t, res).TypeOf("openaigo.FileRetrieveResponse") 48 | 49 | _, err = client.RetrieveFile(nil, "abc") 50 | Expect(t, err).Not().ToBe(nil) 51 | Expect(t, err.Error()).ToBe("openai API error: invalid_request_error: No such File object: abc (param: 
id, code: )") 52 | 53 | _, err = client.RetrieveFile(nil, "zzz") 54 | Expect(t, err).Not().ToBe(nil) 55 | Expect(t, err.Error()).ToBe("failed to decode error body: invalid character '.' looking for beginning of object key string") 56 | } 57 | 58 | func TestClient_DeleteFile(t *testing.T) { 59 | client := NewClient("") 60 | client.BaseURL = mockserver.URL 61 | res, err := client.DeleteFile(nil, "abcdefg") 62 | Expect(t, err).ToBe(nil) 63 | Expect(t, res).TypeOf("openaigo.FileDeleteResponse") 64 | } 65 | -------------------------------------------------------------------------------- /models.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | type ( 4 | ModelData struct { 5 | ID string `json:"id"` 6 | Object ObjectType `json:"object"` 7 | Created int64 `json:"created"` 8 | OwnedBy string `json:"owned_by"` 9 | Permission []ModelPermission `json:"permission"` 10 | Root string `json:"root"` 11 | Parent string `json:"parent"` 12 | } 13 | ModelPermission struct { 14 | ID string `json:"id"` 15 | Object ObjectType `json:"object"` 16 | Created int64 `json:"created"` 17 | AllowCreateEngine bool `json:"allow_create_engine"` 18 | AllowSampling bool `json:"allow_sampling"` 19 | AllowLogProbs bool `json:"allow_logprobs"` 20 | AllowSearchIndices bool `json:"allow_search_indices"` 21 | AllowView bool `json:"allow_view"` 22 | AllowFineTuning bool `json:"allow_fine_tuning"` 23 | Organization string `json:"organization"` 24 | Group string `json:"group"` 25 | IsBlocking bool `json:"is_blocking"` 26 | } 27 | ) 28 | 29 | type ModelsListResponse struct { 30 | Data []ModelData `json:"data"` 31 | Object ObjectType 32 | } 33 | 34 | type ModelRetrieveResponse ModelData 35 | 36 | // https://beta.openai.com/docs/models/overview 37 | const ( 38 | 39 | // {{{ https://platform.openai.com/docs/models/gpt-4 40 | GPT4o = "gpt-4o" 41 | GPT4o_20240513 = "gpt-4o-2024-05-13" 42 | GPT4 = "gpt-4" 43 | GPT4_0314 = "gpt-4-0314" 44 | GPT4_0613 = 
"gpt-4-0613" 45 | GPT4_32K = "gpt-4-32k" 46 | GPT4_32K_0314 = "gpt-4-32k-0314" 47 | GPT4_32K_0613 = "gpt-4-32k-0613" 48 | // }}} 49 | 50 | // {{{ https://platform.openai.com/docs/models/gpt-3-5 51 | GPT3_5Turbo_0125 = "gpt-3.5-turbo-0125" 52 | GPT3_5Turbo = "gpt-3.5-turbo" 53 | GPT3_5Turbo_1106 = "gpt-3.5-turbo-1106" 54 | GPT3_5Turbo_Instruct = "gpt-3.5-turbo-instruct" 55 | GPT3_5Turbo_16K_0613 = "gpt-3.5-turbo-16k-0613" // @deprecated 56 | GPT3_5Turbo_0613 = "gpt-3.5-turbo-0613" // @deprecated 57 | GPT3_5Turbo_16K = "gpt-3.5-turbo-16k" // @deprecated 58 | GPT3_5Turbo_0301 = "gpt-3.5-turbo-0301" // @deprecated 59 | // }}} 60 | ) 61 | -------------------------------------------------------------------------------- /testdata/finetune-retrieve.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", 3 | "object": "fine-tune", 4 | "model": "curie", 5 | "created_at": 1614807352, 6 | "events": [ 7 | { 8 | "object": "fine-tune-event", 9 | "created_at": 1614807352, 10 | "level": "info", 11 | "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." 12 | }, 13 | { 14 | "object": "fine-tune-event", 15 | "created_at": 1614807356, 16 | "level": "info", 17 | "message": "Job started." 18 | }, 19 | { 20 | "object": "fine-tune-event", 21 | "created_at": 1614807861, 22 | "level": "info", 23 | "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." 24 | }, 25 | { 26 | "object": "fine-tune-event", 27 | "created_at": 1614807864, 28 | "level": "info", 29 | "message": "Uploaded result files: file-QQm6ZpqdNwAaVC3aSz5sWwLT." 30 | }, 31 | { 32 | "object": "fine-tune-event", 33 | "created_at": 1614807864, 34 | "level": "info", 35 | "message": "Job succeeded." 
36 | } 37 | ], 38 | "fine_tuned_model": "curie:ft-acmeco-2021-03-03-21-44-20", 39 | "hyperparams": { 40 | "batch_size": 4, 41 | "learning_rate_multiplier": 0.1, 42 | "n_epochs": 4, 43 | "prompt_loss_weight": 0.1 44 | }, 45 | "organization_id": "org-...", 46 | "result_files": [ 47 | { 48 | "id": "file-QQm6ZpqdNwAaVC3aSz5sWwLT", 49 | "object": "file", 50 | "bytes": 81509, 51 | "created_at": 1614807863, 52 | "filename": "compiled_results.csv", 53 | "purpose": "fine-tune-results" 54 | } 55 | ], 56 | "status": "succeeded", 57 | "validation_files": [], 58 | "training_files": [ 59 | { 60 | "id": "file-XGinujblHPwGLSztz8cPS8XY", 61 | "object": "file", 62 | "bytes": 1547276, 63 | "created_at": 1610062281, 64 | "filename": "my-data-train.jsonl", 65 | "purpose": "fine-tune-train" 66 | } 67 | ], 68 | "updated_at": 1614807865 69 | } -------------------------------------------------------------------------------- /chatgpt/example_test.go: -------------------------------------------------------------------------------- 1 | package chatgpt 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/otiai10/openaigo" 9 | "github.com/otiai10/openaigo/functioncall" 10 | ) 11 | 12 | var funcs = functioncall.Funcs{ 13 | "get_user_locatin": functioncall.Func{ 14 | Value: func() string { 15 | return "Tokyo" 16 | }, 17 | Description: "Get user's location", 18 | }, 19 | "get_date": functioncall.Func{ 20 | Value: func() string { 21 | return "2023-07-07" 22 | }, 23 | Description: "Get date of today", 24 | }, 25 | "get_weather": functioncall.Func{ 26 | Value: func(location, date string) string { 27 | return fmt.Sprintf("Weather in %s on %s is sunny", location, date) 28 | }, 29 | Description: "Get weather of the location on the date", 30 | Parameters: functioncall.Params{ 31 | {Name: "location", Type: "string", Description: "Location to get weather", Required: true}, 32 | {Name: "date", Type: "string", Description: "Date to get weather", Required: true}, 33 | }, 34 | }, 35 | } 36 | 
37 | func ExampleAI() { 38 | 39 | key := os.Getenv("OPENAI_API_KEY") 40 | ai := New(key, openaigo.GPT4o) 41 | ai.Functions = funcs 42 | conv := []Message{ 43 | User("Should I bring my umbrella tomorrow? You can use functions to get necessary information."), 44 | } 45 | res, err := ai.Chat(context.Background(), conv) 46 | if err != nil { 47 | panic(err) 48 | } 49 | for i, m := range res { 50 | if i != 0 { 51 | fmt.Print("->") 52 | } 53 | // fmt.Printf("%s (%s): %s\n", m.Role, m.Name, m.Content) // DEBUG 54 | fmt.Printf("[%d]%s", i, m.Role) 55 | continue 56 | // fmt.Printf("[%d] ", i) 57 | // // Print role name in different color 58 | // switch m.Role { 59 | // case "user": 60 | // fmt.Print("\033[36m") 61 | // case "assistant": 62 | // fmt.Print("\033[32m") 63 | // case "function": 64 | // fmt.Print("\033[33m") 65 | // } 66 | // if m.Role == "assistant" && m.FunctionCall != nil { 67 | // fmt.Printf("%s\033[0m %s\n", m.Role, "(function_call)") 68 | // fmt.Printf(" > `%s(%+v)`\n", m.FunctionCall.Name(), m.FunctionCall.Args()) 69 | // } else { 70 | // fmt.Printf("%s\033[0m\n", m.Role) 71 | // fmt.Printf(" > %s\n", strings.Trim(m.Content, "\n")) 72 | // } 73 | } 74 | // Output: [0]user->[1]assistant->[2]function->[3]assistant->[4]function->[5]assistant->[6]function->[7]assistant 75 | } 76 | -------------------------------------------------------------------------------- /image_test.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | . 
"github.com/otiai10/mint" 8 | ) 9 | 10 | func TestClient_CreateImage(t *testing.T) { 11 | client := NewClient("") 12 | client.BaseURL = mockserver.URL 13 | res, err := client.CreateImage(nil, ImageGenerationRequestBody{ 14 | Prompt: "A cute baby sea otter", 15 | }) 16 | Expect(t, err).ToBe(nil) 17 | Expect(t, res).TypeOf("openaigo.ImageGenerationResponse") 18 | } 19 | 20 | func TestClient_EditImage(t *testing.T) { 21 | f, err := os.Open("./testdata/baby-sea-otter.png") 22 | if err != nil { 23 | panic(err) 24 | } 25 | defer f.Close() 26 | mask, err := os.Open("./testdata/baby-sea-otter.png") 27 | if err != nil { 28 | panic(err) 29 | } 30 | defer mask.Close() 31 | 32 | res, err := (&Client{BaseURL: mockserver.URL}).EditImage(nil, ImageEditRequestBody{ 33 | Image: f, 34 | Prompt: "make it cuter", 35 | }) 36 | Expect(t, err).ToBe(nil) 37 | Expect(t, res).TypeOf("openaigo.ImageEditResponse") 38 | 39 | res, err = (&Client{BaseURL: mockserver.URL}).EditImage(nil, ImageEditRequestBody{ 40 | Image: f, 41 | Prompt: "make it cuter", 42 | Mask: mask, 43 | N: 6, 44 | Size: Size512, 45 | ResponseFormat: "url", 46 | User: "otiai20", 47 | }) 48 | Expect(t, err).ToBe(nil) 49 | Expect(t, res).TypeOf("openaigo.ImageEditResponse") 50 | 51 | _, err = (&Client{BaseURL: mockserver.URL}).EditImage(nil, ImageEditRequestBody{ 52 | Image: nil, 53 | Prompt: "make it cuter", 54 | }) 55 | Expect(t, err).Not().ToBe(nil) 56 | } 57 | 58 | func TestClient_CreateImageVariation(t *testing.T) { 59 | f, err := os.Open("./testdata/baby-sea-otter.png") 60 | if err != nil { 61 | panic(err) 62 | } 63 | defer f.Close() 64 | res, err := (&Client{BaseURL: mockserver.URL}).CreateImageVariation(nil, ImageVariationRequestBody{ 65 | Image: f, 66 | }) 67 | Expect(t, err).ToBe(nil) 68 | Expect(t, res).TypeOf("openaigo.ImageVariationResponse") 69 | 70 | res, err = (&Client{BaseURL: mockserver.URL}).CreateImageVariation(nil, ImageVariationRequestBody{ 71 | Image: f, 72 | N: 4, 73 | Size: Size256, 74 | ResponseFormat: 
"b64_json", 75 | User: "otiai10", 76 | }) 77 | Expect(t, err).ToBe(nil) 78 | Expect(t, res).TypeOf("openaigo.ImageVariationResponse") 79 | 80 | _, err = (&Client{BaseURL: mockserver.URL}).CreateImageVariation(nil, ImageVariationRequestBody{ 81 | Image: nil, 82 | }) 83 | Expect(t, err).Not().ToBe(nil) 84 | } 85 | -------------------------------------------------------------------------------- /functioncall/functioncall.go: -------------------------------------------------------------------------------- 1 | package functioncall 2 | 3 | import ( 4 | "encoding/json" 5 | ) 6 | 7 | type Funcs map[string]Func 8 | 9 | type Func struct { 10 | Value any `json:"-"` 11 | Description string `json:"description,omitempty"` 12 | Parameters Params `json:"parameters,omitempty"` 13 | } 14 | 15 | type Params []Param 16 | 17 | type NestedParams []Param 18 | 19 | type Param struct { 20 | Name string `json:"-"` 21 | Type string `json:"type,omitempty"` 22 | Description string `json:"description,omitempty"` 23 | Required bool `json:"-"` 24 | // Enum []any `json:"enum,omitempty"` 25 | Items NestedParams `json:",omitempty"` 26 | } 27 | 28 | func (funcs Funcs) MarshalJSON() ([]byte, error) { 29 | // Convert map to slice 30 | sl := []map[string]any{} 31 | for key, fun := range funcs { 32 | f := map[string]any{ 33 | "name": key, 34 | "description": fun.Description, 35 | "parameters": fun.Parameters, 36 | } 37 | sl = append(sl, f) 38 | } 39 | return json.Marshal(sl) 40 | } 41 | 42 | func (params Params) MarshalJSON() ([]byte, error) { 43 | return marshalObject(params) 44 | } 45 | 46 | func (params NestedParams) MarshalJSON() ([]byte, error) { 47 | if len(params) == 1 { 48 | return json.Marshal(params[0]) 49 | } 50 | 51 | return marshalObject(params) 52 | } 53 | 54 | func marshalObject[T ~[]Param](params T) ([]byte, error) { 55 | required := []string{} 56 | props := map[string]Param{} 57 | for _, p := range params { 58 | if p.Required { 59 | required = append(required, p.Name) 60 | } 61 | 
props[p.Name] = p 62 | } 63 | 64 | schema := map[string]any{ 65 | "type": "object", 66 | "properties": props, 67 | "required": required, 68 | } 69 | return json.Marshal(schema) 70 | } 71 | 72 | func (param Param) MarshalJSON() ([]byte, error) { 73 | switch param.Type { 74 | case "array": 75 | schema := map[string]any{ 76 | "type": "array", 77 | "items": param.Items, 78 | } 79 | if param.Description != "" { 80 | schema["description"] = param.Description 81 | } 82 | return json.Marshal(schema) 83 | case "object": 84 | return marshalObject(param.Items) 85 | default: 86 | type Alias Param 87 | return json.Marshal(Alias(param)) 88 | } 89 | } 90 | 91 | func As[T any](funcs Funcs) (dest T) { 92 | b, err := funcs.MarshalJSON() 93 | if err != nil { 94 | panic(err) 95 | } 96 | err = json.Unmarshal(b, &dest) 97 | if err != nil { 98 | panic(err) 99 | } 100 | return dest 101 | } 102 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 
11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '17 20 * * 0' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'go' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] 37 | # Learn more about CodeQL language support at https://git.io/codeql-language-support 38 | 39 | steps: 40 | - name: Checkout repository 41 | uses: actions/checkout@v4 42 | 43 | # Initializes the CodeQL tools for scanning. 44 | - name: Initialize CodeQL 45 | uses: github/codeql-action/init@v2 46 | with: 47 | languages: ${{ matrix.language }} 48 | # If you wish to specify custom queries, you can do so here or in a config file. 49 | # By default, queries listed here will override any specified in a config file. 50 | # Prefix the list here with "+" to use these queries and those in the config file. 51 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 52 | 53 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 54 | # If this step fails, then you should remove it and run the build manually (see below) 55 | - name: Autobuild 56 | uses: github/codeql-action/autobuild@v2 57 | 58 | # ℹ️ Command-line programs to run using the OS shell. 
59 | # 📚 https://git.io/JvXDl 60 | 61 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 62 | # and modify them (or add more) to build your code if your project 63 | # uses a compiled language 64 | 65 | #- run: | 66 | # make bootstrap 67 | # make release 68 | 69 | - name: Perform CodeQL Analysis 70 | uses: github/codeql-action/analyze@v2 71 | -------------------------------------------------------------------------------- /finetune.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | type FineTuneCreateRequestBody struct { 4 | TrainingFile string `json:"training_file"` 5 | ValidationFile string `json:"validation_file,omitempty"` 6 | Model string `json:"model,omitempty"` 7 | NEpochs int `json:"n_epochs,omitempty"` 8 | BatchSize int `json:"batch_size,omitempty"` 9 | LearningRateMultiplier float32 `json:"learning_rate_multiplier,omitempty"` 10 | PromptLossWeight float32 `json:"prompt_loss_weight,omitempty"` 11 | ComputeClassificationMetrics bool `json:"compute_classification_metrics,omitempty"` 12 | ClassificationNClasses int `json:"classification_n_classes,omitempty"` 13 | ClassificationPositiveClass string `json:"classification_positive_class,omitempty"` 14 | ClassificationBetas []float32 `json:"classification_betas,omitempty"` 15 | Suffix string `json:"suffix,omitempty"` 16 | } 17 | 18 | type FineTuneData struct { 19 | ID string `json:"id"` 20 | Object string `json:"object"` 21 | Model string `json:"model"` 22 | CreatedAt int64 `json:"created_at"` 23 | Events []FineTuneEvent `json:"events"` 24 | FineTunedModel interface{} `json:"fine_tuned_model"` // TODO: typing 25 | Hyperparams Hyperparams `json:"hyperparams"` 26 | OrganizationID string `json:"organization_id"` 27 | ResultFiles []FileData `json:"result_files"` 28 | Status string `json:"status"` 29 | ValidationFiles []FileData `json:"validation_files"` 30 | TrainingFiles []FileData `json:"training_files"` 31 | 
UpdatedAt int64 `json:"updated_at"` 32 | } 33 | 34 | type FineTuneCreateResponse struct { 35 | Events []FineTuneEvent `json:"events"` 36 | FineTuneData `json:",inline"` 37 | } 38 | 39 | type FineTuneEvent struct { 40 | Object string `json:"object"` 41 | CreatedAt int64 `json:"created_at"` 42 | Level string `json:"level"` 43 | Message string `json:"message"` 44 | } 45 | 46 | type Hyperparams struct { 47 | BatchSize int `json:"batch_size"` 48 | LearningRateMultiplier float32 `json:"learning_rate_multiplier"` 49 | NEpochs int `json:"n_epochs"` 50 | PromptLossWeight float32 `json:"prompt_loss_weight"` 51 | } 52 | 53 | type FineTuneListResponse struct { 54 | Object string `json:"object"` 55 | Data []FineTuneData `json:"data"` 56 | } 57 | 58 | type FineTuneRetrieveResponse struct { 59 | Events []FineTuneEvent `json:"events"` 60 | FineTuneData `json:",inline"` 61 | } 62 | 63 | type FineTuneCancelResponse struct { 64 | Events []FineTuneEvent `json:"events"` 65 | FineTuneData `json:",inline"` 66 | } 67 | 68 | type FineTuneListEventsResponse struct { 69 | Object string `json:"object"` 70 | Data []FineTuneEvent `json:"data"` 71 | } 72 | 73 | type FineTuneDeleteModelResponse struct { 74 | ID string `json:"id"` 75 | Object string `json:"object"` 76 | Deleted bool `json:"deleted"` 77 | } 78 | -------------------------------------------------------------------------------- /client.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "context" 7 | "encoding/json" 8 | "fmt" 9 | "io" 10 | "net/http" 11 | "net/url" 12 | "strings" 13 | ) 14 | 15 | const DefaultOpenAIAPIURL = "https://api.openai.com/v1" 16 | 17 | // Client for api.openai.com API endpoints. 18 | type Client struct { 19 | 20 | // APIKey issued by OpenAI console. 21 | // See https://beta.openai.com/account/api-keys 22 | APIKey string 23 | 24 | // BaseURL of API including the version.
25 | // e.g., https://api.openai.com/v1 26 | BaseURL string 27 | 28 | // Organization 29 | Organization string 30 | 31 | // HTTPClient (optional) to proxy HTTP request. 32 | // If nil, *http.DefaultClient will be used. 33 | HTTPClient *http.Client 34 | } 35 | 36 | type callback[T any] func(response T, done bool, err error) 37 | 38 | var ( 39 | StreamPrefixDATA = []byte("data: ") 40 | StreamPrefixERROR = []byte("error: ") 41 | StreamDataDONE = []byte("[DONE]") 42 | ) 43 | 44 | func NewClient(apikey string) *Client { 45 | return &Client{ 46 | APIKey: apikey, 47 | // Organization: org-GXjGDRs5UuJ4CvQ2u9d5uy0k 48 | // BaseURL: DefaultOpenAIAPIURL, 49 | // HTTPClient: http.DefaultClient, 50 | } 51 | } 52 | 53 | func (client *Client) endpoint(p string) (string, error) { 54 | if client.BaseURL == "" { 55 | client.BaseURL = DefaultOpenAIAPIURL 56 | } 57 | u, err := url.Parse(client.BaseURL) 58 | if err != nil { 59 | return "", err 60 | } 61 | u.Path = strings.Join([]string{ 62 | strings.TrimRight(u.Path, "/"), 63 | strings.TrimLeft(p, "/"), 64 | }, "/") 65 | return u.String(), nil 66 | } 67 | 68 | func (client *Client) build(ctx context.Context, method, p string, body interface{}) (req *http.Request, err error) { 69 | endpoint, err := client.endpoint(p) 70 | if err != nil { 71 | return nil, err 72 | } 73 | r, contenttype, err := client.bodyToReader(body) 74 | if err != nil { 75 | return nil, fmt.Errorf("failed to build request buf from given body: %v", err) 76 | } 77 | req, err = http.NewRequest(method, endpoint, r) 78 | if err != nil { 79 | return nil, fmt.Errorf("failed to init request: %v", err) 80 | } 81 | req.Header.Add("Content-Type", contenttype) 82 | req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", client.APIKey)) 83 | if client.Organization != "" { 84 | req.Header.Add("OpenAI-Organization", client.Organization) 85 | } 86 | if ctx != nil { 87 | req = req.WithContext(ctx) 88 | } 89 | return req, nil 90 | } 91 | 92 | func (client *Client) bodyToReader(body 
interface{}) (io.Reader, string, error) { 93 | var r io.Reader 94 | switch v := body.(type) { 95 | // case io.Reader: 96 | // r = v 97 | case nil: 98 | r = nil 99 | case MultipartFormDataRequestBody: // TODO: Refactor 100 | buf, ct, err := v.ToMultipartFormData() 101 | if err != nil { 102 | return nil, "", err 103 | } 104 | return buf, ct, nil 105 | default: 106 | b, err := json.Marshal(body) 107 | if err != nil { 108 | return nil, "", err 109 | } 110 | r = bytes.NewBuffer(b) 111 | } 112 | return r, "application/json", nil 113 | } 114 | 115 | func execute[T any](client *Client, req *http.Request, response *T, cb callback[T]) error { 116 | if client.HTTPClient == nil { 117 | client.HTTPClient = http.DefaultClient 118 | } 119 | httpres, err := client.HTTPClient.Do(req) 120 | if err != nil { 121 | return err 122 | } 123 | if httpres.StatusCode >= 400 { 124 | defer httpres.Body.Close() 125 | return client.apiError(httpres) 126 | } 127 | if cb != nil { 128 | go listen(httpres, cb) 129 | return nil 130 | } 131 | defer httpres.Body.Close() 132 | if err := json.NewDecoder(httpres.Body).Decode(response); err != nil { 133 | return fmt.Errorf("failed to decode response to %T: %v", response, err) 134 | } 135 | return nil 136 | } 137 | 138 | // https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format 139 | func listen[T any](res *http.Response, cb callback[T]) { 140 | defer res.Body.Close() 141 | scanner := bufio.NewScanner(res.Body) 142 | for scanner.Scan() { 143 | var entry T 144 | b := scanner.Bytes() 145 | switch { 146 | case len(b) == 0: 147 | continue 148 | case bytes.HasPrefix(b, StreamPrefixDATA): 149 | if bytes.HasSuffix(b, StreamDataDONE) { 150 | cb(entry, true, nil) 151 | return 152 | } 153 | if err := json.Unmarshal(b[len(StreamPrefixDATA):], &entry); err != nil { 154 | cb(entry, true, err) 155 | return 156 | } 157 | cb(entry, false, nil) 158 | // TODO: Any error case? 
159 | // case bytes.HasPrefix(b, StreamPrefixERROR): 160 | // cb(entry, true, fmt.Errorf(string(b))) 161 | // return 162 | // TODO: Any other case? 163 | // default: 164 | // cb(entry, true, fmt.Errorf(string(b))) 165 | // return 166 | } 167 | } 168 | } 169 | 170 | func call[T any](ctx context.Context, client *Client, method string, p string, body interface{}, resp T, cb callback[T]) (T, error) { 171 | req, err := client.build(ctx, method, p, body) 172 | if err != nil { 173 | return resp, err 174 | } 175 | err = execute(client, req, &resp, cb) 176 | return resp, err 177 | } 178 | -------------------------------------------------------------------------------- /functioncall/all_test.go: -------------------------------------------------------------------------------- 1 | package functioncall 2 | 3 | import ( 4 | "encoding/json" 5 | "reflect" 6 | "testing" 7 | 8 | . "github.com/otiai10/mint" 9 | ) 10 | 11 | func TestFunctions(t *testing.T) { 12 | funcs := Funcs{} 13 | Expect(t, funcs).TypeOf("functioncall.Funcs") 14 | } 15 | 16 | func TestFunctions_MarshalJSON(t *testing.T) { 17 | repeat := func(word string, count int) (r string) { 18 | for i := 0; i < count; i++ { 19 | r += word 20 | } 21 | return r 22 | } 23 | funcs := Funcs{ 24 | "repeat": Func{Value: repeat, Description: "Repeat given string N times", Parameters: Params{ 25 | {Name: "word", Type: "string", Description: "String to be repeated", Required: true}, 26 | {Name: "count", Type: "number", Description: "How many times to repeat", Required: true}, 27 | }}, 28 | } 29 | b, err := funcs.MarshalJSON() 30 | Expect(t, err).ToBe(nil) 31 | 32 | v := []map[string]any{} 33 | err = json.Unmarshal(b, &v) 34 | Expect(t, err).ToBe(nil) 35 | 36 | Expect(t, v).Query("0.name").ToBe("repeat") 37 | Expect(t, v).Query("0.description").ToBe("Repeat given string N times") 38 | Expect(t, v).Query("0.parameters.type").ToBe("object") 39 | Expect(t, v).Query("0.parameters.properties.word.type").ToBe("string") 40 | Expect(t, 
v).Query("0.parameters.required.1").ToBe("count") 41 | } 42 | 43 | func TestAs(t *testing.T) { 44 | repeat := func(word string, count int) (r string) { 45 | for i := 0; i < count; i++ { 46 | r += word 47 | } 48 | return r 49 | } 50 | funcs := Funcs{ 51 | "repeat": Func{Value: repeat, Description: "Repeat given string N times", Parameters: Params{ 52 | {Name: "word", Type: "string", Description: "String to be repeated", Required: true}, 53 | {Name: "count", Type: "number", Description: "How many times to repeat", Required: true}, 54 | }}, 55 | } 56 | a := As[[]map[string]any](funcs) 57 | Expect(t, a).TypeOf("[]map[string]interface {}") 58 | Expect(t, a).Query("0.name").ToBe("repeat") 59 | Expect(t, a).Query("0.parameters.type").ToBe("object") 60 | } 61 | 62 | func TestParams_MarshalJSON(t *testing.T) { 63 | tests := []struct { 64 | name string 65 | params Params 66 | want []byte 67 | wantErr bool 68 | }{ 69 | { 70 | name: "nested", 71 | params: []Param{ 72 | { 73 | Name: "quality", 74 | Type: "object", 75 | Description: "", 76 | Required: true, 77 | Items: []Param{ 78 | { 79 | Name: "pros", 80 | Type: "array", 81 | Description: "Write 3 points why this text is well written", 82 | Required: true, 83 | Items: []Param{ 84 | {Type: "string"}, 85 | }, 86 | }, 87 | }, 88 | }, 89 | }, 90 | want: []byte(`{"properties":{"quality":{"properties":{"pros":{"description":"Write 3 points why this text is well written","items":{"type":"string"},"type":"array"}},"required":["pros"],"type":"object"}},"required":["quality"],"type":"object"}`), 91 | wantErr: false, 92 | }, 93 | { 94 | name: "nested_example", 95 | params: []Param{ 96 | { 97 | Name: "ingredients", 98 | Type: "array", 99 | Required: true, 100 | Items: []Param{ 101 | { 102 | Type: "object", 103 | Items: []Param{ 104 | { 105 | Name: "name", 106 | Type: "string", 107 | Required: true, 108 | }, 109 | { 110 | Name: "unit", 111 | Type: "string", 112 | // Enum: []any{"grams", "ml", "cups", "pieces", "teaspoons"}, 113 | Required: 
true, 114 | }, 115 | { 116 | Name: "amount", 117 | Type: "number", 118 | Required: true, 119 | }, 120 | }, 121 | }, 122 | }, 123 | }, 124 | { 125 | Name: "instructions", 126 | Type: "array", 127 | Required: true, 128 | Items: []Param{ 129 | { 130 | Type: "string", 131 | }, 132 | }, 133 | Description: "Steps to prepare the recipe (no numbering)", 134 | }, 135 | { 136 | Name: "time_to_cook", 137 | Type: "number", 138 | Description: "Total time to prepare the recipe in minutes", 139 | Required: true, 140 | }, 141 | }, 142 | want: []byte(`{"properties":{"ingredients":{"items":{"properties":{"amount":{"type":"number"},"name":{"type":"string"},"unit":{"type":"string"}},"required":["name","unit","amount"],"type":"object"},"type":"array"},"instructions":{"description":"Steps to prepare the recipe (no numbering)","items":{"type":"string"},"type":"array"},"time_to_cook":{"type":"number","description":"Total time to prepare the recipe in minutes"}},"required":["ingredients","instructions","time_to_cook"],"type":"object"}`), 143 | wantErr: false, 144 | }, 145 | } 146 | for _, tt := range tests { 147 | t.Run(tt.name, func(t *testing.T) { 148 | got, err := json.Marshal(tt.params) 149 | if (err != nil) != tt.wantErr { 150 | t.Errorf("MarshalJSON() error = %v, wantErr %v", err, tt.wantErr) 151 | return 152 | } 153 | if !reflect.DeepEqual(got, tt.want) { 154 | t.Errorf("MarshalJSON() got = %s, want %s", got, tt.want) 155 | } 156 | }) 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /image.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | "mime/multipart" 8 | ) 9 | 10 | const ( 11 | Size256 string = "256x256" 12 | Size512 string = "512x512" 13 | Size1024 string = "1024x1024" 14 | ) 15 | 16 | type ImageGenerationRequestBody struct { 17 | Prompt string `json:"prompt"` 18 | N int `json:"n,omitempty"` 19 | Size string 
`json:"size,omitempty"` 20 | ResponseFormat string `json:"response_format,omitempty"` 21 | User string `json:"user,omitempty"` 22 | } 23 | 24 | type ImageResponse struct { 25 | Created int64 `json:"created"` 26 | Data []ImageData `json:"data"` 27 | } 28 | 29 | type ImageData struct { 30 | Base64 string `json:"b64_json"` 31 | URL string `json:"url"` 32 | } 33 | 34 | type ImageGenerationResponse ImageResponse 35 | 36 | type ImageEditRequestBody struct { 37 | // image Required 38 | // The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. 39 | // User MUST close it if it's like ReadCloser. 40 | Image io.Reader 41 | 42 | // n integer Optional Defaults to 1 43 | // The number of images to generate. Must be between 1 and 10. 44 | N int 45 | 46 | // size string Optional Defaults to 1024x1024 47 | // The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. 48 | Size string 49 | 50 | // response_format string Optional Defaults to url 51 | // The format in which the generated images are returned. Must be one of url or b64_json. 52 | ResponseFormat string 53 | 54 | // user string Optional 55 | // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 56 | // Learn more: https://beta.openai.com/docs/guides/safety-best-practices/end-user-ids 57 | User string 58 | 59 | // mask string Optional 60 | // An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. 61 | // Must be a valid PNG file, less than 4MB, and have the same dimensions as image. 62 | // User MUST close it if it's like ReadCloser. 63 | Mask io.Reader 64 | 65 | // prompt string Required 66 | // A text description of the desired image(s). The maximum length is 1000 characters. 
67 | Prompt string 68 | } 69 | 70 | func (body ImageEditRequestBody) ToMultipartFormData() (buf *bytes.Buffer, contenttype string, err error) { 71 | if body.Image == nil { 72 | return nil, "", fmt.Errorf("body.Image must not be nil") 73 | } 74 | buf = bytes.NewBuffer(nil) 75 | w := multipart.NewWriter(buf) 76 | imgw, err := w.CreateFormFile("image", "image.png") 77 | if err != nil { 78 | return nil, "", fmt.Errorf("failed to create FormFile: %v", err) 79 | } 80 | if _, err := io.Copy(imgw, body.Image); err != nil { 81 | return nil, "", fmt.Errorf("failed to copy io.Reader to buffer: %v", err) 82 | } 83 | 84 | if body.Mask != nil { 85 | maskw, err := w.CreateFormFile("mask", "mask.png") 86 | if err != nil { 87 | return nil, "", err 88 | } 89 | if _, err := io.Copy(maskw, body.Mask); err != nil { 90 | return nil, "", err 91 | } 92 | } 93 | 94 | // prompt is required for image edit. 95 | w.WriteField("prompt", body.Prompt) 96 | 97 | if body.N > 1 { 98 | w.WriteField("n", fmt.Sprintf("%d", body.N)) 99 | } 100 | if body.Size != "" { 101 | w.WriteField("size", body.Size) 102 | } 103 | if body.ResponseFormat != "" { 104 | w.WriteField("response_format", body.ResponseFormat) 105 | } 106 | if body.User != "" { 107 | w.WriteField("user", body.User) 108 | } 109 | 110 | if err = w.Close(); err != nil { 111 | return nil, "", err 112 | } 113 | 114 | return buf, w.FormDataContentType(), err 115 | } 116 | 117 | type ImageEditResponse ImageResponse 118 | 119 | type ImageVariationRequestBody struct { 120 | // image Required 121 | // The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. 122 | // User MUST close it if it's like ReadCloser. 123 | Image io.Reader 124 | 125 | // n integer Optional Defaults to 1 126 | // The number of images to generate. Must be between 1 and 10. 127 | N int 128 | 129 | // size string Optional Defaults to 1024x1024 130 | // The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. 
131 | Size string 132 | 133 | // response_format string Optional Defaults to url 134 | // The format in which the generated images are returned. Must be one of url or b64_json. 135 | ResponseFormat string 136 | 137 | // user string Optional 138 | // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 139 | // Learn more: https://beta.openai.com/docs/guides/safety-best-practices/end-user-ids 140 | User string 141 | } 142 | 143 | func (body ImageVariationRequestBody) ToMultipartFormData() (buf *bytes.Buffer, contenttype string, err error) { 144 | if body.Image == nil { 145 | return nil, "", fmt.Errorf("body.Image must not be nil") 146 | } 147 | buf = bytes.NewBuffer(nil) 148 | w := multipart.NewWriter(buf) 149 | defer w.Close() 150 | imgw, err := w.CreateFormFile("image", "image.png") 151 | if err != nil { 152 | return nil, "", err 153 | } 154 | if _, err := io.Copy(imgw, body.Image); err != nil { 155 | return nil, "", err 156 | } 157 | if body.N > 1 { 158 | w.WriteField("n", fmt.Sprintf("%d", body.N)) 159 | } 160 | if body.Size != "" { 161 | w.WriteField("size", body.Size) 162 | } 163 | if body.ResponseFormat != "" { 164 | w.WriteField("response_format", body.ResponseFormat) 165 | } 166 | if body.User != "" { 167 | w.WriteField("user", body.User) 168 | } 169 | return buf, w.FormDataContentType(), err 170 | } 171 | 172 | type ImageVariationResponse ImageResponse 173 | -------------------------------------------------------------------------------- /completion.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | type CompletionRequestBody struct { 4 | 5 | // Model: ID of the model to use. 6 | // You can use the List models API to see all of your available models, or see our Model overview for descriptions of them. 
7 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-model 8 | Model string `json:"model"` 9 | 10 | // Prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 11 | // Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. 12 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-prompt 13 | Prompt []string `json:"prompt"` 14 | 15 | // MaxTokens: The maximum number of tokens to generate in the completion. 16 | // The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096). 17 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-max_tokens 18 | MaxTokens int `json:"max_tokens,omitempty"` 19 | 20 | // Temperature: What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. 21 | // We generally recommend altering this or top_p but not both. 22 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-temperature 23 | Temperature float32 `json:"temperature,omitempty"` 24 | 25 | // Suffix: The suffix that comes after a completion of inserted text. 26 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-suffix 27 | Suffix string `json:"suffix,omitempty"` 28 | 29 | // TopP: An alternative to sampling with temperature, called nucleus sampling, 30 | // where the model considers the results of the tokens with top_p probability mass. 31 | // So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
32 | // We generally recommend altering this or temperature but not both. 33 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-top_p 34 | TopP float32 `json:"top_p,omitempty"` 35 | 36 | // N: How many completions to generate for each prompt. 37 | // Note: Because this parameter generates many completions, it can quickly consume your token quota. 38 | // Use carefully and ensure that you have reasonable settings for max_tokens and stop. 39 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-n 40 | N int `json:"n,omitempty"` 41 | 42 | // Stream: Whether to stream back partial progress. 43 | // If set, tokens will be sent as data-only server-sent events as they become available, 44 | // with the stream terminated by a data: [DONE] message. 45 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream 46 | Stream bool `json:"stream,omitempty"` 47 | 48 | // LogProbs: Include the log probabilities on the logprobs most likely tokens, as well the chosen tokens. 49 | // For example, if logprobs is 5, the API will return a list of the 5 most likely tokens. The API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response. 50 | // The maximum value for logprobs is 5. If you need more than this, please contact us through our Help center and describe your use case. 51 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-logprobs 52 | LogProbs int `json:"logprobs,omitempty"` 53 | 54 | // Echo: Echo back the prompt in addition to the completion. 55 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-echo 56 | Echo bool `json:"echo,omitempty"` 57 | 58 | // Stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
59 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-stop 60 | Stop []string `json:"stop,omitempty"` 61 | 62 | // PresencePenalty: Number between -2.0 and 2.0. 63 | // Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 64 | // See more information about frequency and presence penalties. 65 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-presence_penalty 66 | PresencePenalty float32 `json:"presence_penalty,omitempty"` 67 | 68 | // FrequencyPenalty: Number between -2.0 and 2.0. 69 | // Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 70 | // See more information about frequency and presence penalties. 71 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-frequency_penalty 72 | FrequencyPenalty float32 `json:"frequency_penalty,omitempty"` 73 | 74 | // BestOf: Generates best_of completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. 75 | // When used with n, best_of controls the number of candidate completions and n specifies how many to return – best_of must be greater than n. 76 | // Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop. 77 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-best_of 78 | BestOf int `json:"best_of,omitempty"` 79 | 80 | // LogitBias: Modify the likelihood of specified tokens appearing in the completion. 81 | // Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. 
You can use this tokenizer tool (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 82 | // As an example, you can pass {"50256": -100} to prevent the <|endoftext|> token from being generated. 83 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-logit_bias 84 | LogitBias map[string]int `json:"logit_bias,omitempty"` 85 | 86 | // User: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more. 87 | // See https://beta.openai.com/docs/api-reference/completions/create#completions/create-user 88 | User string `json:"user,omitempty"` 89 | } 90 | 91 | type CompletionResponse struct { 92 | ID string `json:"id"` 93 | Object ObjectType `json:"object"` 94 | Created int64 `json:"created"` 95 | Model string `json:"model"` 96 | Choices []CompletionChoice `json:"choices"` 97 | Usage Usage 98 | } 99 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # openaigo 2 | 3 | [![Go](https://github.com/otiai10/openaigo/actions/workflows/go.yml/badge.svg)](https://github.com/otiai10/openaigo/actions/workflows/go.yml) 4 | [![CodeQL](https://github.com/otiai10/openaigo/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/otiai10/openaigo/actions/workflows/codeql-analysis.yml) 5 | [![App Test over API](https://github.com/otiai10/openaigo/actions/workflows/api.yml/badge.svg)](https://github.com/otiai10/openaigo/actions/workflows/api.yml) 6 | 
[![License](https://img.shields.io/github/license/otiai10/openaigo)](https://github.com/otiai10/openaigo/blob/main/LICENSE) 7 | [![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fotiai10%2Fopenaigo.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fotiai10%2Fopenaigo?ref=badge_shield) 8 |
[![Maintainability](https://api.codeclimate.com/v1/badges/20c434b47940ce8ed511/maintainability)](https://codeclimate.com/github/otiai10/openaigo/maintainability) 9 | [![Go Report Card](https://goreportcard.com/badge/github.com/otiai10/openaigo)](https://goreportcard.com/report/github.com/otiai10/openaigo) 10 | [![codecov](https://codecov.io/github/otiai10/openaigo/branch/main/graph/badge.svg?token=mfAYgn6Uto)](https://codecov.io/github/otiai10/openaigo) 11 |
[![Reference](https://img.shields.io/github/v/tag/otiai10/openaigo?sort=semver)](https://pkg.go.dev/github.com/otiai10/openaigo) 12 | [![GoDoc](https://pkg.go.dev/badge/github.com/otiai10/openaigo)](https://pkg.go.dev/github.com/otiai10/openaigo) 13 | 14 | Yet another API client for `api.openai.com`. 15 | 16 | This library is community-maintained, NOT officially supported by OpenAI. 17 | 18 | # Usage Example 19 | 20 | ```go 21 | package main 22 | 23 | import ( 24 | "context" 25 | "fmt" 26 | "os" 27 | 28 | "github.com/otiai10/openaigo" 29 | ) 30 | 31 | func main() { 32 | client := openaigo.NewClient(os.Getenv("OPENAI_API_KEY")) 33 | request := openaigo.ChatRequest{ 34 | Model: "gpt-4o", 35 | Messages: []openaigo.Message{ 36 | {Role: "user", Content: "Hello!"}, 37 | }, 38 | } 39 | ctx := context.Background() 40 | response, err := client.Chat(ctx, request) 41 | fmt.Println(response, err) 42 | } 43 | 44 | ``` 45 | 46 | If you just want to try it out, run the commands below. 47 | 48 | ```shell 49 | git clone https://github.com/otiai10/openaigo.git 50 | cd openaigo 51 | OPENAI_API_KEY=YourAPIKey go run ./testapp/main.go 52 | ``` 53 | 54 | See [test app](https://github.com/otiai10/openaigo/blob/main/testapp/main.go) as a working example. 55 | 56 | # API Keys? 57 | 58 | Visit https://beta.openai.com/account/api-keys to create your own API key and get started [for free](https://openai.com/api/pricing/). 
59 | 60 | # Endpoint Support 61 | 62 | - Models 63 | - [x] [List models](https://beta.openai.com/docs/api-reference/models/list) 64 | - [x] [Retrieve model](https://beta.openai.com/docs/api-reference/models/retrieve) 65 | - Text Completions 66 | - [x] [Create completion](https://beta.openai.com/docs/api-reference/completions/create) 67 | - **Chat Completions** 68 | - [x] [Create Chat Completions](https://platform.openai.com/docs/api-reference/chat/create) 69 | - [x] [with function_call](https://openai.com/blog/function-calling-and-other-api-updates) <- New 70 | - Edits 71 | - [x] [Create edits](https://beta.openai.com/docs/api-reference/edits/create) 72 | - Images 73 | - [x] [Create image (beta)](https://beta.openai.com/docs/api-reference/images/create) 74 | - [x] [Create image edit (beta)](https://beta.openai.com/docs/api-reference/images/create-edit) 75 | - [x] [Create image variation (beta)](https://beta.openai.com/docs/api-reference/images/create-variation) 76 | - Embeddings 77 | - [x] [Create embeddings](https://beta.openai.com/docs/api-reference/embeddings/create) 78 | - Files 79 | - [x] [List files](https://beta.openai.com/docs/api-reference/files/list) 80 | - [x] [Upload file](https://beta.openai.com/docs/api-reference/files/upload) 81 | - [x] [Delete file](https://beta.openai.com/docs/api-reference/files/delete) 82 | - [x] [Retrieve file](https://beta.openai.com/docs/api-reference/files/retrieve) 83 | - [x] [Retrieve file content](https://beta.openai.com/docs/api-reference/files/retrieve-content) 84 | - Fine-tunes 85 | - [x] [Create fine-tune](https://beta.openai.com/docs/api-reference/fine-tunes/create) 86 | - [x] [List fine-tunes](https://beta.openai.com/docs/api-reference/fine-tunes/list) 87 | - [x] [Retrieve fine-tune](https://beta.openai.com/docs/api-reference/fine-tunes/retrieve) 88 | - [x] [Cancel fine-tune](https://beta.openai.com/docs/api-reference/fine-tunes/cancel) 89 | - [x] [List fine-tune 
events](https://beta.openai.com/docs/api-reference/fine-tunes/events) 90 | - [x] [Delete fine-tune model](https://beta.openai.com/docs/api-reference/fine-tunes/delete-model) 91 | - Moderation 92 | - [x] [Create moderation](https://beta.openai.com/docs/api-reference/moderations/create) 93 | - ~~Engines~~ *(deprecated)* 94 | - ~~[List engines](https://beta.openai.com/docs/api-reference/engines/list)~~ 95 | - ~~[Retrieve engine](https://beta.openai.com/docs/api-reference/engines/retrieve)~~ 96 | 97 | # Need `function_call`? 98 | 99 | ```go 100 | request := openaigo.ChatRequest{ 101 | Messages: []openaigo.Message{ 102 | {Role: "user", Content: "How's the weather today in Tokyo?"}, 103 | }, 104 | Functions: []openaigo.Function{ 105 | { 106 | Name: "get_weather", 107 | Parameters: openaigo.Parameters{ 108 | Type: "object", 109 | Properties: map[string]map[string]any{ 110 | "location": {"type": "string"}, 111 | "date": {"type": "string", "description": "ISO 8601 date string"}, 112 | }, 113 | Required: []string{"location"}, 114 | }, 115 | }, 116 | }, 117 | } 118 | ``` 119 | 120 | If you want **shorthand**, use [`functioncall`](https://pkg.go.dev/github.com/otiai10/openaigo@v1.4.0/functioncall). 121 | 122 | ```go 123 | import fc "github.com/otiai10/openaigo/functioncall" 124 | 125 | request.Functions = fc.Funcs{ 126 | "get_weather": {GetWeather, "Get weather of the location", fc.Params{ 127 | {"location", "string", "location of the weather", true}, 128 | {"date", "string", "ISO 8601 date string", true}, 129 | }}, 130 | } 131 | ``` 132 | 133 | See [test app](https://github.com/otiai10/openaigo/blob/main/testapp/main.go) as a working example. 134 | 135 | # Need `stream`? 136 | 137 | ```go 138 | client := openaigo.NewClient(OPENAI_API_KEY) 139 | request := openaigo.ChatRequest{ 140 | Stream: true, 141 | StreamCallback: func(res ChatCompletionResponse, done bool, err error) { 142 | // Do what you want! 143 | // You might need chan handling here. 
144 | // See the test app for how you can do it. 145 | // https://github.com/otiai10/openaigo/search?q=chat_completion_stream 146 | }, 147 | } 148 | ``` 149 | 150 | # Need Proxy? 151 | 152 | ```go 153 | client := openaigo.NewClient(OPENAI_API_KEY) 154 | // You can set whatever you want 155 | transport := &http.Transport{ Proxy: http.ProxyFromEnvironment } 156 | client.HTTPClient = &http.Client{ Transport: transport } 157 | // Done! 158 | ``` 159 | 160 | # Issues 161 | 162 | Report any issues here; any feedback is welcome. 163 | 164 | * https://github.com/otiai10/openaigo/issues 165 | -------------------------------------------------------------------------------- /chatgpt/chatgpt.go: -------------------------------------------------------------------------------- 1 | package chatgpt 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/otiai10/openaigo" 8 | "github.com/otiai10/openaigo/functioncall" 9 | ) 10 | 11 | const DefaultMaxAutoFunctionCall = 8 12 | 13 | type Client struct { 14 | openaigo.Client `json:"-"` 15 | 16 | // Model: ID of the model to use. 17 | // Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported. 18 | Model string `json:"model"` 19 | 20 | // Temperature: What sampling temperature to use, between 0 and 2. 21 | // Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 22 | // We generally recommend altering this or top_p but not both. 23 | // Defaults to 1. 24 | Temperature float32 `json:"temperature,omitempty"` 25 | 26 | // TopP: An alternative to sampling with temperature, called nucleus sampling, 27 | // where the model considers the results of the tokens with top_p probability mass. 28 | // So 0.1 means only the tokens comprising the top 10% probability mass are considered. 29 | // We generally recommend altering this or temperature but not both. 30 | // Defaults to 1. 
31 | TopP float32 `json:"top_p,omitempty"` 32 | 33 | // N: How many chat completion choices to generate for each input message. 34 | // Defaults to 1. 35 | N int `json:"n,omitempty"` 36 | 37 | // TODO: 38 | // Stream: If set, partial message deltas will be sent, like in ChatGPT. 39 | // Tokens will be sent as data-only server-sent events as they become available, 40 | // with the stream terminated by a data: [DONE] message. 41 | // Stream bool `json:"stream,omitempty"` 42 | 43 | // TODO: 44 | // StreamCallback is a callback function to handle stream response. 45 | // If provided, this library automatically sets `Stream` to `true`. 46 | // This field is added by github.com/otiai10/openaigo only to handle Stream. 47 | // Thus, it is omitted when the client executes the HTTP request. 48 | // StreamCallback func(res ChatCompletionResponse, done bool, err error) `json:"-"` 49 | 50 | // Stop: Up to 4 sequences where the API will stop generating further tokens. 51 | // Defaults to null. 52 | Stop []string `json:"stop,omitempty"` 53 | 54 | // MaxTokens: The maximum number of tokens allowed for the generated answer. 55 | // By default, the number of tokens the model can return will be (4096 - prompt tokens). 56 | MaxTokens int `json:"max_tokens,omitempty"` 57 | 58 | // PresencePenalty: Number between -2.0 and 2.0. 59 | // Positive values penalize new tokens based on whether they appear in the text so far, 60 | // increasing the model's likelihood to talk about new topics. 61 | // See more information about frequency and presence penalties. 62 | // https://platform.openai.com/docs/api-reference/parameter-details 63 | PresencePenalty float32 `json:"presence_penalty,omitempty"` 64 | 65 | // FrequencyPenalty: Number between -2.0 and 2.0. 66 | // Positive values penalize new tokens based on their existing frequency in the text so far, 67 | // decreasing the model's likelihood to repeat the same line verbatim. 68 | // See more information about frequency and presence penalties. 
69 | // https://platform.openai.com/docs/api-reference/parameter-details 70 | FrequencyPenalty float32 `json:"frequency_penalty,omitempty"` 71 | 72 | // LogitBias: Modify the likelihood of specified tokens appearing in the completion. 73 | // Accepts a json object that maps tokens (specified by their token ID in the tokenizer) 74 | // to an associated bias value from -100 to 100. 75 | // Mathematically, the bias is added to the logits generated by the model prior to sampling. 76 | // The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; 77 | // values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 78 | LogitBias map[string]int `json:"logit_bias,omitempty"` 79 | 80 | // User: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more. 81 | // https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids 82 | User string `json:"user,omitempty"` 83 | 84 | // Functions: A list of functions which GPT is allowed to request to call. 85 | // Functions []Function `json:"functions,omitempty"` 86 | Functions functioncall.Funcs `json:"functions,omitempty"` 87 | 88 | // FunctionCall: You ain't need it. Default is "auto". 
89 | FunctionCall string `json:"function_call,omitempty"` 90 | 91 | // Max number of calling function automatically 92 | MaxFunctionCallHandling int `json:"-"` 93 | } 94 | 95 | type Message struct { 96 | openaigo.Message 97 | autocalled bool 98 | } 99 | 100 | func New(apikey, model string) *Client { 101 | return &Client{ 102 | Client: openaigo.Client{ 103 | APIKey: apikey, 104 | }, 105 | Model: model, 106 | MaxFunctionCallHandling: DefaultMaxAutoFunctionCall, 107 | } 108 | } 109 | 110 | func (c *Client) Chat(ctx context.Context, conv []Message) ([]Message, error) { 111 | // Create messages from conv 112 | messages := make([]openaigo.Message, len(conv)) 113 | for i, m := range conv { 114 | messages[i] = openaigo.Message(m.Message) 115 | } 116 | // Create request 117 | req := openaigo.ChatRequest{ 118 | Model: c.Model, 119 | Messages: messages, 120 | Functions: functioncall.Funcs(c.Functions), 121 | // TODO: more options from from *Client 122 | } 123 | // Call API 124 | res, err := c.Client.Chat(ctx, req) 125 | if err != nil { 126 | return conv, err 127 | } 128 | conv = append(conv, Message{ 129 | Message: res.Choices[0].Message, 130 | }) 131 | 132 | if res.Choices[0].Message.FunctionCall != nil { 133 | if c.shouldCallFunction(conv) { 134 | call := res.Choices[0].Message.FunctionCall 135 | m := Func(call.Name(), c.Functions.Call(call)) 136 | m.autocalled = true 137 | conv, err = c.Chat(ctx, append(conv, m)) 138 | } 139 | } 140 | 141 | // Now clean up the auto-called flags 142 | // so that the caller can reuse this slice to restart chat. 
143 | for i := range conv { 144 | conv[i].autocalled = false 145 | } 146 | 147 | return conv, err 148 | } 149 | 150 | func (c *Client) shouldCallFunction(conv []Message) bool { 151 | // Always allow if negative 152 | if c.MaxFunctionCallHandling < 0 { 153 | return true 154 | } 155 | cnt := 0 156 | for _, m := range conv { 157 | if m.autocalled { 158 | cnt++ 159 | } 160 | } 161 | return cnt < c.MaxFunctionCallHandling 162 | } 163 | 164 | func User(message string) Message { 165 | return Message{ 166 | Message: openaigo.Message{ 167 | Role: "user", 168 | Content: message, 169 | }, 170 | } 171 | } 172 | 173 | func Func(name string, data interface{}) Message { 174 | return Message{ 175 | Message: openaigo.Message{ 176 | Role: "function", 177 | Name: name, 178 | Content: fmt.Sprintf("%+v\n", data), 179 | }, 180 | } 181 | } 182 | 183 | func System(message string) Message { 184 | return Message{ 185 | Message: openaigo.Message{ 186 | Role: "system", 187 | Content: message, 188 | }, 189 | } 190 | } 191 | 192 | func Assistant(message string) Message { 193 | return Message{ 194 | Message: openaigo.Message{ 195 | Role: "assistant", 196 | Content: message, 197 | }, 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /testapp/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "os" 8 | "strings" 9 | "time" 10 | 11 | "github.com/otiai10/openaigo" 12 | fc "github.com/otiai10/openaigo/functioncall" 13 | ) 14 | 15 | type Scenario struct { 16 | Name string 17 | Run func() (any, error) 18 | } 19 | 20 | const ( 21 | SKIP = "\033[0;33m====> SKIP\033[0m\n\n" 22 | ) 23 | 24 | func GetWeather(location string, date float64) (string, error) { 25 | return "sunny", nil 26 | } 27 | 28 | func GetDate() int { 29 | now := time.Now() 30 | return now.Year()*10000 + int(now.Month())*100 + now.Day() 31 | } 32 | 33 | var ( 34 | OPENAI_API_KEY 
string 35 | 36 | scenarios = []Scenario{ 37 | { 38 | Name: "completion", 39 | Run: func() (any, error) { 40 | client := openaigo.NewClient(OPENAI_API_KEY) 41 | request := openaigo.ChatCompletionRequestBody{ 42 | Model: openaigo.GPT4o, 43 | Messages: []openaigo.Message{ 44 | {Role: "user", Content: "What is the capital of Japan?"}, 45 | }, 46 | } 47 | return client.ChatCompletion(nil, request) 48 | }, 49 | }, 50 | { 51 | Name: "image_edit", 52 | Run: func() (any, error) { 53 | client := openaigo.NewClient(OPENAI_API_KEY) 54 | f, err := os.Open("./testdata/baby-sea-otter.png") 55 | if err != nil { 56 | return nil, err 57 | } 58 | defer f.Close() 59 | request := openaigo.ImageEditRequestBody{ 60 | Image: f, 61 | Prompt: "A cute baby sea otter with big cheese", 62 | Size: openaigo.Size256, 63 | } 64 | return client.EditImage(nil, request) 65 | }, 66 | }, 67 | { 68 | Name: "image_variation", 69 | Run: func() (any, error) { 70 | client := openaigo.NewClient(OPENAI_API_KEY) 71 | f, err := os.Open("./testdata/baby-sea-otter.png") 72 | if err != nil { 73 | return nil, err 74 | } 75 | defer f.Close() 76 | request := openaigo.ImageVariationRequestBody{ 77 | Image: f, 78 | Size: openaigo.Size256, 79 | } 80 | return client.CreateImageVariation(nil, request) 81 | 82 | }, 83 | }, 84 | { 85 | Name: "chat_completion", 86 | Run: func() (any, error) { 87 | client := openaigo.NewClient(OPENAI_API_KEY) 88 | request := openaigo.ChatRequest{ 89 | Model: openaigo.GPT4o, 90 | Messages: []openaigo.Message{ 91 | {Role: "user", Content: "Hello!"}, 92 | }, 93 | } 94 | return client.Chat(nil, request) 95 | }, 96 | }, 97 | { 98 | // https://platform.openai.com/docs/models/gpt-4 99 | Name: "[SKIP] chat_completion_GPT4", 100 | Run: func() (any, error) { 101 | client := openaigo.NewClient(OPENAI_API_KEY) 102 | request := openaigo.ChatRequest{ 103 | Model: openaigo.GPT4o, 104 | Messages: []openaigo.Message{ 105 | {Role: "user", Content: "Who are you?"}, 106 | }, 107 | } 108 | return client.Chat(nil, 
request) 109 | }, 110 | }, 111 | { 112 | Name: "chat_completion_stream", 113 | Run: func() (any, error) { 114 | client := openaigo.NewClient(OPENAI_API_KEY) 115 | data := make(chan openaigo.ChatCompletionResponse) 116 | done := make(chan error) 117 | defer close(data) 118 | defer close(done) 119 | calback := func(r openaigo.ChatCompletionResponse, d bool, e error) { 120 | if d { 121 | done <- e 122 | } else { 123 | data <- r 124 | } 125 | } 126 | request := openaigo.ChatCompletionRequestBody{ 127 | Model: openaigo.GPT4o, 128 | StreamCallback: calback, 129 | Messages: []openaigo.Message{ 130 | { 131 | Role: "user", 132 | Content: fmt.Sprintf("What are the historical events happend on %s", time.Now().Format("01/02"))}, 133 | }, 134 | } 135 | res, err := client.ChatCompletion(context.Background(), request) 136 | if err != nil { 137 | return res, err 138 | } 139 | for { 140 | select { 141 | case payload := <-data: 142 | fmt.Print(payload.Choices[0].Delta.Content) 143 | case err = <-done: 144 | fmt.Print("\n") 145 | return res, err 146 | } 147 | } 148 | }, 149 | }, 150 | 151 | // Test case using "function_call" 152 | { 153 | Name: "function_call", 154 | Run: func() (any, error) { 155 | conversation := []openaigo.Message{ 156 | {Role: "user", Content: "Should I bring an umbrella tomorrow? 
I'm living around Tokyo."}, 157 | } 158 | funcs := fc.Funcs{ 159 | "GetDate": fc.Func{GetDate, "A function to get date today", fc.Params{}}, 160 | "GetWeather": fc.Func{GetWeather, "A function to get weather information", fc.Params{ 161 | {"location", "string", "location of the wather", true, nil}, 162 | {"date", "integer", "date MMDD as number", true, nil}, 163 | }}, 164 | } 165 | client := openaigo.NewClient(OPENAI_API_KEY) 166 | request := openaigo.ChatRequest{ 167 | Model: openaigo.GPT4o_20240513, 168 | Messages: conversation, 169 | Functions: funcs, 170 | } 171 | res_1, err := client.Chat(nil, request) 172 | if err != nil { 173 | return nil, err 174 | } 175 | conversation = append(conversation, res_1.Choices[0].Message) 176 | if res_1.Choices[0].Message.FunctionCall != nil { 177 | fmt.Printf("%+v\n", res_1.Choices[0].Message.FunctionCall) 178 | conversation = append(conversation, openaigo.Message{ 179 | Role: "function", 180 | Name: res_1.Choices[0].Message.FunctionCall.Name(), 181 | Content: funcs.Call(res_1.Choices[0].Message.FunctionCall), 182 | }) 183 | } 184 | request.Messages = conversation 185 | res_2, err := client.Chat(nil, request) 186 | if err != nil { 187 | return nil, err 188 | } 189 | conversation = append(conversation, res_2.Choices[0].Message) 190 | if res_2.Choices[0].Message.FunctionCall != nil { 191 | fmt.Printf("%+v\n", res_2.Choices[0].Message.FunctionCall) 192 | conversation = append(conversation, openaigo.Message{ 193 | Role: "function", 194 | Name: res_2.Choices[0].Message.FunctionCall.Name(), 195 | Content: funcs.Call(res_2.Choices[0].Message.FunctionCall), 196 | }) 197 | } 198 | request.Messages = conversation 199 | res_3, err := client.Chat(nil, request) 200 | return res_3, err 201 | }, 202 | }, 203 | } 204 | 205 | list bool 206 | ) 207 | 208 | func init() { 209 | flag.BoolVar(&list, "list", false, "List up all names of scenario") 210 | flag.Parse() 211 | OPENAI_API_KEY = os.Getenv("OPENAI_API_KEY") 212 | } 213 | 214 | func main() { 
215 | 216 | if list { 217 | for i, scenario := range scenarios { 218 | fmt.Printf("% 2d %s\n", i, scenario.Name) 219 | } 220 | return 221 | } 222 | match := flag.Arg(0) 223 | total := 0 224 | var dur time.Duration = 0 225 | errors := []error{} 226 | for i, scenario := range scenarios { 227 | fmt.Printf("\033[1;34m[%03d] %s\033[0m\n", i+1, scenario.Name) 228 | if strings.HasPrefix(scenario.Name, "[SKIP]") { 229 | fmt.Print(SKIP) 230 | continue 231 | } 232 | if match != "" { 233 | if !strings.Contains(scenario.Name, match) && !strings.Contains(fmt.Sprintf("%d", i), match) { 234 | fmt.Print(SKIP) 235 | continue 236 | } 237 | } 238 | begin := time.Now() 239 | res, err := scenario.Run() 240 | elapsed := time.Since(begin) 241 | if err != nil { 242 | fmt.Printf("\033[31mError:\033[0m %+v\n", err) 243 | if e, ok := err.(openaigo.APIError); ok { 244 | fmt.Println("++++++++++++++++++++++") 245 | // fmt.Println("StatusCode:", e.StatusCode) 246 | fmt.Println("Status: ", e.Status) 247 | fmt.Println("Type: ", e.Type) 248 | fmt.Println("Message: ", e.Message) 249 | fmt.Println("Code: ", e.Code) 250 | fmt.Println("Param: ", e.Param) 251 | fmt.Println("++++++++++++++++++++++") 252 | } 253 | errors = append(errors, err) 254 | fmt.Print("Time: ") 255 | } else { 256 | fmt.Printf("%+v\n\033[32mTime:\033[0m ", res) 257 | } 258 | fmt.Printf("%v\n\n", elapsed) 259 | dur += elapsed 260 | total++ 261 | } 262 | fmt.Println("===============================================") 263 | fmt.Printf("Total %d scenario executed in %v.\n", total, dur) 264 | if len(errors) > 0 { 265 | os.Exit(1) 266 | } 267 | } 268 | -------------------------------------------------------------------------------- /chat.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | import "encoding/json" 4 | 5 | // ChatCompletionRequestBody: 6 | // https://platform.openai.com/docs/guides/chat/chat-completions-beta 7 | // https://platform.openai.com/docs/api-reference/chat 
8 | type ChatCompletionRequestBody struct { 9 | 10 | // Model: ID of the model to use. 11 | // Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported. 12 | Model string `json:"model"` 13 | 14 | // Messages: The messages to generate chat completions for, in the chat format. 15 | // https://platform.openai.com/docs/guides/chat/introduction 16 | // Including the conversation history helps when user instructions refer to prior messages. 17 | // In the example above, the user’s final question of “Where was it played?” only makes sense in the context of the prior messages about the World Series of 2020. 18 | // Because the models have no memory of past requests, all relevant information must be supplied via the conversation. 19 | // If a conversation cannot fit within the model’s token limit, it will need to be shortened in some way. 20 | Messages []Message `json:"messages"` 21 | 22 | // Temperature: What sampling temperature to use, between 0 and 2. 23 | // Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 24 | // We generally recommend altering this or top_p but not both. 25 | // Defaults to 1. 26 | Temperature float32 `json:"temperature,omitempty"` 27 | 28 | // TopP: An alternative to sampling with temperature, called nucleus sampling, 29 | // where the model considers the results of the tokens with top_p probability mass. 30 | // So 0.1 means only the tokens comprising the top 10% probability mass are considered. 31 | // We generally recommend altering this or temperature but not both. 32 | // Defaults to 1. 33 | TopP float32 `json:"top_p,omitempty"` 34 | 35 | // N: How many chat completion choices to generate for each input message. 36 | // Defaults to 1. 37 | N int `json:"n,omitempty"` 38 | 39 | // Stream: If set, partial message deltas will be sent, like in ChatGPT. 
40 | // Tokens will be sent as data-only server-sent events as they become available, 41 | // with the stream terminated by a data: [DONE] message. 42 | Stream bool `json:"stream,omitempty"` 43 | 44 | // StreamCallback is a callback funciton to handle stream response. 45 | // If provided, this library automatically set `Stream` `true`. 46 | // This field is added by github.com/otiai10/openaigo only to handle Stream. 47 | // Thus, it is omitted when the client excute HTTP request. 48 | StreamCallback func(res ChatCompletionResponse, done bool, err error) `json:"-"` 49 | 50 | // Stop: Up to 4 sequences where the API will stop generating further tokens. 51 | // Defaults to null. 52 | Stop []string `json:"stop,omitempty"` 53 | 54 | // MaxTokens: The maximum number of tokens allowed for the generated answer. 55 | // By default, the number of tokens the model can return will be (4096 - prompt tokens). 56 | MaxTokens int `json:"max_tokens,omitempty"` 57 | 58 | // PresencePenalty: Number between -2.0 and 2.0. 59 | // Positive values penalize new tokens based on whether they appear in the text so far, 60 | // increasing the model's likelihood to talk about new topics. 61 | // See more information about frequency and presence penalties. 62 | // https://platform.openai.com/docs/api-reference/parameter-details 63 | PresencePenalty float32 `json:"presence_penalty,omitempty"` 64 | 65 | // FrequencyPenalty: Number between -2.0 and 2.0. 66 | // Positive values penalize new tokens based on their existing frequency in the text so far, 67 | // decreasing the model's likelihood to repeat the same line verbatim. 68 | // See more information about frequency and presence penalties. 69 | // https://platform.openai.com/docs/api-reference/parameter-details 70 | FrequencyPenalty float32 `json:"frequency_penalty,omitempty"` 71 | 72 | // LogitBias: Modify the likelihood of specified tokens appearing in the completion. 
73 | // Accepts a json object that maps tokens (specified by their token ID in the tokenizer) 74 | // to an associated bias value from -100 to 100. 75 | // Mathematically, the bias is added to the logits generated by the model prior to sampling. 76 | // The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; 77 | // values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 78 | LogitBias map[string]int `json:"logit_bias,omitempty"` 79 | 80 | // User: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more. 81 | // https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids 82 | User string `json:"user,omitempty"` 83 | 84 | // Functions: A list of functions which GPT is allowed to request to call. 85 | // Functions []Function `json:"functions,omitempty"` 86 | Functions json.Marshaler `json:"functions,omitempty"` 87 | 88 | // FunctionCall: You ain't need it. Default is "auto". 89 | FunctionCall string `json:"function_call,omitempty"` 90 | } 91 | 92 | type Functions []Function 93 | 94 | func (funcs Functions) MarshalJSON() ([]byte, error) { 95 | if len(funcs) == 0 { 96 | return []byte("[]"), nil 97 | } 98 | return json.Marshal([]Function(funcs)) 99 | } 100 | 101 | type Function struct { 102 | Name string `json:"name,omitempty"` 103 | Description string `json:"description,omitempty"` 104 | Parameters Parameters `json:"parameters,omitempty"` 105 | } 106 | 107 | type Parameters struct { 108 | Type string `json:"type,omitempty"` // Must be "object" 109 | Properties map[string]map[string]any `json:"properties,omitempty"` 110 | Required []string `json:"required,omitempty"` 111 | } 112 | 113 | // ChatRequest is just an alias of ChatCompletionRequestBody. 114 | type ChatRequest ChatCompletionRequestBody 115 | 116 | // Message: An element of messages parameter. 117 | // The main input is the messages parameter. 
Messages must be an array of message objects, 118 | // where each object has a role (either “system”, “user”, or “assistant”) 119 | // and content (the content of the message). 120 | // Conversations can be as short as 1 message or fill many pages. 121 | // See https://platform.openai.com/docs/api-reference/chat/create#chat/create-messages 122 | type Message struct { 123 | 124 | // Role: Either of "system", "user", "assistant". 125 | // Typically, a conversation is formatted with a system message first, followed by alternating user and assistant messages. 126 | // The system message helps set the behavior of the assistant. In the example above, the assistant was instructed with “You are a helpful assistant.” 127 | // The user messages help instruct the assistant. They can be generated by the end users of an application, or set by a developer as an instruction. 128 | // The assistant messages help store prior responses. They can also be written by a developer to help give examples of desired behavior. 129 | Role string `json:"role"` 130 | 131 | // Content: A content of the message. 132 | Content string `json:"content"` 133 | 134 | // FunctionCall requested by ChatGPT. 135 | // Only appears in a response from ChatGPT in which ChatGPT wants to call a function. 136 | FunctionCall *FunctionCall `json:"function_call,omitempty"` 137 | 138 | // Name of the function called, to tell this message is a result of function_call. 139 | // Only appears in a request from us when the previous message is "function_call" requested by ChatGPT. 
140 | Name string `json:"name,omitempty"` 141 | } 142 | 143 | type FunctionCall struct { 144 | NameRaw string `json:"name,omitempty"` 145 | ArgumentsRaw string `json:"arguments,omitempty"` 146 | // Arguments map[string]any `json:"arguments,omitempty"` 147 | } 148 | 149 | func (fc *FunctionCall) Name() string { 150 | return fc.NameRaw 151 | } 152 | 153 | func (fc *FunctionCall) Args() map[string]any { 154 | var args map[string]any 155 | json.Unmarshal([]byte(fc.ArgumentsRaw), &args) 156 | return args 157 | } 158 | 159 | type ChatCompletionResponse struct { 160 | ID string `json:"id"` 161 | Object string `json:"object"` 162 | Created int64 `json:"created"` 163 | Choices []Choice `json:"choices"` 164 | Usage Usage `json:"usage"` 165 | } 166 | 167 | type Choice struct { 168 | Index int `json:"index"` 169 | Message Message `json:"message"` 170 | FinishReason string `json:"finish_reason"` 171 | Delta Message `json:"delta"` // Only appears in stream response 172 | } 173 | -------------------------------------------------------------------------------- /all_test.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | import ( 4 | "encoding/json" 5 | "io" 6 | "io/ioutil" 7 | "net/http" 8 | "net/http/httptest" 9 | "os" 10 | "testing" 11 | ) 12 | 13 | var mockserver *httptest.Server 14 | 15 | func TestMain(m *testing.M) { 16 | mockserver = testserverV1() 17 | code := m.Run() 18 | os.Exit(code) 19 | } 20 | 21 | func testserverV1() *httptest.Server { 22 | mux := http.NewServeMux() 23 | mux.HandleFunc("/completions", func(w http.ResponseWriter, req *http.Request) { 24 | switch req.Method { 25 | case http.MethodPost: 26 | f, e := os.Open("./testdata/completion.json") 27 | if e != nil { 28 | panic(e) 29 | } 30 | defer f.Close() 31 | io.Copy(w, f) 32 | } 33 | }) 34 | mux.HandleFunc("/models", func(w http.ResponseWriter, req *http.Request) { 35 | switch req.Method { 36 | case http.MethodGet: 37 | f, e := 
os.Open("./testdata/models_list.json") 38 | if e != nil { 39 | panic(e) 40 | } 41 | defer f.Close() 42 | io.Copy(w, f) 43 | } 44 | }) 45 | mux.HandleFunc("/models/abcdefg", func(w http.ResponseWriter, req *http.Request) { 46 | switch req.Method { 47 | case http.MethodDelete: 48 | f, e := os.Open("./testdata/text-davinci-003.json") 49 | if e != nil { 50 | panic(e) 51 | } 52 | defer f.Close() 53 | io.Copy(w, f) 54 | } 55 | }) 56 | mux.HandleFunc("/models/200-but-invalidjson", func(w http.ResponseWriter, req *http.Request) { 57 | w.Write([]byte("{")) 58 | }) 59 | mux.HandleFunc("/models/text-davinci-003", func(w http.ResponseWriter, req *http.Request) { 60 | switch req.Method { 61 | case http.MethodGet: 62 | f, e := os.Open("./testdata/text-davinci-003.json") 63 | if e != nil { 64 | panic(e) 65 | } 66 | defer f.Close() 67 | io.Copy(w, f) 68 | } 69 | }) 70 | mux.HandleFunc("/edits", func(w http.ResponseWriter, req *http.Request) { 71 | switch req.Method { 72 | case http.MethodPost: 73 | f, e := os.Open("./testdata/edits-create.json") 74 | if e != nil { 75 | panic(e) 76 | } 77 | defer f.Close() 78 | io.Copy(w, f) 79 | } 80 | }) 81 | mux.HandleFunc("/images/generations", func(w http.ResponseWriter, req *http.Request) { 82 | switch req.Method { 83 | case http.MethodPost: 84 | f, e := os.Open("./testdata/image-create.json") 85 | if e != nil { 86 | panic(e) 87 | } 88 | defer f.Close() 89 | io.Copy(w, f) 90 | } 91 | }) 92 | mux.HandleFunc("/images/edits", func(w http.ResponseWriter, req *http.Request) { 93 | switch req.Method { 94 | case http.MethodPost: 95 | json.NewEncoder(w).Encode(map[string]any{ 96 | "created": 1670725494, 97 | "data": []any{ 98 | map[string]any{"url": "https://otiai10.com/foobaa"}, 99 | }, 100 | }) 101 | } 102 | }) 103 | mux.HandleFunc("/images/variations", func(w http.ResponseWriter, req *http.Request) { 104 | switch req.Method { 105 | case http.MethodPost: 106 | json.NewEncoder(w).Encode(map[string]any{ 107 | "created": 1670725494, 108 | "data": 
[]any{ 109 | map[string]any{"url": "https://otiai10.com/foobaa"}, 110 | }, 111 | }) 112 | } 113 | }) 114 | mux.HandleFunc("/files", func(w http.ResponseWriter, req *http.Request) { 115 | switch req.Method { 116 | case http.MethodGet: 117 | json.NewEncoder(w).Encode(map[string]any{ 118 | "object": "list", 119 | "data": []any{ 120 | map[string]any{ 121 | "id": "file-ccdDZrC3iZVNiQVeEA6Z66wf", 122 | "object": "file", 123 | "bytes": 175, 124 | "created_at": 1613677385, 125 | "filename": "train.jsonl", 126 | "purpose": "search", 127 | }, 128 | }, 129 | }) 130 | case http.MethodPost: 131 | json.NewEncoder(w).Encode(map[string]any{ 132 | "id": "file-ccdDZrC3iZVNiQVeEA6Z66wf", 133 | "object": "file", 134 | "bytes": 175, 135 | "created_at": 1613677385, 136 | "filename": "train.jsonl", 137 | "purpose": "search", 138 | }) 139 | } 140 | }) 141 | mux.HandleFunc("/files/abcdefg", func(w http.ResponseWriter, req *http.Request) { 142 | switch req.Method { 143 | case http.MethodGet: 144 | json.NewEncoder(w).Encode(map[string]any{ 145 | "id": "file-ccdDZrC3iZVNiQVeEA6Z66wf", 146 | "object": "file", 147 | "bytes": 175, 148 | "created_at": 1613677385, 149 | "filename": "train.jsonl", 150 | "purpose": "search", 151 | }) 152 | case http.MethodDelete: 153 | json.NewEncoder(w).Encode(map[string]any{ 154 | "id": "file-ccdDZrC3iZVNiQVeEA6Z66wf", 155 | "object": "file", 156 | "deleted": true, 157 | }) 158 | } 159 | }) 160 | mux.HandleFunc("/files/abc", func(w http.ResponseWriter, req *http.Request) { 161 | switch req.Method { 162 | case http.MethodGet: 163 | w.WriteHeader(http.StatusNotFound) 164 | json.NewEncoder(w).Encode(map[string]any{ 165 | "error": map[string]any{ 166 | "message": "No such File object: abc", 167 | "type": "invalid_request_error", 168 | "param": "id", 169 | "code": nil, 170 | }, 171 | }) 172 | } 173 | }) 174 | mux.HandleFunc("/files/zzz", func(w http.ResponseWriter, req *http.Request) { 175 | switch req.Method { 176 | case http.MethodGet: 177 | 
w.WriteHeader(http.StatusBadRequest) 178 | w.Write([]byte("{....///")) 179 | } 180 | }) 181 | mux.HandleFunc("/files/abcdefg/content", func(w http.ResponseWriter, req *http.Request) { 182 | switch req.Method { 183 | case http.MethodGet: 184 | f, err := os.Open("./testdata/train.jsonl") 185 | if err != nil { 186 | w.WriteHeader(http.StatusInternalServerError) 187 | return 188 | } 189 | defer f.Close() 190 | io.Copy(w, f) 191 | } 192 | }) 193 | mux.HandleFunc("/embeddings", func(w http.ResponseWriter, req *http.Request) { 194 | switch req.Method { 195 | case http.MethodPost: 196 | json.NewEncoder(w).Encode(map[string]any{ 197 | "object": "list", 198 | "data": []any{ 199 | map[string]any{ 200 | "object": "embedding", 201 | "embedding": []float32{ 202 | 0.018990106880664825, 203 | -0.0073809814639389515, 204 | 0.021276434883475304, 205 | }, 206 | "index": 0, 207 | }, 208 | }, 209 | "usage": map[string]any{ 210 | "prompt_tokens": 8, 211 | "total_tokens": 8, 212 | }, 213 | }) 214 | } 215 | }) 216 | mux.HandleFunc("/moderations", func(w http.ResponseWriter, req *http.Request) { 217 | switch req.Method { 218 | case http.MethodPost: 219 | f, err := os.Open("./testdata/moderation-create.json") 220 | if err != nil { 221 | w.WriteHeader(http.StatusInternalServerError) 222 | return 223 | } 224 | defer f.Close() 225 | io.Copy(w, f) 226 | } 227 | }) 228 | mux.HandleFunc("/fine-tunes", func(w http.ResponseWriter, req *http.Request) { 229 | switch req.Method { 230 | case http.MethodGet: 231 | f, err := os.Open("./testdata/finetune-list.json") 232 | if err != nil { 233 | w.WriteHeader(http.StatusInternalServerError) 234 | return 235 | } 236 | defer f.Close() 237 | io.Copy(w, f) 238 | case http.MethodPost: 239 | f, err := os.Open("./testdata/finetune-create.json") 240 | if err != nil { 241 | w.WriteHeader(http.StatusInternalServerError) 242 | return 243 | } 244 | defer f.Close() 245 | io.Copy(w, f) 246 | } 247 | }) 248 | mux.HandleFunc("/fine-tunes/abcdefghi", func(w 
http.ResponseWriter, req *http.Request) { 249 | switch req.Method { 250 | case http.MethodGet: 251 | f, err := os.Open("./testdata/finetune-retrieve.json") 252 | if err != nil { 253 | w.WriteHeader(http.StatusInternalServerError) 254 | return 255 | } 256 | defer f.Close() 257 | io.Copy(w, f) 258 | } 259 | }) 260 | mux.HandleFunc("/fine-tunes/abcdefghi/cancel", func(w http.ResponseWriter, req *http.Request) { 261 | switch req.Method { 262 | case http.MethodPost: 263 | f, err := os.Open("./testdata/finetune-retrieve.json") 264 | if err != nil { 265 | w.WriteHeader(http.StatusInternalServerError) 266 | return 267 | } 268 | defer f.Close() 269 | io.Copy(w, f) 270 | } 271 | }) 272 | mux.HandleFunc("/fine-tunes/abcdefghi/events", func(w http.ResponseWriter, req *http.Request) { 273 | switch req.Method { 274 | case http.MethodGet: 275 | f, err := os.Open("./testdata/finetune-events-list.json") 276 | if err != nil { 277 | w.WriteHeader(http.StatusInternalServerError) 278 | return 279 | } 280 | defer f.Close() 281 | io.Copy(w, f) 282 | } 283 | }) 284 | mux.HandleFunc("/fine_tuning/jobs", func(w http.ResponseWriter, req *http.Request) { 285 | switch req.Method { 286 | case http.MethodPost: 287 | f, err := os.Open("./testdata/finetuning-create.json") 288 | if err != nil { 289 | w.WriteHeader(http.StatusInternalServerError) 290 | return 291 | } 292 | defer f.Close() 293 | io.Copy(w, f) 294 | } 295 | }) 296 | mux.HandleFunc("/fine_tuning/jobs/abcdefghi/cancel", func(w http.ResponseWriter, req *http.Request) { 297 | switch req.Method { 298 | case http.MethodPost: 299 | f, err := os.Open("./testdata/finetuning-cancel.json") 300 | if err != nil { 301 | w.WriteHeader(http.StatusInternalServerError) 302 | return 303 | } 304 | defer f.Close() 305 | io.Copy(w, f) 306 | } 307 | }) 308 | mux.HandleFunc("/fine_tuning/jobs/abcdefghi", func(w http.ResponseWriter, req *http.Request) { 309 | switch req.Method { 310 | case http.MethodGet: 311 | f, err := 
os.Open("./testdata/finetuning-retrieve.json") 312 | if err != nil { 313 | w.WriteHeader(http.StatusInternalServerError) 314 | return 315 | } 316 | defer f.Close() 317 | io.Copy(w, f) 318 | } 319 | }) 320 | mux.HandleFunc("/fine_tuning/jobs/abcdefghi/events", func(w http.ResponseWriter, req *http.Request) { 321 | switch req.Method { 322 | case http.MethodGet: 323 | f, err := os.Open("./testdata/finetuning-event-list.json") 324 | if err != nil { 325 | w.WriteHeader(http.StatusInternalServerError) 326 | return 327 | } 328 | defer f.Close() 329 | io.Copy(w, f) 330 | } 331 | }) 332 | mux.HandleFunc("/chat/completions", func(w http.ResponseWriter, req *http.Request) { 333 | f, err := os.Open("./testdata/chat-completion.json") 334 | if err != nil { 335 | w.WriteHeader(http.StatusInternalServerError) 336 | return 337 | } 338 | defer f.Close() 339 | body := struct { 340 | Stream bool `json:"stream"` 341 | }{} 342 | json.NewDecoder(req.Body).Decode(&body) 343 | if body.Stream { 344 | b, _ := ioutil.ReadAll(f) 345 | b = append([]byte("data: "), b...) 346 | b = append(b, []byte("\n\n\ndata: [DONE]")...) 347 | w.Write(b) 348 | } else { 349 | io.Copy(w, f) 350 | } 351 | }) 352 | return httptest.NewServer(mux) 353 | } 354 | -------------------------------------------------------------------------------- /endpoints.go: -------------------------------------------------------------------------------- 1 | package openaigo 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | ) 9 | 10 | // ListModels: GET /models 11 | // Lists the currently available models, and provides basic information about each one such as the owner and availability. 
12 | // See https://beta.openai.com/docs/api-reference/models/list 13 | func (client *Client) ListModels(ctx context.Context) (resp ModelsListResponse, err error) { 14 | p := "/models" 15 | return call(ctx, client, http.MethodGet, p, nil, resp, nil) 16 | } 17 | 18 | // RetrieveModel: GET /models/{model} 19 | // Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 20 | // See https://beta.openai.com/docs/api-reference/models/retrieve 21 | func (client *Client) RetrieveModel(ctx context.Context, model string) (resp ModelRetrieveResponse, err error) { 22 | p := fmt.Sprintf("/models/%s", model) 23 | return call(ctx, client, http.MethodGet, p, nil, resp, nil) 24 | } 25 | 26 | // Completion: POST https://api.openai.com/v1/completions 27 | // Creates a completion for the provided prompt and parameters 28 | // See https://beta.openai.com/docs/api-reference/completions/create 29 | func (client *Client) Completion_Legacy(ctx context.Context, body CompletionRequestBody) (resp CompletionResponse, err error) { 30 | p := "/completions" 31 | return call(ctx, client, http.MethodPost, p, body, resp, nil) 32 | } 33 | 34 | // Edit: POST https://api.openai.com/v1/edits 35 | // Creates a new edit for the provided input, instruction, and parameters. 36 | // See https://beta.openai.com/docs/api-reference/edits/create 37 | func (client *Client) CreateEdit(ctx context.Context, body EditCreateRequestBody) (resp EditCreateResponse, err error) { 38 | p := "/edits" 39 | return call(ctx, client, http.MethodPost, p, body, resp, nil) 40 | } 41 | 42 | // CreateImage: POST https://api.openai.com/v1/images/generations 43 | // Creates an image given a prompt. 
44 | // See https://beta.openai.com/docs/api-reference/images/create 45 | func (client *Client) CreateImage(ctx context.Context, body ImageGenerationRequestBody) (resp ImageGenerationResponse, err error) { 46 | p := "/images/generations" 47 | return call(ctx, client, http.MethodPost, p, body, resp, nil) 48 | } 49 | 50 | func (client *Client) EditImage(ctx context.Context, body ImageEditRequestBody) (resp ImageEditResponse, err error) { 51 | p := "/images/edits" 52 | return call(ctx, client, http.MethodPost, p, body, resp, nil) 53 | } 54 | 55 | // CreateImageVariation: POST https://api.openai.com/v1/images/variations 56 | // Creates a variation of a given image. 57 | // See https://beta.openai.com/docs/api-reference/images/create-variation 58 | func (client *Client) CreateImageVariation(ctx context.Context, body ImageVariationRequestBody) (resp ImageVariationResponse, err error) { 59 | p := "/images/variations" 60 | return call(ctx, client, http.MethodPost, p, body, resp, nil) 61 | } 62 | 63 | // CreateEmbedding: POST https://api.openai.com/v1/embeddings 64 | // Creates an embedding vector representing the input text. 65 | // See https://beta.openai.com/docs/api-reference/embeddings/create 66 | func (client *Client) CreateEmbedding(ctx context.Context, body EmbeddingCreateRequestBody) (resp EmbeddingCreateResponse, err error) { 67 | p := "/embeddings" 68 | return call(ctx, client, http.MethodPost, p, body, resp, nil) 69 | } 70 | 71 | // ListFiles: GET https://api.openai.com/v1/files 72 | // Returns a list of files that belong to the user's organization. 73 | // See https://beta.openai.com/docs/api-reference/files/list 74 | func (client *Client) ListFiles(ctx context.Context) (resp FileListResponse, err error) { 75 | p := "/files" 76 | return call(ctx, client, http.MethodGet, p, nil, resp, nil) 77 | } 78 | 79 | // UploadFile: POST https://api.openai.com/v1/files 80 | // Upload a file that contains document(s) to be used across various endpoints/features. 
81 | // Currently, the size of all the files uploaded by one organization can be up to 1 GB. 82 | // Please contact us if you need to increase the storage limit. 83 | // See https://beta.openai.com/docs/api-reference/files/upload 84 | func (client *Client) UploadFile(ctx context.Context, body FileUploadRequestBody) (resp FileUploadResponse, err error) { 85 | p := "/files" 86 | return call(ctx, client, http.MethodPost, p, body, resp, nil) 87 | } 88 | 89 | // DeleteFile: DELETE https://api.openai.com/v1/files/{file_id} 90 | // Delete a file. 91 | // See https://beta.openai.com/docs/api-reference/files/delete 92 | func (client *Client) DeleteFile(ctx context.Context, id string) (resp FileDeleteResponse, err error) { 93 | p := fmt.Sprintf("/files/%s", id) 94 | return call(ctx, client, http.MethodDelete, p, nil, resp, nil) 95 | } 96 | 97 | // RetrieveFile: GET https://api.openai.com/v1/files/{file_id} 98 | // Returns information about a specific file. 99 | // See https://beta.openai.com/docs/api-reference/files/retrieve 100 | func (client *Client) RetrieveFile(ctx context.Context, id string) (resp FileRetrieveResponse, err error) { 101 | p := fmt.Sprintf("/files/%s", id) 102 | return call(ctx, client, http.MethodGet, p, nil, resp, nil) 103 | } 104 | 105 | // RetrieveFileContent: GET https://api.openai.com/v1/files/{file_id}/content 106 | // Returns the contents of the specified file. 107 | // User must Close response after used. 
108 | // See https://beta.openai.com/docs/api-reference/files/retrieve-content 109 | func (client *Client) RetrieveFileContent(ctx context.Context, id string) (res io.ReadCloser, err error) { 110 | endpoint, err := client.endpoint(fmt.Sprintf("/files/%s/content", id)) 111 | if err != nil { 112 | return nil, err 113 | } 114 | req, err := http.NewRequest(http.MethodGet, endpoint, nil) 115 | if err != nil { 116 | return nil, err 117 | } 118 | req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", client.APIKey)) 119 | if ctx != nil { 120 | req = req.WithContext(ctx) 121 | } 122 | if client.HTTPClient == nil { 123 | client.HTTPClient = http.DefaultClient 124 | } 125 | response, err := client.HTTPClient.Do(req) 126 | if err != nil { 127 | return nil, err 128 | } 129 | if response.StatusCode >= 400 { 130 | return nil, client.apiError(response) 131 | } 132 | return response.Body, nil 133 | } 134 | 135 | // CreateModeration: POST https://api.openai.com/v1/moderations 136 | // Classifies if text violates OpenAI's Content Policy. 137 | // See https://beta.openai.com/docs/api-reference/moderations/create 138 | func (client *Client) CreateModeration(ctx context.Context, body ModerationCreateRequestBody) (resp ModerationCreateResponse, err error) { 139 | p := "/moderations" 140 | return call(ctx, client, http.MethodPost, p, body, resp, nil) 141 | } 142 | 143 | // CreateFineTune: POST https://api.openai.com/v1/fine-tunes 144 | // Deprecated: you should consider using the updating fine-tuning API https://platform.openai.com/docs/guides/fine-tuning 145 | // Creates a job that fine-tunes a specified model from a given dataset. 146 | // Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
147 | // Learn more about Fine-tuning: https://platform.openai.com/docs/api-reference/fine-tuning 148 | // See https://platform.openai.com/docs/api-reference/fine-tunes/create 149 | func (client *Client) CreateFineTune(ctx context.Context, body FineTuneCreateRequestBody) (resp FineTuneCreateResponse, err error) { 150 | p := "/fine-tunes" 151 | return call(ctx, client, http.MethodPost, p, body, resp, nil) 152 | } 153 | 154 | // ListFineTunes: GET https://api.openai.com/v1/fine-tunes 155 | // Deprecated: you should consider using the updating fine-tuning API https://platform.openai.com/docs/guides/fine-tuning 156 | // List your organization's fine-tuning jobs. 157 | // See https://platform.openai.com/docs/api-reference/fine-tunes/list 158 | func (client *Client) ListFineTunes(ctx context.Context) (resp FineTuneListResponse, err error) { 159 | p := "/fine-tunes" 160 | return call(ctx, client, http.MethodGet, p, nil, resp, nil) 161 | } 162 | 163 | // RetrieveFineTune: GET https://api.openai.com/v1/fine-tunes/{fine_tune_id} 164 | // Deprecated: you should consider using the updating fine-tuning API https://platform.openai.com/docs/guides/fine-tuning 165 | // Gets info about the fine-tune job. 166 | // Learn more about Fine-tuning https://platform.openai.com/docs/api-reference/fine-tuning 167 | // See https://platform.openai.com/docs/api-reference/fine-tunes/retrieve 168 | func (client *Client) RetrieveFineTune(ctx context.Context, id string) (resp FineTuneRetrieveResponse, err error) { 169 | p := fmt.Sprintf("/fine-tunes/%s", id) 170 | return call(ctx, client, http.MethodGet, p, nil, resp, nil) 171 | } 172 | 173 | // CancelFineTune: POST https://api.openai.com/v1/fine-tunes/{fine_tune_id}/cancel 174 | // Deprecated: you should consider using the updating fine-tuning API https://platform.openai.com/docs/guides/fine-tuning 175 | // Immediately cancel a fine-tune job. 
176 | // See https://platform.openai.com/docs/api-reference/fine-tunes/cancel 177 | func (client *Client) CancelFineTune(ctx context.Context, id string) (resp FineTuneCancelResponse, err error) { 178 | p := fmt.Sprintf("/fine-tunes/%s/cancel", id) 179 | return call(ctx, client, http.MethodPost, p, nil, resp, nil) 180 | } 181 | 182 | // ListFineTuneEvents: GET https://api.openai.com/v1/fine-tunes/{fine_tune_id}/events 183 | // Deprecated: you should consider using the updating fine-tuning API https://platform.openai.com/docs/guides/fine-tuning 184 | // Get fine-grained status updates for a fine-tune job. 185 | // See https://platform.openai.com/docs/api-reference/fine-tunes/events 186 | func (client *Client) ListFineTuneEvents(ctx context.Context, id string) (resp FineTuneListEventsResponse, err error) { 187 | p := fmt.Sprintf("/fine-tunes/%s/events", id) 188 | return call(ctx, client, http.MethodGet, p, nil, resp, nil) 189 | } 190 | 191 | // DeleteFineTuneModel: DELETE https://api.openai.com/v1/models/{model} 192 | // Deprecated: you should consider using the updating fine-tuning API https://platform.openai.com/docs/guides/fine-tuning 193 | // Delete a fine-tuned model. You must have the Owner role in your organization. 194 | // See https://platform.openai.com/docs/api-reference/fine-tunes/delete-model 195 | func (client *Client) DeleteFineTuneModel(ctx context.Context, id string) (resp FineTuneDeleteModelResponse, err error) { 196 | p := fmt.Sprintf("/models/%s", id) 197 | return call(ctx, client, http.MethodDelete, p, nil, resp, nil) 198 | } 199 | 200 | // Chat, short-hand of ChatCompletion. 201 | // Creates a completion for the chat message. 202 | func (client *Client) Chat(ctx context.Context, body ChatRequest) (resp ChatCompletionResponse, err error) { 203 | return client.ChatCompletion(ctx, ChatCompletionRequestBody(body)) 204 | } 205 | 206 | // ChatCompletion: POST https://api.openai.com/v1/chat/completions 207 | // Creates a completion for the chat message. 
208 | // See https://platform.openai.com/docs/api-reference/chat/create 209 | func (client *Client) ChatCompletion(ctx context.Context, body ChatCompletionRequestBody) (resp ChatCompletionResponse, err error) { 210 | p := "/chat/completions" 211 | if body.StreamCallback != nil { 212 | body.Stream = true // Nosy ;) 213 | return call(ctx, client, http.MethodPost, p, body, resp, body.StreamCallback) 214 | } 215 | return call(ctx, client, http.MethodPost, p, body, resp, nil) 216 | } 217 | 218 | // CreateFineTuning: POST https://api.openai.com/v1/fine_tuning/jobs 219 | // Creates a job that fine-tunes a specified model from a given dataset. 220 | // Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 221 | // Learn more about Fine-tuning: https://platform.openai.com/docs/guides/fine-tuning 222 | // See https://platform.openai.com/docs/api-reference/fine-tuning/create 223 | func (client *Client) CreateFineTuning(ctx context.Context, body FineTuningCreateRequestBody) (resp FineTuningJob, err error) { 224 | p := "/fine_tuning/jobs" 225 | return call(ctx, client, http.MethodPost, p, body, resp, nil) 226 | } 227 | 228 | // CancelFineTuning: POST https://api.openai.com/v1/fine_tuning/{fine_tuning_job_id}/cancel 229 | // Immediately cancel a fine tuning job. 230 | // Learn more about Fine-tuning https://platform.openai.com/docs/guides/fine-tuning 231 | // See https://platform.openai.com/docs/api-reference/fine-tuning/cancel 232 | func (client *Client) CancelFineTuning(ctx context.Context, id string) (resp FineTuningJob, err error) { 233 | p := "/fine_tuning/jobs/" + id + "/cancel" 234 | return call(ctx, client, http.MethodPost, p, nil, resp, nil) 235 | } 236 | 237 | // RetrieveFineTuning: GET https://api.openai.com/v1/fine_tuning/jobs//{fine_tuning_job_id} 238 | // Gets info about the fine-tuning job. 
239 | // Learn more about Fine-tuning https://platform.openai.com/docs/guides/fine-tuning 240 | // See https://platform.openai.com/docs/api-reference/fine-tuning/retrieve 241 | func (client *Client) RetrieveFineTuning(ctx context.Context, id string) (resp FineTuningJob, err error) { 242 | p := fmt.Sprintf("/fine_tuning/jobs/%s", id) 243 | return call(ctx, client, http.MethodGet, p, nil, resp, nil) 244 | } 245 | 246 | // ListFineTuningEvents: GET https://api.openai.com/v1/fine_tuning/jobs/{fine_tuning_job_id}/events 247 | // Get fine-grained status updates for a fine-tuning job. 248 | // Learn more about Fine-tuning https://platform.openai.com/docs/guides/fine-tuning 249 | // See https://platform.openai.com/docs/api-reference/fine-tuning/list-events 250 | func (client *Client) ListFineTuningEvents(ctx context.Context, id string) (resp FineTuningListEventsResponse, err error) { 251 | p := fmt.Sprintf("/fine_tuning/jobs/%s/events", id) 252 | return call(ctx, client, http.MethodGet, p, nil, resp, nil) 253 | } 254 | -------------------------------------------------------------------------------- /testdata/models_list.json: -------------------------------------------------------------------------------- 1 | { 2 | "object": "list", 3 | "data": [ 4 | { 5 | "id": "babbage", 6 | "object": "model", 7 | "created": 1649358449, 8 | "owned_by": "openai", 9 | "permission": [ 10 | { 11 | "id": "modelperm-49FUp5v084tBB49tC4z8LPH5", 12 | "object": "model_permission", 13 | "created": 1669085501, 14 | "allow_create_engine": false, 15 | "allow_sampling": true, 16 | "allow_logprobs": true, 17 | "allow_search_indices": false, 18 | "allow_view": true, 19 | "allow_fine_tuning": false, 20 | "organization": "*", 21 | "group": null, 22 | "is_blocking": false 23 | } 24 | ], 25 | "root": "babbage", 26 | "parent": null 27 | }, 28 | { 29 | "id": "ada", 30 | "object": "model", 31 | "created": 1649357491, 32 | "owned_by": "openai", 33 | "permission": [ 34 | { 35 | "id": 
"modelperm-xTOEYvDZGN7UDnQ65VpzRRHz", 36 | "object": "model_permission", 37 | "created": 1669087301, 38 | "allow_create_engine": false, 39 | "allow_sampling": true, 40 | "allow_logprobs": true, 41 | "allow_search_indices": false, 42 | "allow_view": true, 43 | "allow_fine_tuning": false, 44 | "organization": "*", 45 | "group": null, 46 | "is_blocking": false 47 | } 48 | ], 49 | "root": "ada", 50 | "parent": null 51 | }, 52 | { 53 | "id": "davinci", 54 | "object": "model", 55 | "created": 1649359874, 56 | "owned_by": "openai", 57 | "permission": [ 58 | { 59 | "id": "modelperm-U6ZwlyAd0LyMk4rcMdz33Yc3", 60 | "object": "model_permission", 61 | "created": 1669066355, 62 | "allow_create_engine": false, 63 | "allow_sampling": true, 64 | "allow_logprobs": true, 65 | "allow_search_indices": false, 66 | "allow_view": true, 67 | "allow_fine_tuning": false, 68 | "organization": "*", 69 | "group": null, 70 | "is_blocking": false 71 | } 72 | ], 73 | "root": "davinci", 74 | "parent": null 75 | }, 76 | { 77 | "id": "babbage-code-search-code", 78 | "object": "model", 79 | "created": 1651172509, 80 | "owned_by": "openai-dev", 81 | "permission": [ 82 | { 83 | "id": "modelperm-4qRnA3Hj8HIJbgo0cGbcmErn", 84 | "object": "model_permission", 85 | "created": 1669085863, 86 | "allow_create_engine": false, 87 | "allow_sampling": true, 88 | "allow_logprobs": true, 89 | "allow_search_indices": true, 90 | "allow_view": true, 91 | "allow_fine_tuning": false, 92 | "organization": "*", 93 | "group": null, 94 | "is_blocking": false 95 | } 96 | ], 97 | "root": "babbage-code-search-code", 98 | "parent": null 99 | }, 100 | { 101 | "id": "text-similarity-babbage-001", 102 | "object": "model", 103 | "created": 1651172505, 104 | "owned_by": "openai-dev", 105 | "permission": [ 106 | { 107 | "id": "modelperm-48kcCHhfzvnfY84OtJf5m8Cz", 108 | "object": "model_permission", 109 | "created": 1669081947, 110 | "allow_create_engine": false, 111 | "allow_sampling": true, 112 | "allow_logprobs": true, 113 | 
"allow_search_indices": true, 114 | "allow_view": true, 115 | "allow_fine_tuning": false, 116 | "organization": "*", 117 | "group": null, 118 | "is_blocking": false 119 | } 120 | ], 121 | "root": "text-similarity-babbage-001", 122 | "parent": null 123 | }, 124 | { 125 | "id": "text-davinci-001", 126 | "object": "model", 127 | "created": 1649364042, 128 | "owned_by": "openai", 129 | "permission": [ 130 | { 131 | "id": "modelperm-MVM5NfoRjXkDve3uQW3YZDDt", 132 | "object": "model_permission", 133 | "created": 1669066355, 134 | "allow_create_engine": false, 135 | "allow_sampling": true, 136 | "allow_logprobs": true, 137 | "allow_search_indices": false, 138 | "allow_view": true, 139 | "allow_fine_tuning": false, 140 | "organization": "*", 141 | "group": null, 142 | "is_blocking": false 143 | } 144 | ], 145 | "root": "text-davinci-001", 146 | "parent": null 147 | }, 148 | { 149 | "id": "curie-instruct-beta", 150 | "object": "model", 151 | "created": 1649364042, 152 | "owned_by": "openai", 153 | "permission": [ 154 | { 155 | "id": "modelperm-JlSyMbxXeFm42SDjN0wTD26Y", 156 | "object": "model_permission", 157 | "created": 1669070162, 158 | "allow_create_engine": false, 159 | "allow_sampling": true, 160 | "allow_logprobs": true, 161 | "allow_search_indices": false, 162 | "allow_view": true, 163 | "allow_fine_tuning": false, 164 | "organization": "*", 165 | "group": null, 166 | "is_blocking": false 167 | } 168 | ], 169 | "root": "curie-instruct-beta", 170 | "parent": null 171 | }, 172 | { 173 | "id": "babbage-code-search-text", 174 | "object": "model", 175 | "created": 1651172509, 176 | "owned_by": "openai-dev", 177 | "permission": [ 178 | { 179 | "id": "modelperm-Lftf8H4ZPDxNxVs0hHPJBUoe", 180 | "object": "model_permission", 181 | "created": 1669085863, 182 | "allow_create_engine": false, 183 | "allow_sampling": true, 184 | "allow_logprobs": true, 185 | "allow_search_indices": true, 186 | "allow_view": true, 187 | "allow_fine_tuning": false, 188 | "organization": "*", 189 | 
"group": null, 190 | "is_blocking": false 191 | } 192 | ], 193 | "root": "babbage-code-search-text", 194 | "parent": null 195 | }, 196 | { 197 | "id": "babbage-similarity", 198 | "object": "model", 199 | "created": 1651172505, 200 | "owned_by": "openai-dev", 201 | "permission": [ 202 | { 203 | "id": "modelperm-mS20lnPqhebTaFPrcCufyg7m", 204 | "object": "model_permission", 205 | "created": 1669081947, 206 | "allow_create_engine": false, 207 | "allow_sampling": true, 208 | "allow_logprobs": true, 209 | "allow_search_indices": true, 210 | "allow_view": true, 211 | "allow_fine_tuning": false, 212 | "organization": "*", 213 | "group": null, 214 | "is_blocking": false 215 | } 216 | ], 217 | "root": "babbage-similarity", 218 | "parent": null 219 | }, 220 | { 221 | "id": "curie-search-query", 222 | "object": "model", 223 | "created": 1651172509, 224 | "owned_by": "openai-dev", 225 | "permission": [ 226 | { 227 | "id": "modelperm-O30H5MRAHribJNyy87ugfPWF", 228 | "object": "model_permission", 229 | "created": 1669066354, 230 | "allow_create_engine": false, 231 | "allow_sampling": true, 232 | "allow_logprobs": true, 233 | "allow_search_indices": true, 234 | "allow_view": true, 235 | "allow_fine_tuning": false, 236 | "organization": "*", 237 | "group": null, 238 | "is_blocking": false 239 | } 240 | ], 241 | "root": "curie-search-query", 242 | "parent": null 243 | }, 244 | { 245 | "id": "code-search-babbage-text-001", 246 | "object": "model", 247 | "created": 1651172507, 248 | "owned_by": "openai-dev", 249 | "permission": [ 250 | { 251 | "id": "modelperm-EC5ASz4NLChtEV1Cwkmrwm57", 252 | "object": "model_permission", 253 | "created": 1669085863, 254 | "allow_create_engine": false, 255 | "allow_sampling": true, 256 | "allow_logprobs": true, 257 | "allow_search_indices": true, 258 | "allow_view": true, 259 | "allow_fine_tuning": false, 260 | "organization": "*", 261 | "group": null, 262 | "is_blocking": false 263 | } 264 | ], 265 | "root": "code-search-babbage-text-001", 266 | 
"parent": null 267 | }, 268 | { 269 | "id": "text-davinci-003", 270 | "object": "model", 271 | "created": 1669599635, 272 | "owned_by": "openai-internal", 273 | "permission": [ 274 | { 275 | "id": "modelperm-NFSavcOn3jzqNG8maspoV7WA", 276 | "object": "model_permission", 277 | "created": 1670634735, 278 | "allow_create_engine": false, 279 | "allow_sampling": true, 280 | "allow_logprobs": true, 281 | "allow_search_indices": false, 282 | "allow_view": true, 283 | "allow_fine_tuning": false, 284 | "organization": "*", 285 | "group": null, 286 | "is_blocking": false 287 | } 288 | ], 289 | "root": "text-davinci-003", 290 | "parent": null 291 | }, 292 | { 293 | "id": "code-cushman-001", 294 | "object": "model", 295 | "created": 1656081837, 296 | "owned_by": "openai", 297 | "permission": [ 298 | { 299 | "id": "modelperm-M6pwNXr8UmY3mqdUEe4VFXdY", 300 | "object": "model_permission", 301 | "created": 1669066355, 302 | "allow_create_engine": false, 303 | "allow_sampling": true, 304 | "allow_logprobs": true, 305 | "allow_search_indices": false, 306 | "allow_view": true, 307 | "allow_fine_tuning": false, 308 | "organization": "*", 309 | "group": null, 310 | "is_blocking": false 311 | } 312 | ], 313 | "root": "code-cushman-001", 314 | "parent": null 315 | }, 316 | { 317 | "id": "code-search-babbage-code-001", 318 | "object": "model", 319 | "created": 1651172507, 320 | "owned_by": "openai-dev", 321 | "permission": [ 322 | { 323 | "id": "modelperm-64LWHdlANgak2rHzc3K5Stt0", 324 | "object": "model_permission", 325 | "created": 1669085864, 326 | "allow_create_engine": false, 327 | "allow_sampling": true, 328 | "allow_logprobs": true, 329 | "allow_search_indices": true, 330 | "allow_view": true, 331 | "allow_fine_tuning": false, 332 | "organization": "*", 333 | "group": null, 334 | "is_blocking": false 335 | } 336 | ], 337 | "root": "code-search-babbage-code-001", 338 | "parent": null 339 | }, 340 | { 341 | "id": "text-ada-001", 342 | "object": "model", 343 | "created": 1649364042, 
344 | "owned_by": "openai", 345 | "permission": [ 346 | { 347 | "id": "modelperm-KN5dRBCEW4az6gwcGXkRkMwK", 348 | "object": "model_permission", 349 | "created": 1669088497, 350 | "allow_create_engine": false, 351 | "allow_sampling": true, 352 | "allow_logprobs": true, 353 | "allow_search_indices": false, 354 | "allow_view": true, 355 | "allow_fine_tuning": false, 356 | "organization": "*", 357 | "group": null, 358 | "is_blocking": false 359 | } 360 | ], 361 | "root": "text-ada-001", 362 | "parent": null 363 | }, 364 | { 365 | "id": "text-similarity-ada-001", 366 | "object": "model", 367 | "created": 1651172505, 368 | "owned_by": "openai-dev", 369 | "permission": [ 370 | { 371 | "id": "modelperm-DdCqkqmORpqxqdg4TkFRAgmw", 372 | "object": "model_permission", 373 | "created": 1669092759, 374 | "allow_create_engine": false, 375 | "allow_sampling": true, 376 | "allow_logprobs": true, 377 | "allow_search_indices": true, 378 | "allow_view": true, 379 | "allow_fine_tuning": false, 380 | "organization": "*", 381 | "group": null, 382 | "is_blocking": false 383 | } 384 | ], 385 | "root": "text-similarity-ada-001", 386 | "parent": null 387 | }, 388 | { 389 | "id": "text-davinci-insert-002", 390 | "object": "model", 391 | "created": 1649880484, 392 | "owned_by": "openai", 393 | "permission": [ 394 | { 395 | "id": "modelperm-V5YQoSyiapAf4km5wisXkNXh", 396 | "object": "model_permission", 397 | "created": 1669066354, 398 | "allow_create_engine": false, 399 | "allow_sampling": true, 400 | "allow_logprobs": true, 401 | "allow_search_indices": false, 402 | "allow_view": true, 403 | "allow_fine_tuning": false, 404 | "organization": "*", 405 | "group": null, 406 | "is_blocking": false 407 | } 408 | ], 409 | "root": "text-davinci-insert-002", 410 | "parent": null 411 | }, 412 | { 413 | "id": "ada-code-search-code", 414 | "object": "model", 415 | "created": 1651172505, 416 | "owned_by": "openai-dev", 417 | "permission": [ 418 | { 419 | "id": "modelperm-wa8tg4Pi9QQNaWdjMTM8dkkx", 420 | 
"object": "model_permission", 421 | "created": 1669087421, 422 | "allow_create_engine": false, 423 | "allow_sampling": true, 424 | "allow_logprobs": true, 425 | "allow_search_indices": true, 426 | "allow_view": true, 427 | "allow_fine_tuning": false, 428 | "organization": "*", 429 | "group": null, 430 | "is_blocking": false 431 | } 432 | ], 433 | "root": "ada-code-search-code", 434 | "parent": null 435 | }, 436 | { 437 | "id": "ada-similarity", 438 | "object": "model", 439 | "created": 1651172507, 440 | "owned_by": "openai-dev", 441 | "permission": [ 442 | { 443 | "id": "modelperm-LtSIwCEReeDcvGTmM13gv6Fg", 444 | "object": "model_permission", 445 | "created": 1669092759, 446 | "allow_create_engine": false, 447 | "allow_sampling": true, 448 | "allow_logprobs": true, 449 | "allow_search_indices": true, 450 | "allow_view": true, 451 | "allow_fine_tuning": false, 452 | "organization": "*", 453 | "group": null, 454 | "is_blocking": false 455 | } 456 | ], 457 | "root": "ada-similarity", 458 | "parent": null 459 | }, 460 | { 461 | "id": "code-search-ada-text-001", 462 | "object": "model", 463 | "created": 1651172507, 464 | "owned_by": "openai-dev", 465 | "permission": [ 466 | { 467 | "id": "modelperm-JBssaJSmbgvJfTkX71y71k2J", 468 | "object": "model_permission", 469 | "created": 1669087421, 470 | "allow_create_engine": false, 471 | "allow_sampling": true, 472 | "allow_logprobs": true, 473 | "allow_search_indices": true, 474 | "allow_view": true, 475 | "allow_fine_tuning": false, 476 | "organization": "*", 477 | "group": null, 478 | "is_blocking": false 479 | } 480 | ], 481 | "root": "code-search-ada-text-001", 482 | "parent": null 483 | }, 484 | { 485 | "id": "text-search-ada-query-001", 486 | "object": "model", 487 | "created": 1651172505, 488 | "owned_by": "openai-dev", 489 | "permission": [ 490 | { 491 | "id": "modelperm-1YiiBMYC8it0mpQCBK7t8uSP", 492 | "object": "model_permission", 493 | "created": 1669092640, 494 | "allow_create_engine": false, 495 | 
"allow_sampling": true, 496 | "allow_logprobs": true, 497 | "allow_search_indices": true, 498 | "allow_view": true, 499 | "allow_fine_tuning": false, 500 | "organization": "*", 501 | "group": null, 502 | "is_blocking": false 503 | } 504 | ], 505 | "root": "text-search-ada-query-001", 506 | "parent": null 507 | }, 508 | { 509 | "id": "text-curie-001", 510 | "object": "model", 511 | "created": 1649364043, 512 | "owned_by": "openai", 513 | "permission": [ 514 | { 515 | "id": "modelperm-fGAoEKBH01KNZ3zz81Sro34Q", 516 | "object": "model_permission", 517 | "created": 1669066352, 518 | "allow_create_engine": false, 519 | "allow_sampling": true, 520 | "allow_logprobs": true, 521 | "allow_search_indices": false, 522 | "allow_view": true, 523 | "allow_fine_tuning": false, 524 | "organization": "*", 525 | "group": null, 526 | "is_blocking": false 527 | } 528 | ], 529 | "root": "text-curie-001", 530 | "parent": null 531 | }, 532 | { 533 | "id": "text-davinci-edit-001", 534 | "object": "model", 535 | "created": 1649809179, 536 | "owned_by": "openai", 537 | "permission": [ 538 | { 539 | "id": "modelperm-VzNMGrIRm3HxhEl64gkjZdEh", 540 | "object": "model_permission", 541 | "created": 1669066354, 542 | "allow_create_engine": false, 543 | "allow_sampling": true, 544 | "allow_logprobs": true, 545 | "allow_search_indices": false, 546 | "allow_view": true, 547 | "allow_fine_tuning": false, 548 | "organization": "*", 549 | "group": null, 550 | "is_blocking": false 551 | } 552 | ], 553 | "root": "text-davinci-edit-001", 554 | "parent": null 555 | }, 556 | { 557 | "id": "davinci-search-document", 558 | "object": "model", 559 | "created": 1651172509, 560 | "owned_by": "openai-dev", 561 | "permission": [ 562 | { 563 | "id": "modelperm-M43LVJQRGxz6ode34ctLrCaG", 564 | "object": "model_permission", 565 | "created": 1669066355, 566 | "allow_create_engine": false, 567 | "allow_sampling": true, 568 | "allow_logprobs": true, 569 | "allow_search_indices": true, 570 | "allow_view": true, 571 | 
"allow_fine_tuning": false, 572 | "organization": "*", 573 | "group": null, 574 | "is_blocking": false 575 | } 576 | ], 577 | "root": "davinci-search-document", 578 | "parent": null 579 | }, 580 | { 581 | "id": "text-davinci-002", 582 | "object": "model", 583 | "created": 1649880484, 584 | "owned_by": "openai", 585 | "permission": [ 586 | { 587 | "id": "modelperm-8KR0E7NnNJoVI9k6YKFbMcQi", 588 | "object": "model_permission", 589 | "created": 1670635824, 590 | "allow_create_engine": false, 591 | "allow_sampling": true, 592 | "allow_logprobs": true, 593 | "allow_search_indices": false, 594 | "allow_view": true, 595 | "allow_fine_tuning": false, 596 | "organization": "*", 597 | "group": null, 598 | "is_blocking": false 599 | } 600 | ], 601 | "root": "text-davinci-002", 602 | "parent": null 603 | }, 604 | { 605 | "id": "ada-code-search-text", 606 | "object": "model", 607 | "created": 1651172510, 608 | "owned_by": "openai-dev", 609 | "permission": [ 610 | { 611 | "id": "modelperm-kFc17wOI4d1FjZEaCqnk4Frg", 612 | "object": "model_permission", 613 | "created": 1669087421, 614 | "allow_create_engine": false, 615 | "allow_sampling": true, 616 | "allow_logprobs": true, 617 | "allow_search_indices": true, 618 | "allow_view": true, 619 | "allow_fine_tuning": false, 620 | "organization": "*", 621 | "group": null, 622 | "is_blocking": false 623 | } 624 | ], 625 | "root": "ada-code-search-text", 626 | "parent": null 627 | }, 628 | { 629 | "id": "text-search-ada-doc-001", 630 | "object": "model", 631 | "created": 1651172507, 632 | "owned_by": "openai-dev", 633 | "permission": [ 634 | { 635 | "id": "modelperm-kbHvYouDlkD78ehcmMOGdKpK", 636 | "object": "model_permission", 637 | "created": 1669092640, 638 | "allow_create_engine": false, 639 | "allow_sampling": true, 640 | "allow_logprobs": true, 641 | "allow_search_indices": true, 642 | "allow_view": true, 643 | "allow_fine_tuning": false, 644 | "organization": "*", 645 | "group": null, 646 | "is_blocking": false 647 | } 648 | ], 649 
| "root": "text-search-ada-doc-001", 650 | "parent": null 651 | }, 652 | { 653 | "id": "code-davinci-edit-001", 654 | "object": "model", 655 | "created": 1649880484, 656 | "owned_by": "openai", 657 | "permission": [ 658 | { 659 | "id": "modelperm-WwansDxcKNvZtKugNqJnsvfv", 660 | "object": "model_permission", 661 | "created": 1669066354, 662 | "allow_create_engine": false, 663 | "allow_sampling": true, 664 | "allow_logprobs": true, 665 | "allow_search_indices": false, 666 | "allow_view": true, 667 | "allow_fine_tuning": false, 668 | "organization": "*", 669 | "group": null, 670 | "is_blocking": false 671 | } 672 | ], 673 | "root": "code-davinci-edit-001", 674 | "parent": null 675 | }, 676 | { 677 | "id": "davinci-instruct-beta", 678 | "object": "model", 679 | "created": 1649364042, 680 | "owned_by": "openai", 681 | "permission": [ 682 | { 683 | "id": "modelperm-k9kuMYlfd9nvFiJV2ug0NWws", 684 | "object": "model_permission", 685 | "created": 1669066356, 686 | "allow_create_engine": false, 687 | "allow_sampling": true, 688 | "allow_logprobs": true, 689 | "allow_search_indices": false, 690 | "allow_view": true, 691 | "allow_fine_tuning": false, 692 | "organization": "*", 693 | "group": null, 694 | "is_blocking": false 695 | } 696 | ], 697 | "root": "davinci-instruct-beta", 698 | "parent": null 699 | }, 700 | { 701 | "id": "text-babbage-001", 702 | "object": "model", 703 | "created": 1649364043, 704 | "owned_by": "openai", 705 | "permission": [ 706 | { 707 | "id": "modelperm-hAf2iBGMqLmqB9HZiwrp1gL7", 708 | "object": "model_permission", 709 | "created": 1669086409, 710 | "allow_create_engine": false, 711 | "allow_sampling": true, 712 | "allow_logprobs": true, 713 | "allow_search_indices": false, 714 | "allow_view": true, 715 | "allow_fine_tuning": false, 716 | "organization": "*", 717 | "group": null, 718 | "is_blocking": false 719 | } 720 | ], 721 | "root": "text-babbage-001", 722 | "parent": null 723 | }, 724 | { 725 | "id": "text-similarity-curie-001", 726 | "object": 
"model", 727 | "created": 1651172507, 728 | "owned_by": "openai-dev", 729 | "permission": [ 730 | { 731 | "id": "modelperm-6dgTTyXrZE7d53Licw4hYkvd", 732 | "object": "model_permission", 733 | "created": 1669079883, 734 | "allow_create_engine": false, 735 | "allow_sampling": true, 736 | "allow_logprobs": true, 737 | "allow_search_indices": true, 738 | "allow_view": true, 739 | "allow_fine_tuning": false, 740 | "organization": "*", 741 | "group": null, 742 | "is_blocking": false 743 | } 744 | ], 745 | "root": "text-similarity-curie-001", 746 | "parent": null 747 | }, 748 | { 749 | "id": "code-search-ada-code-001", 750 | "object": "model", 751 | "created": 1651172507, 752 | "owned_by": "openai-dev", 753 | "permission": [ 754 | { 755 | "id": "modelperm-8soch45iiGvux5Fg1ORjdC4s", 756 | "object": "model_permission", 757 | "created": 1669087421, 758 | "allow_create_engine": false, 759 | "allow_sampling": true, 760 | "allow_logprobs": true, 761 | "allow_search_indices": true, 762 | "allow_view": true, 763 | "allow_fine_tuning": false, 764 | "organization": "*", 765 | "group": null, 766 | "is_blocking": false 767 | } 768 | ], 769 | "root": "code-search-ada-code-001", 770 | "parent": null 771 | }, 772 | { 773 | "id": "ada-search-query", 774 | "object": "model", 775 | "created": 1651172505, 776 | "owned_by": "openai-dev", 777 | "permission": [ 778 | { 779 | "id": "modelperm-b753xmIzAUkluQ1L20eDZLtQ", 780 | "object": "model_permission", 781 | "created": 1669092640, 782 | "allow_create_engine": false, 783 | "allow_sampling": true, 784 | "allow_logprobs": true, 785 | "allow_search_indices": true, 786 | "allow_view": true, 787 | "allow_fine_tuning": false, 788 | "organization": "*", 789 | "group": null, 790 | "is_blocking": false 791 | } 792 | ], 793 | "root": "ada-search-query", 794 | "parent": null 795 | }, 796 | { 797 | "id": "text-search-davinci-query-001", 798 | "object": "model", 799 | "created": 1651172505, 800 | "owned_by": "openai-dev", 801 | "permission": [ 802 | { 803 
| "id": "modelperm-9McKbsEYSaDshU9M3bp6ejUb", 804 | "object": "model_permission", 805 | "created": 1669066353, 806 | "allow_create_engine": false, 807 | "allow_sampling": true, 808 | "allow_logprobs": true, 809 | "allow_search_indices": true, 810 | "allow_view": true, 811 | "allow_fine_tuning": false, 812 | "organization": "*", 813 | "group": null, 814 | "is_blocking": false 815 | } 816 | ], 817 | "root": "text-search-davinci-query-001", 818 | "parent": null 819 | }, 820 | { 821 | "id": "code-davinci-002", 822 | "object": "model", 823 | "created": 1649880485, 824 | "owned_by": "openai", 825 | "permission": [ 826 | { 827 | "id": "modelperm-0nYerYB9CGacUm6ruEqzNiQ4", 828 | "object": "model_permission", 829 | "created": 1670635824, 830 | "allow_create_engine": false, 831 | "allow_sampling": true, 832 | "allow_logprobs": true, 833 | "allow_search_indices": false, 834 | "allow_view": true, 835 | "allow_fine_tuning": false, 836 | "organization": "*", 837 | "group": null, 838 | "is_blocking": false 839 | } 840 | ], 841 | "root": "code-davinci-002", 842 | "parent": null 843 | }, 844 | { 845 | "id": "curie-similarity", 846 | "object": "model", 847 | "created": 1651172510, 848 | "owned_by": "openai-dev", 849 | "permission": [ 850 | { 851 | "id": "modelperm-z9GtwMD6HcxKqvsPfDm0PSg6", 852 | "object": "model_permission", 853 | "created": 1669079884, 854 | "allow_create_engine": false, 855 | "allow_sampling": true, 856 | "allow_logprobs": true, 857 | "allow_search_indices": true, 858 | "allow_view": true, 859 | "allow_fine_tuning": false, 860 | "organization": "*", 861 | "group": null, 862 | "is_blocking": false 863 | } 864 | ], 865 | "root": "curie-similarity", 866 | "parent": null 867 | }, 868 | { 869 | "id": "davinci-search-query", 870 | "object": "model", 871 | "created": 1651172505, 872 | "owned_by": "openai-dev", 873 | "permission": [ 874 | { 875 | "id": "modelperm-lYkiTZMmJMWm8jvkPx2duyHE", 876 | "object": "model_permission", 877 | "created": 1669066353, 878 | 
"allow_create_engine": false, 879 | "allow_sampling": true, 880 | "allow_logprobs": true, 881 | "allow_search_indices": true, 882 | "allow_view": true, 883 | "allow_fine_tuning": false, 884 | "organization": "*", 885 | "group": null, 886 | "is_blocking": false 887 | } 888 | ], 889 | "root": "davinci-search-query", 890 | "parent": null 891 | }, 892 | { 893 | "id": "text-davinci-insert-001", 894 | "object": "model", 895 | "created": 1649880484, 896 | "owned_by": "openai", 897 | "permission": [ 898 | { 899 | "id": "modelperm-3gRQMBOMoccZIURE3ZxboZWA", 900 | "object": "model_permission", 901 | "created": 1669066354, 902 | "allow_create_engine": false, 903 | "allow_sampling": true, 904 | "allow_logprobs": true, 905 | "allow_search_indices": false, 906 | "allow_view": true, 907 | "allow_fine_tuning": false, 908 | "organization": "*", 909 | "group": null, 910 | "is_blocking": false 911 | } 912 | ], 913 | "root": "text-davinci-insert-001", 914 | "parent": null 915 | }, 916 | { 917 | "id": "babbage-search-document", 918 | "object": "model", 919 | "created": 1651172510, 920 | "owned_by": "openai-dev", 921 | "permission": [ 922 | { 923 | "id": "modelperm-5qFV9kxCRGKIXpBEP75chmp7", 924 | "object": "model_permission", 925 | "created": 1669084981, 926 | "allow_create_engine": false, 927 | "allow_sampling": true, 928 | "allow_logprobs": true, 929 | "allow_search_indices": true, 930 | "allow_view": true, 931 | "allow_fine_tuning": false, 932 | "organization": "*", 933 | "group": null, 934 | "is_blocking": false 935 | } 936 | ], 937 | "root": "babbage-search-document", 938 | "parent": null 939 | }, 940 | { 941 | "id": "ada-search-document", 942 | "object": "model", 943 | "created": 1651172507, 944 | "owned_by": "openai-dev", 945 | "permission": [ 946 | { 947 | "id": "modelperm-8qUMuMAbo4EwedbGamV7e9hq", 948 | "object": "model_permission", 949 | "created": 1669092640, 950 | "allow_create_engine": false, 951 | "allow_sampling": true, 952 | "allow_logprobs": true, 953 | 
"allow_search_indices": true, 954 | "allow_view": true, 955 | "allow_fine_tuning": false, 956 | "organization": "*", 957 | "group": null, 958 | "is_blocking": false 959 | } 960 | ], 961 | "root": "ada-search-document", 962 | "parent": null 963 | }, 964 | { 965 | "id": "curie", 966 | "object": "model", 967 | "created": 1649359874, 968 | "owned_by": "openai", 969 | "permission": [ 970 | { 971 | "id": "modelperm-NvPNUvr0g9gAt3B6Uw4sZ2do", 972 | "object": "model_permission", 973 | "created": 1669080023, 974 | "allow_create_engine": false, 975 | "allow_sampling": true, 976 | "allow_logprobs": true, 977 | "allow_search_indices": false, 978 | "allow_view": true, 979 | "allow_fine_tuning": false, 980 | "organization": "*", 981 | "group": null, 982 | "is_blocking": false 983 | } 984 | ], 985 | "root": "curie", 986 | "parent": null 987 | }, 988 | { 989 | "id": "text-search-babbage-doc-001", 990 | "object": "model", 991 | "created": 1651172509, 992 | "owned_by": "openai-dev", 993 | "permission": [ 994 | { 995 | "id": "modelperm-ao2r26P2Th7nhRFleHwy2gn5", 996 | "object": "model_permission", 997 | "created": 1669084981, 998 | "allow_create_engine": false, 999 | "allow_sampling": true, 1000 | "allow_logprobs": true, 1001 | "allow_search_indices": true, 1002 | "allow_view": true, 1003 | "allow_fine_tuning": false, 1004 | "organization": "*", 1005 | "group": null, 1006 | "is_blocking": false 1007 | } 1008 | ], 1009 | "root": "text-search-babbage-doc-001", 1010 | "parent": null 1011 | }, 1012 | { 1013 | "id": "text-search-curie-doc-001", 1014 | "object": "model", 1015 | "created": 1651172509, 1016 | "owned_by": "openai-dev", 1017 | "permission": [ 1018 | { 1019 | "id": "modelperm-zjXVr8IzHdqV5Qtg5lgxS7Ci", 1020 | "object": "model_permission", 1021 | "created": 1669066353, 1022 | "allow_create_engine": false, 1023 | "allow_sampling": true, 1024 | "allow_logprobs": true, 1025 | "allow_search_indices": true, 1026 | "allow_view": true, 1027 | "allow_fine_tuning": false, 1028 | 
"organization": "*", 1029 | "group": null, 1030 | "is_blocking": false 1031 | } 1032 | ], 1033 | "root": "text-search-curie-doc-001", 1034 | "parent": null 1035 | }, 1036 | { 1037 | "id": "text-search-curie-query-001", 1038 | "object": "model", 1039 | "created": 1651172509, 1040 | "owned_by": "openai-dev", 1041 | "permission": [ 1042 | { 1043 | "id": "modelperm-a58jAWPMqgJQffbNus8is1EM", 1044 | "object": "model_permission", 1045 | "created": 1669066357, 1046 | "allow_create_engine": false, 1047 | "allow_sampling": true, 1048 | "allow_logprobs": true, 1049 | "allow_search_indices": true, 1050 | "allow_view": true, 1051 | "allow_fine_tuning": false, 1052 | "organization": "*", 1053 | "group": null, 1054 | "is_blocking": false 1055 | } 1056 | ], 1057 | "root": "text-search-curie-query-001", 1058 | "parent": null 1059 | }, 1060 | { 1061 | "id": "babbage-search-query", 1062 | "object": "model", 1063 | "created": 1651172509, 1064 | "owned_by": "openai-dev", 1065 | "permission": [ 1066 | { 1067 | "id": "modelperm-wSs1hMXDKsrcErlbN8HmzlLE", 1068 | "object": "model_permission", 1069 | "created": 1669084981, 1070 | "allow_create_engine": false, 1071 | "allow_sampling": true, 1072 | "allow_logprobs": true, 1073 | "allow_search_indices": true, 1074 | "allow_view": true, 1075 | "allow_fine_tuning": false, 1076 | "organization": "*", 1077 | "group": null, 1078 | "is_blocking": false 1079 | } 1080 | ], 1081 | "root": "babbage-search-query", 1082 | "parent": null 1083 | }, 1084 | { 1085 | "id": "text-search-davinci-doc-001", 1086 | "object": "model", 1087 | "created": 1651172505, 1088 | "owned_by": "openai-dev", 1089 | "permission": [ 1090 | { 1091 | "id": "modelperm-qhSf1j2MJMujcu3t7cHnF1DN", 1092 | "object": "model_permission", 1093 | "created": 1669066353, 1094 | "allow_create_engine": false, 1095 | "allow_sampling": true, 1096 | "allow_logprobs": true, 1097 | "allow_search_indices": true, 1098 | "allow_view": true, 1099 | "allow_fine_tuning": false, 1100 | "organization": "*", 
1101 | "group": null, 1102 | "is_blocking": false 1103 | } 1104 | ], 1105 | "root": "text-search-davinci-doc-001", 1106 | "parent": null 1107 | }, 1108 | { 1109 | "id": "text-search-babbage-query-001", 1110 | "object": "model", 1111 | "created": 1651172509, 1112 | "owned_by": "openai-dev", 1113 | "permission": [ 1114 | { 1115 | "id": "modelperm-Kg70kkFxD93QQqsVe4Zw8vjc", 1116 | "object": "model_permission", 1117 | "created": 1669084981, 1118 | "allow_create_engine": false, 1119 | "allow_sampling": true, 1120 | "allow_logprobs": true, 1121 | "allow_search_indices": true, 1122 | "allow_view": true, 1123 | "allow_fine_tuning": false, 1124 | "organization": "*", 1125 | "group": null, 1126 | "is_blocking": false 1127 | } 1128 | ], 1129 | "root": "text-search-babbage-query-001", 1130 | "parent": null 1131 | }, 1132 | { 1133 | "id": "curie-search-document", 1134 | "object": "model", 1135 | "created": 1651172508, 1136 | "owned_by": "openai-dev", 1137 | "permission": [ 1138 | { 1139 | "id": "modelperm-1xwmXNDpvKlQj3erOEVKZVjO", 1140 | "object": "model_permission", 1141 | "created": 1669066353, 1142 | "allow_create_engine": false, 1143 | "allow_sampling": true, 1144 | "allow_logprobs": true, 1145 | "allow_search_indices": true, 1146 | "allow_view": true, 1147 | "allow_fine_tuning": false, 1148 | "organization": "*", 1149 | "group": null, 1150 | "is_blocking": false 1151 | } 1152 | ], 1153 | "root": "curie-search-document", 1154 | "parent": null 1155 | }, 1156 | { 1157 | "id": "text-similarity-davinci-001", 1158 | "object": "model", 1159 | "created": 1651172505, 1160 | "owned_by": "openai-dev", 1161 | "permission": [ 1162 | { 1163 | "id": "modelperm-OvmcfYoq5V9SF9xTYw1Oz6Ue", 1164 | "object": "model_permission", 1165 | "created": 1669066356, 1166 | "allow_create_engine": false, 1167 | "allow_sampling": true, 1168 | "allow_logprobs": true, 1169 | "allow_search_indices": true, 1170 | "allow_view": true, 1171 | "allow_fine_tuning": false, 1172 | "organization": "*", 1173 | 
"group": null, 1174 | "is_blocking": false 1175 | } 1176 | ], 1177 | "root": "text-similarity-davinci-001", 1178 | "parent": null 1179 | }, 1180 | { 1181 | "id": "audio-transcribe-001", 1182 | "object": "model", 1183 | "created": 1656447449, 1184 | "owned_by": "openai", 1185 | "permission": [ 1186 | { 1187 | "id": "modelperm-DEyvUa4t6g4mVL1AmmtB0SHO", 1188 | "object": "model_permission", 1189 | "created": 1669066355, 1190 | "allow_create_engine": false, 1191 | "allow_sampling": true, 1192 | "allow_logprobs": true, 1193 | "allow_search_indices": false, 1194 | "allow_view": true, 1195 | "allow_fine_tuning": false, 1196 | "organization": "*", 1197 | "group": null, 1198 | "is_blocking": false 1199 | } 1200 | ], 1201 | "root": "audio-transcribe-001", 1202 | "parent": null 1203 | }, 1204 | { 1205 | "id": "davinci-similarity", 1206 | "object": "model", 1207 | "created": 1651172509, 1208 | "owned_by": "openai-dev", 1209 | "permission": [ 1210 | { 1211 | "id": "modelperm-lYYgng3LM0Y97HvB5CDc8no2", 1212 | "object": "model_permission", 1213 | "created": 1669066353, 1214 | "allow_create_engine": false, 1215 | "allow_sampling": true, 1216 | "allow_logprobs": true, 1217 | "allow_search_indices": true, 1218 | "allow_view": true, 1219 | "allow_fine_tuning": false, 1220 | "organization": "*", 1221 | "group": null, 1222 | "is_blocking": false 1223 | } 1224 | ], 1225 | "root": "davinci-similarity", 1226 | "parent": null 1227 | }, 1228 | { 1229 | "id": "cushman:2020-05-03", 1230 | "object": "model", 1231 | "created": 1590625110, 1232 | "owned_by": "system", 1233 | "permission": [ 1234 | { 1235 | "id": "snapperm-FAup8P1KqclNlTsunLDRiesT", 1236 | "object": "model_permission", 1237 | "created": 1590625111, 1238 | "allow_create_engine": false, 1239 | "allow_sampling": true, 1240 | "allow_logprobs": true, 1241 | "allow_search_indices": false, 1242 | "allow_view": true, 1243 | "allow_fine_tuning": true, 1244 | "organization": "*", 1245 | "group": null, 1246 | "is_blocking": false 1247 | } 
1248 | ], 1249 | "root": "cushman:2020-05-03", 1250 | "parent": null 1251 | }, 1252 | { 1253 | "id": "ada:2020-05-03", 1254 | "object": "model", 1255 | "created": 1607631625, 1256 | "owned_by": "system", 1257 | "permission": [ 1258 | { 1259 | "id": "snapperm-9TYofAqUs54vytKYL0IX91rX", 1260 | "object": "model_permission", 1261 | "created": 1607631626, 1262 | "allow_create_engine": false, 1263 | "allow_sampling": true, 1264 | "allow_logprobs": true, 1265 | "allow_search_indices": false, 1266 | "allow_view": true, 1267 | "allow_fine_tuning": false, 1268 | "organization": "*", 1269 | "group": null, 1270 | "is_blocking": false 1271 | } 1272 | ], 1273 | "root": "ada:2020-05-03", 1274 | "parent": null 1275 | }, 1276 | { 1277 | "id": "babbage:2020-05-03", 1278 | "object": "model", 1279 | "created": 1607632611, 1280 | "owned_by": "system", 1281 | "permission": [ 1282 | { 1283 | "id": "snapperm-jaLAcmyyNuaVmalCE1BGTGwf", 1284 | "object": "model_permission", 1285 | "created": 1607632613, 1286 | "allow_create_engine": false, 1287 | "allow_sampling": true, 1288 | "allow_logprobs": true, 1289 | "allow_search_indices": false, 1290 | "allow_view": true, 1291 | "allow_fine_tuning": false, 1292 | "organization": "*", 1293 | "group": null, 1294 | "is_blocking": false 1295 | } 1296 | ], 1297 | "root": "babbage:2020-05-03", 1298 | "parent": null 1299 | }, 1300 | { 1301 | "id": "curie:2020-05-03", 1302 | "object": "model", 1303 | "created": 1607632725, 1304 | "owned_by": "system", 1305 | "permission": [ 1306 | { 1307 | "id": "snapperm-bt6R8PWbB2SwK5evFo0ZxSs4", 1308 | "object": "model_permission", 1309 | "created": 1607632727, 1310 | "allow_create_engine": false, 1311 | "allow_sampling": true, 1312 | "allow_logprobs": true, 1313 | "allow_search_indices": false, 1314 | "allow_view": true, 1315 | "allow_fine_tuning": false, 1316 | "organization": "*", 1317 | "group": null, 1318 | "is_blocking": false 1319 | } 1320 | ], 1321 | "root": "curie:2020-05-03", 1322 | "parent": null 1323 | }, 
1324 | { 1325 | "id": "davinci:2020-05-03", 1326 | "object": "model", 1327 | "created": 1607640163, 1328 | "owned_by": "system", 1329 | "permission": [ 1330 | { 1331 | "id": "snapperm-99cbfQTYDVeLkTYndX3UMpSr", 1332 | "object": "model_permission", 1333 | "created": 1607640164, 1334 | "allow_create_engine": false, 1335 | "allow_sampling": true, 1336 | "allow_logprobs": true, 1337 | "allow_search_indices": false, 1338 | "allow_view": true, 1339 | "allow_fine_tuning": false, 1340 | "organization": "*", 1341 | "group": null, 1342 | "is_blocking": false 1343 | } 1344 | ], 1345 | "root": "davinci:2020-05-03", 1346 | "parent": null 1347 | }, 1348 | { 1349 | "id": "if-davinci-v2", 1350 | "object": "model", 1351 | "created": 1610745990, 1352 | "owned_by": "openai", 1353 | "permission": [ 1354 | { 1355 | "id": "snapperm-58q0TdK2K4kMgL3MoHvGWMlH", 1356 | "object": "model_permission", 1357 | "created": 1610746036, 1358 | "allow_create_engine": false, 1359 | "allow_sampling": true, 1360 | "allow_logprobs": true, 1361 | "allow_search_indices": false, 1362 | "allow_view": true, 1363 | "allow_fine_tuning": false, 1364 | "organization": "*", 1365 | "group": null, 1366 | "is_blocking": false 1367 | } 1368 | ], 1369 | "root": "if-davinci-v2", 1370 | "parent": null 1371 | }, 1372 | { 1373 | "id": "if-curie-v2", 1374 | "object": "model", 1375 | "created": 1610745968, 1376 | "owned_by": "openai", 1377 | "permission": [ 1378 | { 1379 | "id": "snapperm-fwAseHVq6NGe6Ple6tKfzRSK", 1380 | "object": "model_permission", 1381 | "created": 1610746043, 1382 | "allow_create_engine": false, 1383 | "allow_sampling": true, 1384 | "allow_logprobs": true, 1385 | "allow_search_indices": false, 1386 | "allow_view": true, 1387 | "allow_fine_tuning": false, 1388 | "organization": "*", 1389 | "group": null, 1390 | "is_blocking": false 1391 | } 1392 | ], 1393 | "root": "if-curie-v2", 1394 | "parent": null 1395 | }, 1396 | { 1397 | "id": "if-davinci:3.0.0", 1398 | "object": "model", 1399 | "created": 
1629420755, 1400 | "owned_by": "openai", 1401 | "permission": [ 1402 | { 1403 | "id": "snapperm-T53lssiyMWwiuJwhyO9ic53z", 1404 | "object": "model_permission", 1405 | "created": 1629421809, 1406 | "allow_create_engine": false, 1407 | "allow_sampling": true, 1408 | "allow_logprobs": true, 1409 | "allow_search_indices": false, 1410 | "allow_view": true, 1411 | "allow_fine_tuning": true, 1412 | "organization": "*", 1413 | "group": null, 1414 | "is_blocking": false 1415 | } 1416 | ], 1417 | "root": "if-davinci:3.0.0", 1418 | "parent": null 1419 | }, 1420 | { 1421 | "id": "davinci-if:3.0.0", 1422 | "object": "model", 1423 | "created": 1629498070, 1424 | "owned_by": "openai", 1425 | "permission": [ 1426 | { 1427 | "id": "snapperm-s6ZIAVMwlZwrLGGClTXqSK3Q", 1428 | "object": "model_permission", 1429 | "created": 1629498084, 1430 | "allow_create_engine": false, 1431 | "allow_sampling": true, 1432 | "allow_logprobs": true, 1433 | "allow_search_indices": false, 1434 | "allow_view": true, 1435 | "allow_fine_tuning": true, 1436 | "organization": "*", 1437 | "group": null, 1438 | "is_blocking": false 1439 | } 1440 | ], 1441 | "root": "davinci-if:3.0.0", 1442 | "parent": null 1443 | }, 1444 | { 1445 | "id": "davinci-instruct-beta:2.0.0", 1446 | "object": "model", 1447 | "created": 1629501914, 1448 | "owned_by": "openai", 1449 | "permission": [ 1450 | { 1451 | "id": "snapperm-c70U4TBfiOD839xptP5pJzyc", 1452 | "object": "model_permission", 1453 | "created": 1629501939, 1454 | "allow_create_engine": false, 1455 | "allow_sampling": true, 1456 | "allow_logprobs": true, 1457 | "allow_search_indices": false, 1458 | "allow_view": true, 1459 | "allow_fine_tuning": true, 1460 | "organization": "*", 1461 | "group": null, 1462 | "is_blocking": false 1463 | } 1464 | ], 1465 | "root": "davinci-instruct-beta:2.0.0", 1466 | "parent": null 1467 | }, 1468 | { 1469 | "id": "text-ada:001", 1470 | "object": "model", 1471 | "created": 1641949608, 1472 | "owned_by": "system", 1473 | "permission": [ 
1474 | { 1475 | "id": "snapperm-d2PSnwFG1Yn9of6PvrrhkBcU", 1476 | "object": "model_permission", 1477 | "created": 1641949610, 1478 | "allow_create_engine": false, 1479 | "allow_sampling": true, 1480 | "allow_logprobs": true, 1481 | "allow_search_indices": false, 1482 | "allow_view": true, 1483 | "allow_fine_tuning": false, 1484 | "organization": "*", 1485 | "group": null, 1486 | "is_blocking": false 1487 | } 1488 | ], 1489 | "root": "text-ada:001", 1490 | "parent": null 1491 | }, 1492 | { 1493 | "id": "text-davinci:001", 1494 | "object": "model", 1495 | "created": 1641943966, 1496 | "owned_by": "system", 1497 | "permission": [ 1498 | { 1499 | "id": "snapperm-Fj1O3zkKXOQy6AkcfQXRKcWA", 1500 | "object": "model_permission", 1501 | "created": 1641944340, 1502 | "allow_create_engine": false, 1503 | "allow_sampling": true, 1504 | "allow_logprobs": true, 1505 | "allow_search_indices": false, 1506 | "allow_view": true, 1507 | "allow_fine_tuning": false, 1508 | "organization": "*", 1509 | "group": null, 1510 | "is_blocking": false 1511 | } 1512 | ], 1513 | "root": "text-davinci:001", 1514 | "parent": null 1515 | }, 1516 | { 1517 | "id": "text-curie:001", 1518 | "object": "model", 1519 | "created": 1641955047, 1520 | "owned_by": "system", 1521 | "permission": [ 1522 | { 1523 | "id": "snapperm-BI9TAT6SCj43JRsUb9CYadsz", 1524 | "object": "model_permission", 1525 | "created": 1641955123, 1526 | "allow_create_engine": false, 1527 | "allow_sampling": true, 1528 | "allow_logprobs": true, 1529 | "allow_search_indices": false, 1530 | "allow_view": true, 1531 | "allow_fine_tuning": false, 1532 | "organization": "*", 1533 | "group": null, 1534 | "is_blocking": false 1535 | } 1536 | ], 1537 | "root": "text-curie:001", 1538 | "parent": null 1539 | }, 1540 | { 1541 | "id": "text-babbage:001", 1542 | "object": "model", 1543 | "created": 1642018370, 1544 | "owned_by": "openai", 1545 | "permission": [ 1546 | { 1547 | "id": "snapperm-7oP3WFr9x7qf5xb3eZrVABAH", 1548 | "object": 
"model_permission", 1549 | "created": 1642018480, 1550 | "allow_create_engine": false, 1551 | "allow_sampling": true, 1552 | "allow_logprobs": true, 1553 | "allow_search_indices": false, 1554 | "allow_view": true, 1555 | "allow_fine_tuning": false, 1556 | "organization": "*", 1557 | "group": null, 1558 | "is_blocking": false 1559 | } 1560 | ], 1561 | "root": "text-babbage:001", 1562 | "parent": null 1563 | } 1564 | ] 1565 | } 1566 | --------------------------------------------------------------------------------