├── test
├── test_helper.exs
├── testdata
│ ├── audio.wav
│ └── duck.png
├── audio_tts_test.exs
├── audio_transcription_test.exs
├── models_test.exs
├── ex_openai
│ ├── codegen_test.exs
│ └── codegen
│ │ ├── parse_type_test.exs
│ │ ├── type_spec_test.exs
│ │ ├── convert_response_test.exs
│ │ ├── finalize_schema_test.exs
│ │ └── parse_path_test.exs
├── config_test.exs
├── completion_test.exs
├── client_test.exs
├── pass_api_credentials_test.exs
├── assistant_test.exs
├── image_test.exs
├── jason_encoder_test.exs
├── chat_test.exs
├── fixture
│ └── vcr_cassettes
│ │ ├── list_models_custom_key.json
│ │ ├── list_models_custom_key_env.json
│ │ ├── completion_basic_prompt.json
│ │ ├── audio_transcription.json
│ │ ├── chat_basic_completion.json
│ │ ├── image_variation.json
│ │ ├── image_variation_tuple.json
│ │ ├── responses_basic_usage.json
│ │ ├── responses_basic_usage_second_message.json
│ │ └── math_assistant.json
├── responses_test.exs
└── streaming_client_test.exs
├── .tool-versions
├── images
├── autocomplete.png
├── diagnostics.png
└── functiondocs.png
├── .formatter.exs
├── .editorconfig
├── config
└── config.exs
├── .github
└── workflows
│ └── test.yml
├── lib
├── mix
│ └── tasks
│ │ └── update_openai_docs.ex
├── ex_openai
│ ├── Jason.ex
│ ├── config.ex
│ ├── components
│ │ └── model.ex
│ ├── client.ex
│ └── streaming_client.ex
└── ex_openai.ex
├── .gitignore
├── LICENSE
├── mix.exs
├── README.md
├── docs
├── configuration.md
├── streaming.md
├── examples.md
└── codegen.md
└── mix.lock
/test/test_helper.exs:
--------------------------------------------------------------------------------
1 | ExUnit.start()  # boot the ExUnit runner; must happen before any test modules are loaded
2 |
--------------------------------------------------------------------------------
/.tool-versions:
--------------------------------------------------------------------------------
1 | elixir 1.16.1
2 | erlang 26.2
3 |
--------------------------------------------------------------------------------
/images/autocomplete.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dvcrn/ex_openai/HEAD/images/autocomplete.png
--------------------------------------------------------------------------------
/images/diagnostics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dvcrn/ex_openai/HEAD/images/diagnostics.png
--------------------------------------------------------------------------------
/images/functiondocs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dvcrn/ex_openai/HEAD/images/functiondocs.png
--------------------------------------------------------------------------------
/test/testdata/audio.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dvcrn/ex_openai/HEAD/test/testdata/audio.wav
--------------------------------------------------------------------------------
/test/testdata/duck.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dvcrn/ex_openai/HEAD/test/testdata/duck.png
--------------------------------------------------------------------------------
/.formatter.exs:
--------------------------------------------------------------------------------
1 | # Used by "mix format"
2 | [
3 | inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"]
4 | ]
5 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | [*]
4 | indent_style = tab
5 | indent_size = 2
6 | end_of_line = lf
7 | charset = utf-8
8 | trim_trailing_whitespace = true
9 | insert_final_newline = true
10 |
--------------------------------------------------------------------------------
/config/config.exs:
--------------------------------------------------------------------------------
1 | import Config
2 |
3 | if config_env() == :test do  # ExVCR is a test-only dependency, so only configure it in the :test env
4 | config :exvcr,
5 | filter_request_headers: [  # scrub credentials from recorded cassettes; the casing variants are presumably needed because header-name matching is case-sensitive — TODO confirm
6 | "OpenAI-Organization",
7 | "Openai-Organization",
8 | "openai-organization",
9 | "Authorization",
10 | "Set-Cookie"
11 | ]
12 | end
13 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | on: [push, pull_request]  # run the suite for every push and pull request
2 |
3 | jobs:
4 | test:
5 | runs-on: ubuntu-latest
6 | name: Run test
7 | steps:
8 | - uses: actions/checkout@v3
9 | - uses: erlef/setup-beam@v1
10 | with:
11 | version-file: ".tool-versions"  # pin CI to the same Elixir/Erlang versions as local development
12 | version-type: "strict"
13 | - run: mix deps.get
14 | - run: mix test
15 |
--------------------------------------------------------------------------------
/lib/mix/tasks/update_openai_docs.ex:
--------------------------------------------------------------------------------
1 | defmodule Mix.Tasks.UpdateOpenaiDocs do
2 | @shortdoc "Downloads the latest OpenAI OpenAPI spec into lib/ex_openai/docs"
3 |
4 | @moduledoc """
5 | Updates the vendored OpenAI API documentation.
6 |
7 | Downloads the upstream `openapi.yaml` from the `openai/openai-openapi`
8 | repository and stores it as `lib/ex_openai/docs/docs.yaml`.
9 | """
10 |
11 | use Mix.Task
12 |
13 | @target_dir "lib/ex_openai/docs"
14 | @docs_url "https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml"
15 |
16 | # @impl now directly annotates the callback it implements; it previously
17 | # sat above an unrelated module attribute.
18 | @impl Mix.Task
19 | def run(_args) do
20 | File.mkdir_p!(@target_dir)
21 |
22 | # Fail loudly when the download fails instead of silently leaving a
23 | # stale or empty docs file behind (the exit status was ignored before).
24 | case System.cmd("curl", ["-L", @docs_url, "-o", "#{@target_dir}/docs.yaml"]) do
25 | {_output, 0} -> :ok
26 | {output, code} -> Mix.raise("Downloading OpenAI docs failed (exit #{code}): #{output}")
27 | end
28 | end
29 | end
30 |
--------------------------------------------------------------------------------
/test/audio_tts_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.TextToSpeechTest do
2 | use ExUnit.Case, async: true
3 | use ExVCR.Mock, adapter: ExVCR.Adapter.Hackney  # record/replay HTTP traffic via ExVCR's Hackney adapter
4 |
5 | setup do
6 | ExVCR.Config.cassette_library_dir("./test/fixture/vcr_cassettes")  # all cassettes live under test/fixture/vcr_cassettes
7 | :ok
8 | end
9 |
10 | test "audio text-to-speech" do
11 | use_cassette "audio_text_to_speech" do  # NOTE(review): audio_text_to_speech.json is absent from the fixture listing — confirm the cassette exists
12 | {:ok, res} =
13 | ExOpenAI.Audio.create_speech("Hello, hello, hello, just a test.", :"tts-1-hd", :shimmer)
14 |
15 | assert res != nil
16 | assert byte_size(res) == 37920  # exact byte size of the recorded tts-1-hd audio; guards against truncated replies
17 | end
18 | end
19 | end
20 |
--------------------------------------------------------------------------------
/test/audio_transcription_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.AudioTest do
2 | use ExUnit.Case, async: true
3 | use ExVCR.Mock, adapter: ExVCR.Adapter.Hackney  # record/replay HTTP traffic via ExVCR's Hackney adapter
4 |
5 | setup do
6 | ExVCR.Config.cassette_library_dir("./test/fixture/vcr_cassettes")
7 | :ok
8 | end
9 |
10 | test "audio transcription" do
11 | use_cassette "audio_transcription" do
12 | audio = File.read!("#{__DIR__}/testdata/audio.wav")  # short recorded wav checked into test/testdata
13 |
14 | {:ok, res} = ExOpenAI.Audio.create_transcription({"audio.wav", audio}, "whisper-1")  # {filename, bytes} tuple form of the upload
15 |
16 | assert res.text == "Hello, hello, hello, just a test."
17 | end
18 | end
19 | end
20 |
--------------------------------------------------------------------------------
/test/models_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.ModelsTest do
2 | use ExUnit.Case, async: true
3 | use ExVCR.Mock, adapter: ExVCR.Adapter.Hackney
4 |
5 | setup do
6 | ExVCR.Config.cassette_library_dir("./test/fixture/vcr_cassettes")
7 | :ok
8 | end
9 |
10 | # list models is one of those endpoints that returns a bunch of stuff that's not included
11 | # in the official openapi docs, causing unknown atoms to be created
12 | test "list models" do
13 | use_cassette "list_models" do  # NOTE(review): list_models.json is absent from the fixture listing — confirm the cassette exists
14 | {:ok, res} = ExOpenAI.Models.list_models()
15 | assert Enum.count(res.data) == 69  # number of models at the time the cassette was recorded
16 | end
17 | end
18 | end
19 |
--------------------------------------------------------------------------------
/test/ex_openai/codegen_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.CodegenTest do
2 | use ExUnit.Case, async: true
3 |
4 | @moduledoc """
5 | This file serves as an entry point for the codegen tests.
6 | The actual tests have been split into separate files for better organization:
7 |
8 | - type_spec_test.exs / parse_type_test.exs - type parsing and type_to_spec functionality
9 | - parse_path_test.exs - Tests for parse_path functionality
10 | - convert_response_test.exs - Tests for convert_response functionality
11 | - finalize_schema_test.exs - Tests for finalize_schema functionality
12 |
13 | See the individual test files in the codegen/ directory for details.
14 | """
15 | end
16 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # The directory Mix will write compiled artifacts to.
2 | /_build/
3 |
4 | # If you run "mix test --cover", coverage assets end up here.
5 | /cover/
6 |
7 | # The directory Mix downloads your dependencies sources to.
8 | /deps/
9 |
10 | # Where third-party dependencies like ExDoc output generated docs.
11 | /doc/
12 |
13 | # Ignore .fetch files in case you like to edit your project deps locally.
14 | /.fetch
15 |
16 | # If the VM crashes, it generates a dump, let's ignore it too.
17 | erl_crash.dump
18 |
19 | # Also ignore archive artifacts (built via "mix archive.build").
20 | *.ez
21 |
22 | # Ignore package tarball (built via "mix hex.build").
23 | openai-*.tar
24 |
25 |
26 | # Temporary files for e.g. tests
27 | /tmp
28 |
29 | .aider*
30 |
--------------------------------------------------------------------------------
/lib/ex_openai/Jason.ex:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.Jason do
2 | @moduledoc false
3 |
4 | # Packages the Jason.Encoder protocol implementation so a module can opt in with `use ExOpenAI.Jason`.
5 |
6 | defmacro __using__(_opts) do
7 | quote do
8 | defimpl Jason.Encoder, for: [__MODULE__] do
9 | # Structs: encode as a keyword list, dropping nil values and the :__struct__ tag so payloads stay minimal.
10 | def encode(struct, opts) when is_struct(struct) do
11 | to_encode =
12 | for {key, value} <- Map.to_list(struct),
13 | value != nil,
14 | key != :__struct__,
15 | do: {key, value}
16 |
17 | Jason.Encode.keyword(to_encode, opts)
18 | end
19 |
20 | # Fallback for non-struct values. NOTE(review): Jason.Encode.encode/2 is not part of Jason's documented public API — confirm it exists in the pinned Jason version.
21 | def encode(atom, opts) do
22 | Jason.Encode.encode(atom, opts)
23 | end
24 | end
25 | end
26 | end
27 | end
28 |
--------------------------------------------------------------------------------
/test/config_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.ConfigTest do
2 | use ExUnit.Case
3 | alias ExOpenAI.Config
4 |
5 | @application :ex_openai
6 |
7 | # Reset the application env before *each* test (and again afterwards) so
8 | # tests cannot leak `Application.put_env` state into one another. The
9 | # previous `setup_all` only reset once for the whole module, making the
10 | # tests order-dependent.
11 | setup do
12 | reset_env()
13 | on_exit(&reset_env/0)
14 | :ok
15 | end
16 |
17 | test "http_options/0 should return value or default" do
18 | assert Config.http_options() == []
19 |
20 | Application.put_env(@application, :http_options, recv_timeout: 30_000)
21 | assert Config.http_options() == [recv_timeout: 30_000]
22 | end
23 |
24 | test "base_url/1 should return value or default" do
25 | assert Config.api_url() == "https://api.openai.com/v1"
26 |
27 | Application.put_env(@application, :base_url, "https://example.com/foobar")
28 | assert Config.api_url() == "https://example.com/foobar"
29 | end
30 |
31 | # Deletes every :ex_openai application env key.
32 | defp reset_env() do
33 | Application.get_all_env(@application)
34 | |> Keyword.keys()
35 | |> Enum.each(&Application.delete_env(@application, &1))
36 | end
37 | end
38 |
--------------------------------------------------------------------------------
/test/completion_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.CompletionTest do
2 | use ExUnit.Case, async: true
3 | use ExVCR.Mock, adapter: ExVCR.Adapter.Hackney
4 |
5 | setup do
6 | ExVCR.Config.cassette_library_dir("./test/fixture/vcr_cassettes")
7 | :ok
8 | end
9 |
10 | test "basic completion" do
11 | use_cassette "completion_basic_prompt" do  # replays a recorded text-davinci-003 completion
12 | {:ok, res} =
13 | ExOpenAI.Completions.create_completion(
14 | "text-davinci-003",
15 | prompt: "The apple is",
16 | temperature: 0.28,
17 | max_tokens: 100
18 | )
19 |
20 | assert Enum.count(res.choices) == 1
21 |
22 | assert List.first(res.choices) == %{  # full structural match against the recorded choice
23 | finish_reason: "stop",
24 | index: 0,
25 | logprobs: nil,
26 | text:
27 | " red\n\nThe apple is indeed red. Apples can come in a variety of colors, including red, green, yellow, and even pink."
28 | }
29 | end
30 | end
31 | end
32 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of
4 | this software and associated documentation files (the "Software"), to deal in
5 | the Software without restriction, including without limitation the rights to
6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
7 | the Software, and to permit persons to whom the Software is furnished to do so,
8 | subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
15 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
16 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
17 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/test/client_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.ClientTest do
2 | use ExUnit.Case
3 | alias ExOpenAI.Client
4 | alias ExOpenAI.Config
5 |
6 | describe "api_url/1" do
7 | test "returns default URL when no override is provided" do
8 | assert Config.api_url() == "https://api.openai.com/v1"  # assumes no :base_url app env is set — relies on other tests cleaning up
9 | end
10 |
11 | test "returns overridden URL when provided" do
12 | override_url = "https://custom-api.example.com/v1"
13 | assert Config.api_url(override_url) == override_url
14 | end
15 | end
16 |
17 | describe "add_base_url/2" do
18 | test "adds base URL" do
19 | url = "/chat/completions"
20 | base_url = "https://api.openai.com/v1"
21 | assert Client.add_base_url(url, base_url) == "https://api.openai.com/v1/chat/completions"  # base URL is simply prefixed to the path
22 | end
23 |
24 | test "uses custom base URL when provided" do
25 | url = "/chat/completions"
26 | base_url = "https://custom-api.example.com/v1"
27 |
28 | assert Client.add_base_url(url, base_url) ==
29 | "https://custom-api.example.com/v1/chat/completions"
30 | end
31 | end
32 | end
33 |
--------------------------------------------------------------------------------
/test/pass_api_credentials_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.PassApiKeyCredentialsTest do
2 | use ExUnit.Case, async: true
3 | use ExVCR.Mock, adapter: ExVCR.Adapter.Hackney
4 |
5 | setup do
6 | ExVCR.Config.cassette_library_dir("./test/fixture/vcr_cassettes")
7 | :ok
8 | end
9 |
10 | # list models is one of those endpoints that returns a bunch of stuff that's not included
11 | # in the official openapi docs, causing unknown atoms to be created
12 | test "list models with custom api key" do
13 | use_cassette "list_models_custom_key" do
14 | {:error, res} =
15 | ExOpenAI.Models.list_models(openai_api_key: "abc", openai_organization_key: "def")
16 |
17 | assert res["error"]["code"] == "invalid_api_key"
18 | end
19 | end
20 |
21 | test "list models with env variable api key" do
22 | use_cassette "list_models_custom_key_env" do
23 | Application.put_env(:ex_openai, :api_key, "abc_from_env")
24 | Application.put_env(:ex_openai, :organization_key, "def_from_envxxxxx")
25 |
26 | # Remove the fake credentials from the global application env after the
27 | # test; previously they leaked into every later test module (e.g. the
28 | # config/client tests that assert default values).
29 | on_exit(fn ->
30 | Application.delete_env(:ex_openai, :api_key)
31 | Application.delete_env(:ex_openai, :organization_key)
32 | end)
33 |
34 | {:error, res} = ExOpenAI.Models.list_models()
35 | assert res["error"]["code"] == "invalid_api_key"
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/test/assistant_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.AssistantTest do
2 | use ExUnit.Case, async: true
3 | use ExVCR.Mock, adapter: ExVCR.Adapter.Hackney
4 |
5 | setup do
6 | ExVCR.Config.cassette_library_dir("./test/fixture/vcr_cassettes")
7 | :ok
8 | end
9 |
10 | # Full assistant round trip against the recorded math_assistant cassette:
11 | # create assistant -> create thread -> post message -> run -> list messages.
12 | test "non-streaming assistant" do
13 | use_cassette "math_assistant" do
14 | {:ok, assistant} =
15 | ExOpenAI.Assistants.create_assistant(:"gpt-4o",
16 | name: "Math Teacher",
17 | # Fixed option name: the OpenAI API parameter is `instructions`
18 | # (plural); the earlier `instruction:` typo was silently dropped,
19 | # so the assistant was created without its system prompt.
20 | instructions:
21 | "You are a personal math tutor. Write and run code to answer math questions.",
22 | tools: [%{type: "code_interpreter"}]
23 | )
24 |
25 | {:ok, thread} = ExOpenAI.Threads.create_thread()
26 |
27 | {:ok, _msg} =
28 | ExOpenAI.Threads.create_message(
29 | thread.id,
30 | "I need to solve the equation `3x + 11 = 14`. Can you help me?",
31 | "user"
32 | )
33 |
34 | {:ok, _run} =
35 | ExOpenAI.Threads.create_run(
36 | thread.id,
37 | assistant.id
38 | )
39 |
40 | # When re-recording the cassette, sleep so the run has time to finish:
41 | # :timer.sleep(5000)
42 |
43 | {:ok, messages} = ExOpenAI.Threads.list_messages(thread.id)
44 | # user question plus the assistant's recorded answer
45 | assert Enum.count(messages.data) == 2
46 | end
47 | end
48 | end
49 |
--------------------------------------------------------------------------------
/test/image_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.ImageTest do
2 | use ExUnit.Case, async: true
3 | use ExVCR.Mock, adapter: ExVCR.Adapter.Hackney
4 |
5 | setup do
6 | ExVCR.Config.cassette_library_dir("./test/fixture/vcr_cassettes")
7 | :ok
8 | end
9 |
10 | test "image variation" do
11 | use_cassette "image_variation" do
12 | duck = File.read!("#{__DIR__}/testdata/duck.png")  # raw PNG bytes can be passed directly
13 |
14 | {:ok, res} = ExOpenAI.Images.create_image_variation(duck)
15 |
16 | assert Enum.count(res.data) == 1
17 | assert List.first(res.data) |> Map.get(:url) |> is_binary()  # default response_format returns a URL
18 | end
19 | end
20 |
21 | test "image variation with parameters" do
22 | use_cassette "image_variation_b64" do  # NOTE(review): image_variation_b64.json is absent from the fixture listing — confirm the cassette exists
23 | duck = File.read!("#{__DIR__}/testdata/duck.png")
24 |
25 | {:ok, res} = ExOpenAI.Images.create_image_variation(duck, response_format: "b64_json")
26 |
27 | assert Enum.count(res.data) == 1
28 | assert List.first(res.data) |> Map.get(:b64_json) |> is_binary()  # b64_json format inlines the image instead of returning a URL
29 | end
30 | end
31 |
32 | test "image variation with filename tuple" do
33 | use_cassette "image_variation_tuple" do
34 | duck = File.read!("#{__DIR__}/testdata/duck.png")
35 |
36 | {:ok, res} = ExOpenAI.Images.create_image_variation({"duck.png", duck})  # {filename, bytes} tuple form of the upload
37 |
38 | assert Enum.count(res.data) == 1
39 | assert List.first(res.data) |> Map.get(:url) |> is_binary()
40 | end
41 | end
42 | end
43 |
--------------------------------------------------------------------------------
/test/jason_encoder_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.JasonEncoderTest do
2 | use ExUnit.Case, async: true
3 |
4 | defp encode_and_return(val) do
5 | {:ok, res} = Jason.encode(val)
6 | res
7 | end
8 |
9 | # Pins the JSON encoding of atoms, mixed-key maps and component structs.
10 | # (The struct encoder from ExOpenAI.Jason drops nil fields and :__struct__.)
11 | test "atoms as strings" do
12 | assert encode_and_return(:foo) == "\"foo\""
13 | assert encode_and_return([:one, :two, "three"]) == "[\"one\",\"two\",\"three\"]"
14 | end
15 |
16 | test "normal map" do
17 | assert encode_and_return(%{:foo => "bar", "bar" => :foo}) ==
18 | "{\"foo\":\"bar\",\"bar\":\"foo\"}"
19 | end
20 |
21 | test "list of structs" do
22 | msgs = [
23 | %ExOpenAI.Components.ChatCompletionRequestUserMessage{
24 | role: :user,
25 | content: "Hello!"
26 | },
27 | %ExOpenAI.Components.ChatCompletionRequestAssistantMessage{
28 | role: :assistant,
29 | content: "What's up?"
30 | },
31 | %ExOpenAI.Components.ChatCompletionRequestUserMessage{
32 | role: :user,
33 | content: "What ist the color of the sky?"
34 | }
35 | ]
36 |
37 | assert encode_and_return(msgs) ==
38 | "[{\"role\":\"user\",\"content\":\"Hello!\"},{\"role\":\"assistant\",\"content\":\"What's up?\"},{\"role\":\"user\",\"content\":\"What ist the color of the sky?\"}]"
39 | end
40 | end
41 |
--------------------------------------------------------------------------------
/test/chat_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.ChatTest do
2 | use ExUnit.Case, async: true
3 | use ExVCR.Mock, adapter: ExVCR.Adapter.Hackney
4 |
5 | setup do
6 | ExVCR.Config.cassette_library_dir("./test/fixture/vcr_cassettes")
7 | :ok
8 | end
9 |
10 | test "chat completion" do
11 | use_cassette "chat_basic_completion" do
12 | msgs = [
13 | %ExOpenAI.Components.ChatCompletionRequestUserMessage{
14 | role: :user,
15 | content: "Hello!"
16 | },
17 | %ExOpenAI.Components.ChatCompletionRequestAssistantMessage{
18 | role: :assistant,
19 | content: "What's up?"
20 | },
21 | %ExOpenAI.Components.ChatCompletionRequestUserMessage{
22 | role: :user,
23 | content: "What ist the color of the sky?"
24 | }
25 | ]
26 |
27 | {:ok, res} =
28 | ExOpenAI.Chat.create_chat_completion(msgs, "gpt-3.5-turbo",
29 | logit_bias: %{  # strong negative bias against token id 8043; also exercises map-valued options
30 | "8043" => -100
31 | }
32 | )
33 |
34 | assert Enum.count(res.choices) == 1
35 |
36 | assert List.first(res.choices) == %{  # full structural match against the recorded choice
37 | finish_reason: "stop",
38 | index: 0,
39 | message: %{
40 | content:
41 | "The color of the sky is usually blue, but it can also be gray, pink, orange, red, or purple depending on the time of day and weather conditions.",
42 | role: "assistant"
43 | }
44 | }
45 | end
46 | end
47 | end
48 |
--------------------------------------------------------------------------------
/test/fixture/vcr_cassettes/list_models_custom_key.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "request": {
4 | "body": "",
5 | "headers": {
6 | "Authorization": "Bearer abc",
7 | "OpenAI-Organization": "def",
8 | "Content-type": "application/json"
9 | },
10 | "method": "get",
11 | "options": {
12 | "recv_timeout": 50000
13 | },
14 | "request_body": "",
15 | "url": "https://api.openai.com/v1/models?"
16 | },
17 | "response": {
18 | "binary": false,
19 | "body": "{\n \"error\": {\n \"message\": \"Incorrect API key provided: abc. You can find your API key at https://platform.openai.com/account/api-keys.\",\n \"type\": \"invalid_request_error\",\n \"param\": null,\n \"code\": \"invalid_api_key\"\n }\n}\n",
20 | "headers": {
21 | "Date": "Thu, 20 Apr 2023 06:33:37 GMT",
22 | "Content-Type": "application/json",
23 | "Content-Length": "233",
24 | "Connection": "keep-alive",
25 | "www-authenticate": "Basic realm=\"OpenAI API\"",
26 | "access-control-allow-origin": "*",
27 | "openai-version": "2020-10-01",
28 | "x-request-id": "210c44ab7c99729d735c2b5a8ddb52cf",
29 | "openai-processing-ms": "8",
30 | "strict-transport-security": "max-age=15724800; includeSubDomains",
31 | "CF-Cache-Status": "DYNAMIC",
32 | "Server": "cloudflare",
33 | "CF-RAY": "7bab5b556a07a80d-SYD",
34 | "alt-svc": "h3=\":443\"; ma=86400, h3-29=\":443\"; ma=86400"
35 | },
36 | "status_code": 401,
37 | "type": "ok"
38 | }
39 | }
40 | ]
--------------------------------------------------------------------------------
/test/fixture/vcr_cassettes/list_models_custom_key_env.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "request": {
4 | "body": "",
5 | "headers": {
6 | "Authorization": "Bearer abc_from_env",
7 | "OpenAI-Organization": "def_from_env",
8 | "Content-type": "application/json"
9 | },
10 | "method": "get",
11 | "options": {
12 | "recv_timeout": 50000
13 | },
14 | "request_body": "",
15 | "url": "https://api.openai.com/v1/models?"
16 | },
17 | "response": {
18 | "binary": false,
19 | "body": "{\n \"error\": {\n \"message\": \"Incorrect API key provided: abc_from_env. You can find your API key at https://platform.openai.com/account/api-keys.\",\n \"type\": \"invalid_request_error\",\n \"param\": null,\n \"code\": \"invalid_api_key\"\n }\n}\n",
20 | "headers": {
21 | "Date": "Thu, 20 Apr 2023 06:37:59 GMT",
22 | "Content-Type": "application/json",
23 | "Content-Length": "242",
24 | "Connection": "keep-alive",
25 | "www-authenticate": "Basic realm=\"OpenAI API\"",
26 | "access-control-allow-origin": "*",
27 | "openai-version": "2020-10-01",
28 | "x-request-id": "08253eb80b3c97e00e0d468a5fb7af73",
29 | "openai-processing-ms": "10",
30 | "strict-transport-security": "max-age=15724800; includeSubDomains",
31 | "CF-Cache-Status": "DYNAMIC",
32 | "Server": "cloudflare",
33 | "CF-RAY": "7bab61bd2b2fa94d-SYD",
34 | "alt-svc": "h3=\":443\"; ma=86400, h3-29=\":443\"; ma=86400"
35 | },
36 | "status_code": 401,
37 | "type": "ok"
38 | }
39 | }
40 | ]
--------------------------------------------------------------------------------
/test/fixture/vcr_cassettes/completion_basic_prompt.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "request": {
4 | "body": "{\"max_tokens\":100,\"model\":\"text-davinci-003\",\"prompt\":\"The apple is\",\"temperature\":0.28}",
5 | "headers": {
6 | "OpenAI-Organization": "***",
7 | "Authorization": "***",
8 | "Content-type": "application/json"
9 | },
10 | "method": "post",
11 | "options": {
12 | "recv_timeout": 50000
13 | },
14 | "request_body": "",
15 | "url": "https://api.openai.com/v1/completions?"
16 | },
17 | "response": {
18 | "binary": false,
19 | "body": "{\"id\":\"cmpl-6qKbPImRqiq1Yd8mxtQ89sR9leGgO\",\"object\":\"text_completion\",\"created\":1677930271,\"model\":\"text-davinci-003\",\"choices\":[{\"text\":\" red\\n\\nThe apple is indeed red. Apples can come in a variety of colors, including red, green, yellow, and even pink.\",\"index\":0,\"logprobs\":null,\"finish_reason\":\"stop\"}],\"usage\":{\"prompt_tokens\":3,\"completion_tokens\":30,\"total_tokens\":33}}\n",
20 | "headers": {
21 | "Date": "Sat, 04 Mar 2023 11:44:32 GMT",
22 | "Content-Type": "application/json",
23 | "Content-Length": "378",
24 | "Connection": "keep-alive",
25 | "Access-Control-Allow-Origin": "*",
26 | "Cache-Control": "no-cache, must-revalidate",
27 | "Openai-Model": "text-davinci-003",
28 | "Openai-Organization": "***",
29 | "Openai-Processing-Ms": "1801",
30 | "Openai-Version": "2020-10-01",
31 | "Strict-Transport-Security": "max-age=15724800; includeSubDomains",
32 | "X-Request-Id": "089e43bfb6ca66508353f588d09bfee9"
33 | },
34 | "status_code": 200,
35 | "type": "ok"
36 | }
37 | }
38 | ]
--------------------------------------------------------------------------------
/test/fixture/vcr_cassettes/audio_transcription.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "request": {
4 | "body": "{:multipart, [{\"file\", <<82, 73, 70, 70, 198, 167, 3, 0, 87, 65, 86, 69, 102, 109, 116, 32, 16, 0, 0, 0, 1, 0, 1, 0, 68, 172, 0, 0, 136, 88, 1, 0, 2, 0, 16, 0, 76, 73, 83, 84, 26, 0, 0, 0, 73, ...>>, {\"form-data\", [name: \"file\", filename: \"audio.wav\"]}, []}, {\"model\", \"whisper-1\"}]}",
5 | "headers": {
6 | "Authorization": "***",
7 | "OpenAI-Organization": "***",
8 | "Content-type": "multipart/form-data"
9 | },
10 | "method": "post",
11 | "options": {
12 | "recv_timeout": 50000
13 | },
14 | "request_body": "",
15 | "url": "https://api.openai.com/v1/audio/transcriptions?"
16 | },
17 | "response": {
18 | "binary": false,
19 | "body": "{\"text\":\"Hello, hello, hello, just a test.\"}",
20 | "headers": {
21 | "Date": "Mon, 21 Aug 2023 02:25:34 GMT",
22 | "Content-Type": "application/json",
23 | "Content-Length": "44",
24 | "Connection": "keep-alive",
25 | "openai-organization": "mercari-8",
26 | "openai-processing-ms": "453",
27 | "openai-version": "2020-10-01",
28 | "strict-transport-security": "max-age=15724800; includeSubDomains",
29 | "x-ratelimit-limit-requests": "50",
30 | "x-ratelimit-remaining-requests": "49",
31 | "x-ratelimit-reset-requests": "1.2s",
32 | "x-request-id": "d97b4d3981e260a6618ca8ccc398c335",
33 | "CF-Cache-Status": "DYNAMIC",
34 | "Server": "cloudflare",
35 | "CF-RAY": "7f9f6d058fe53e75-ADL",
36 | "alt-svc": "h3=\":443\"; ma=86400"
37 | },
38 | "status_code": 200,
39 | "type": "ok"
40 | }
41 | }
42 | ]
--------------------------------------------------------------------------------
/test/responses_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.ResponsesTest do
2 | use ExUnit.Case, async: true
3 | use ExVCR.Mock, adapter: ExVCR.Adapter.Hackney
4 |
5 | setup do
6 | ExVCR.Config.cassette_library_dir("./test/fixture/vcr_cassettes")
7 | :ok
8 | end
9 |
10 | test "responses completion" do
11 | recorder =
12 | ExVCR.Mock.start_cassette("responses_basic_usage",  # manual start/stop lets one test span two cassettes
13 | match_requests_on: [:query, :request_body]
14 | )
15 |
16 | {:ok, res} = ExOpenAI.Responses.create_response("tell me a joke", "gpt-4o-mini")
17 |
18 | assert res.model == "gpt-4o-mini-2024-07-18"
19 | assert res.object == "response"
20 | assert res.status == "completed"
21 |
22 | output = List.first(res.output)
23 | assert output.type == "message"
24 | assert output.role == "assistant"
25 |
26 | {:ok, get_res} = ExOpenAI.Responses.get_response(res.id)
27 |
28 | # should be the same because we haven't done anything new yet
29 | assert get_res.id == res.id
30 | ExVCR.Mock.stop_cassette(recorder)  # stop the first cassette before starting the second
31 |
32 | r =
33 | ExVCR.Mock.start_cassette("responses_basic_usage_second_message",
34 | match_requests_on: [:query, :request_body]
35 | )
36 |
37 | # The Responses API is stateful, so the follow-up call has the context of the previous message.
38 |
39 | {:ok, another_one} =
40 | ExOpenAI.Responses.create_response(
41 | "Please tell me what I asked you to do in my previous message ok??",
42 | "gpt-4o-mini",
43 | previous_response_id: res.id
44 | )
45 |
46 | assert another_one.id != get_res.id
47 |
48 | {:ok, get_res} = ExOpenAI.Responses.get_response(another_one.id)
49 |
50 | first = List.first(get_res.output)
51 |
52 | assert List.first(first.content).text ==
53 | "You asked me to tell you a joke. Would you like to hear another one?"
54 |
55 | ExVCR.Mock.stop_cassette(r)
56 | end
57 | end
58 |
--------------------------------------------------------------------------------
/mix.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.MixProject do
2 | use Mix.Project
3 |
4 | def project do
5 | [
6 | app: :ex_openai,
7 | version: "1.8.0",
8 | elixir: "~> 1.16",
9 | description: description(),
10 | start_permanent: Mix.env() == :prod,
11 | deps: deps(),
12 | package: package(),
13 | name: "ex_openai.ex",
14 | source_url: "https://github.com/dvcrn/ex_openai",
15 | preferred_cli_env: [
16 | vcr: :test,
17 | "vcr.delete": :test,
18 | "vcr.check": :test,
19 | "vcr.show": :test
20 | ],
21 | dialyzer: [plt_add_apps: [:mix]]  # Mix in the PLT so the update_openai_docs task can be analyzed
22 | ]
23 | end
24 |
25 | # Run "mix help compile.app" to learn about applications.
26 | def application do
27 | [
28 | mod: {ExOpenAI, []},
29 | extra_applications: [:hackney, :httpoison, :jason, :logger, :yaml_elixir]
30 | ]
31 | end
32 |
33 | defp description do
34 | """
35 | Auto-generated Elixir SDK for OpenAI APIs with proper typespec and @docs support
36 | """
37 | end
38 |
39 | defp package do
40 | [
41 | licenses: ["MIT"],
42 | exclude_patterns: ["./config/*"],  # NOTE(review): exclude_patterns entries are regexes matched against relative paths — "./config/*" likely never matches anything; verify with `mix hex.build --unpack`
43 | links: %{
44 | "GitHub" => "https://github.com/dvcrn/ex_openai"
45 | },
46 | maintainers: [
47 | "dvcrn"
48 | ]
49 | ]
50 | end
51 |
52 | # Run "mix help deps" to learn about dependencies.
53 | defp deps do
54 | [
55 | {:jason, "~> 1.4"},
56 | {:mock, "~> 0.3.8", only: :test},
57 | {:httpoison, "~> 2.2.1"},
58 | {:mix_test_watch, "~> 1.2", only: :test},
59 | {:ex_doc, ">= 0.34.1", only: :dev},
60 | {:exvcr, "~> 0.16.0", only: :test},
61 | {:exjsx, "~> 4.0", only: :test},
62 | {:yaml_elixir, "~> 2.11"},
63 | {:dialyxir, "~> 1.4", only: [:dev], runtime: false},
64 | {:credo, "~> 1.7", only: [:dev, :test], runtime: false},
65 | {:mime, "~> 2.0"}
66 | ]
67 | end
68 | end
69 |
--------------------------------------------------------------------------------
/test/fixture/vcr_cassettes/chat_basic_completion.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "request": {
4 | "body": "{\"logit_bias\":{\"8043\":-100},\"messages\":[{\"content\":\"Hello!\",\"role\":\"user\"},{\"content\":\"What's up?\",\"role\":\"assistant\"},{\"content\":\"What ist the color of the sky?\",\"role\":\"user\"}],\"model\":\"gpt-3.5-turbo\"}",
5 | "headers": {
6 | "OpenAI-Organization": "***",
7 | "Authorization": "***",
8 | "Content-type": "application/json"
9 | },
10 | "method": "post",
11 | "options": {
12 | "recv_timeout": 50000
13 | },
14 | "request_body": "",
15 | "url": "https://api.openai.com/v1/chat/completions?"
16 | },
17 | "response": {
18 | "binary": false,
19 | "body": "{\"id\":\"chatcmpl-6qL69buDX0CwYGRuCljwLqaRaCay4\",\"object\":\"chat.completion\",\"created\":1677932177,\"model\":\"gpt-3.5-turbo-0301\",\"usage\":{\"prompt_tokens\":31,\"completion_tokens\":36,\"total_tokens\":67},\"choices\":[{\"message\":{\"role\":\"assistant\",\"content\":\"The color of the sky is usually blue, but it can also be gray, pink, orange, red, or purple depending on the time of day and weather conditions.\"},\"finish_reason\":\"stop\",\"index\":0}]}\n",
20 | "headers": {
21 | "Date": "Sat, 04 Mar 2023 12:16:18 GMT",
22 | "Content-Type": "application/json",
23 | "Content-Length": "430",
24 | "Connection": "keep-alive",
25 | "Access-Control-Allow-Origin": "*",
26 | "Cache-Control": "no-cache, must-revalidate",
27 | "Openai-Model": "gpt-3.5-turbo-0301",
28 | "Openai-Organization": "***",
29 | "Openai-Processing-Ms": "1133",
30 | "Openai-Version": "2020-10-01",
31 | "Strict-Transport-Security": "max-age=15724800; includeSubDomains",
32 | "X-Request-Id": "572bbb54175371ad431644f172f7a235"
33 | },
34 | "status_code": 200,
35 | "type": "ok"
36 | }
37 | }
38 | ]
--------------------------------------------------------------------------------
/test/fixture/vcr_cassettes/image_variation.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "request": {
4 | "body": "{:multipart, [{\"file\", <<137, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68, 82, 0, 0, 1, 99, 0, 0, 1, 99, 8, 6, 0, 0, 0, 232, 194, 106, 225, 0, 0, 0, 1, 115, 82, 71, 66, 0, 174, 206, 28, ...>>, {\"form-data\", [name: \"image\", filename: \"image.png\"]}, []}]}",
5 | "headers": {
6 | "Content-type": "multipart/form-data",
7 | "OpenAI-Organization": "***",
8 | "Authorization": "***"
9 | },
10 | "method": "post",
11 | "options": {
12 | "recv_timeout": 50000
13 | },
14 | "request_body": "",
15 | "url": "https://api.openai.com/v1/images/variations?"
16 | },
17 | "response": {
18 | "binary": false,
19 | "body": "{\n \"created\": 1678510397,\n \"data\": [\n {\n \"url\": \"https://oaidalleapiprodscus.blob.core.windows.net/private/org-PEX6yimM3dFikgcBrrK0hy4E/user-WlYtzNyXDDhbIAlOM2IQ8hPu/img-pYzWJVMcQfFceClqEZNpNSjd.png?st=2023-03-11T03%3A53%3A17Z&se=2023-03-11T05%3A53%3A17Z&sp=r&sv=2021-08-06&sr=b&rscd=inline&rsct=image/png&skoid=6aaadede-4fb3-4698-a8f6-684d7786b067&sktid=a48cca56-e6da-484e-a814-9c849652bcb3&skt=2023-03-10T19%3A42%3A40Z&ske=2023-03-11T19%3A42%3A40Z&sks=b&skv=2021-08-06&sig=9Rj1R14o2RAKA1y%2BZ9I/zrExm3SB%2B6IxfQTIYZmDfjo%3D\"\n }\n ]\n}\n",
20 | "headers": {
21 | "Date": "Sat, 11 Mar 2023 04:53:17 GMT",
22 | "Content-Type": "application/json",
23 | "Content-Length": "549",
24 | "Connection": "keep-alive",
25 | "OpenAI-Version": "2020-10-01",
26 | "OpenAI-Organization": "***",
27 | "X-Request-ID": "635c3ecc6389636af09bc0a8c37cbdb1",
28 | "OpenAI-Processing-Ms": "8914",
29 | "Access-Control-Allow-Origin": "*",
30 | "Strict-Transport-Security": "max-age=15724800; includeSubDomains"
31 | },
32 | "status_code": 200,
33 | "type": "ok"
34 | }
35 | }
36 | ]
--------------------------------------------------------------------------------
/test/fixture/vcr_cassettes/image_variation_tuple.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "request": {
4 | "body": "{:multipart, [{\"file\", <<137, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68, 82, 0, 0, 1, 99, 0, 0, 1, 99, 8, 6, 0, 0, 0, 232, 194, 106, 225, 0, 0, 0, 1, 115, 82, 71, 66, 0, 174, 206, 28, ...>>, {\"form-data\", [name: \"image\", filename: \"duck.png\"]}, []}]}",
5 | "headers": {
6 | "Authorization": "***",
7 | "OpenAI-Organization": "***",
8 | "Content-type": "multipart/form-data"
9 | },
10 | "method": "post",
11 | "options": {
12 | "recv_timeout": 50000
13 | },
14 | "request_body": "",
15 | "url": "https://api.openai.com/v1/images/variations?"
16 | },
17 | "response": {
18 | "binary": false,
19 | "body": "{\n \"created\": 1692584747,\n \"data\": [\n {\n \"url\": \"https://oaidalleapiprodscus.blob.core.windows.net/private/org-U1ISUPKU2jQVnQpqskZAg5IO/user-UJcL6KdEJLM3qBrLzUK8E8id/img-G64hI5wPcuNnZWoZ2lpEDoA5.png?st=2023-08-21T01%3A25%3A47Z&se=2023-08-21T03%3A25%3A47Z&sp=r&sv=2021-08-06&sr=b&rscd=inline&rsct=image/png&skoid=6aaadede-4fb3-4698-a8f6-684d7786b067&sktid=a48cca56-e6da-484e-a814-9c849652bcb3&skt=2023-08-21T00%3A37%3A37Z&ske=2023-08-22T00%3A37%3A37Z&sks=b&skv=2021-08-06&sig=RG/7SypjLyM33r0uNJPN%2BIP/I9KRap70A6QGPv9n4TA%3D\"\n }\n ]\n}\n",
20 | "headers": {
21 | "Date": "Mon, 21 Aug 2023 02:25:47 GMT",
22 | "Content-Type": "application/json",
23 | "Content-Length": "547",
24 | "Connection": "keep-alive",
25 | "openai-version": "2020-10-01",
26 |       "openai-organization": "***",
27 | "x-request-id": "5016db2e1e32d345e7144ddbfe931686",
28 | "openai-processing-ms": "9832",
29 | "access-control-allow-origin": "*",
30 | "strict-transport-security": "max-age=15724800; includeSubDomains",
31 | "CF-Cache-Status": "DYNAMIC",
32 | "Server": "cloudflare",
33 | "CF-RAY": "7f9f6d232f713e75-ADL",
34 | "alt-svc": "h3=\":443\"; ma=86400"
35 | },
36 | "status_code": 200,
37 | "type": "ok"
38 | }
39 | }
40 | ]
--------------------------------------------------------------------------------
/lib/ex_openai/config.ex:
--------------------------------------------------------------------------------
defmodule ExOpenAI.Config do
  @moduledoc """
  Configuration access for ExOpenAI.

  `init/1` parses every known configuration key from the `:ex_openai`
  application environment once on startup (resolving `{:system, env_name}`
  tuples into environment-variable lookups) and keeps the result as the
  GenServer state, readable through `get/1`.

  NOTE(review): the module-level accessors (`api_key/0`, `org_key/0`,
  `api_url/1`, `http_options/0`, `http_headers/0`, `http_client/0`) call
  `get_config_value/2` directly, so they re-read the application environment
  on every call instead of the snapshot cached by this GenServer — confirm
  whether bypassing the cache is intentional.
  """

  use GenServer

  # Default OpenAI REST endpoint, used when no :base_url is configured.
  @openai_url "https://api.openai.com/v1"

  # Keys that init/1 snapshots from the :ex_openai application environment.
  @config_keys [
    :api_key,
    :organization_key,
    :http_options,
    :http_headers,
    :http_client,
    :base_url
  ]

  def start_link(opts), do: GenServer.start_link(__MODULE__, opts, name: __MODULE__)

  @impl true
  def init(_opts) do
    # Resolve each configured key once and keep the snapshot as server state.
    config =
      @config_keys
      |> Enum.map(fn key -> {key, get_config_value(key)} end)
      |> Map.new()

    {:ok, config}
  end

  # API Key
  @doc "Configured OpenAI API key, or nil when unset."
  def api_key, do: get_config_value(:api_key)

  @doc "Configured OpenAI organization key, or nil when unset."
  def org_key, do: get_config_value(:organization_key)

  @doc """
  Base URL for API requests.

  An explicit `override` wins; otherwise the configured `:base_url` is used,
  falling back to the public OpenAI endpoint.
  """
  def api_url(override \\ nil) do
    override || get_config_value(:base_url, @openai_url)
  end

  # HTTP Options
  @doc "Options forwarded to the HTTP client; defaults to []."
  def http_options, do: get_config_value(:http_options, [])

  @doc "Extra request headers applied to every call; defaults to []."
  def http_headers, do: get_config_value(:http_headers, [])

  # HTTP client can be customized to facilitate testing
  @doc "HTTP client module to use; defaults to ExOpenAI.Client."
  def http_client, do: get_config_value(:http_client, ExOpenAI.Client)

  # Reads `key` from the :ex_openai application env, resolves {:system, ...}
  # tuples, and falls back to `default` when the resolved value is nil.
  defp get_config_value(key, default \\ nil) do
    value =
      :ex_openai
      |> Application.get_env(key)
      |> parse_config_value()

    if is_nil(value), do: default, else: value
  end

  # {:system, env_name} -> read the named environment variable (raises if unset).
  defp parse_config_value({:system, env_name}), do: fetch_env!(env_name)

  # {:system, :integer, env_name} -> read the variable and parse it as an integer.
  defp parse_config_value({:system, :integer, env_name}) do
    env_name
    |> fetch_env!()
    |> String.to_integer()
  end

  # Plain values pass through untouched.
  defp parse_config_value(value), do: value

  # System.fetch_env!/1 support for older versions of Elixir
  defp fetch_env!(env_name) do
    case System.get_env(env_name) do
      nil ->
        raise ArgumentError,
          message: "could not fetch environment variable \"#{env_name}\" because it is not set"

      value ->
        value
    end
  end

  @doc "Reads `key` from the config snapshot cached by this GenServer."
  def get(key), do: GenServer.call(__MODULE__, {:get, key})

  @impl true
  def handle_call({:get, key}, _from, state) do
    {:reply, Map.get(state, key), state}
  end

  # NOTE(review): no client-side wrapper in this module issues this :put call —
  # confirm whether it is used elsewhere or is dead code.
  @impl true
  def handle_call({:put, key, value}, _from, state) do
    {:reply, value, Map.put(state, key, value)}
  end
end
91 |
--------------------------------------------------------------------------------
/lib/ex_openai/components/model.ex:
--------------------------------------------------------------------------------
defmodule ExOpenAI.Components.Model do
  @moduledoc """
  Replacement Component for Model API responses.

  This module was not part of the API documentation and probably got
  forgotten, so it has been manually provided by this package.

  Represents API responses such as:
  ```
  created: 1649880484,
  id: "text-davinci-insert-002",
  object: "model",
  owned_by: "openai",
  parent: nil,
  permission: [
    %{
      allow_create_engine: false,
      allow_fine_tuning: false,
      allow_logprobs: true,
      allow_sampling: true,
      allow_search_indices: false,
      allow_view: true,
      created: 1669066354,
      group: nil,
      id: "modelperm-V5YQoSyiapAf4km5wisXkNXh",
      is_blocking: false,
      object: "model_permission",
      organization: "*"
    }
  ],
  root: "text-davinci-insert-002"
  ```
  """

  use ExOpenAI.Jason
  defstruct [:created, :id, :object, :owned_by, :parent, :permission, :root]

  # Quoted AST describing this component's shape — presumably consumed by
  # ExOpenAI.Codegen.AstUnpacker (used below) when generating spec metadata.
  # NOTE(review): confirm against the codegen implementation.
  @typespec quote(
              do: %{
                created: integer,
                id: String.t(),
                object: String.t(),
                owned_by: String.t(),
                parent: String.t(),
                root: String.t(),
                permission: [
                  %{
                    allow_create_engine: boolean(),
                    allow_fine_tuning: boolean(),
                    allow_logprobs: boolean(),
                    allow_sampling: boolean(),
                    allow_search_indices: boolean(),
                    allow_view: boolean(),
                    created: integer,
                    group: String.t(),
                    id: String.t(),
                    is_blocking: boolean(),
                    object: String.t(),
                    organization: String.t()
                  }
                ]
              }
            )

  use ExOpenAI.Codegen.AstUnpacker

  # Hand-written public type mirroring @typespec above; keep the two in sync.
  @type t :: %{
          created: integer,
          id: String.t(),
          object: String.t(),
          owned_by: String.t(),
          parent: String.t(),
          root: String.t(),
          permission: [
            %{
              allow_create_engine: boolean(),
              allow_fine_tuning: boolean(),
              allow_logprobs: boolean(),
              allow_sampling: boolean(),
              allow_search_indices: boolean(),
              allow_view: boolean(),
              created: integer,
              group: String.t(),
              id: String.t(),
              is_blocking: boolean(),
              object: String.t(),
              organization: String.t()
            }
          ]
        }
end
89 |
--------------------------------------------------------------------------------
/test/ex_openai/codegen/parse_type_test.exs:
--------------------------------------------------------------------------------
defmodule ExOpenAI.Codegen.ParseTypeTest do
  use ExUnit.Case, async: true

  alias ExOpenAI.Codegen

  describe "parse_type" do
    test "simple type" do
      assert Codegen.parse_type(%{"type" => "string"}) == "string"
    end

    test "object" do
      schema = %{
        "type" => "object",
        "properties" => %{"foo" => %{"type" => "number"}}
      }

      assert Codegen.parse_type(schema) == {:object, %{"foo" => "number"}}
    end

    test "component in object" do
      schema = %{
        "type" => "object",
        "properties" => %{
          "foo" => %{"$ref" => "#/components/schemas/SomeComponent"}
        }
      }

      assert Codegen.parse_type(schema) ==
               {:object, %{"foo" => {:component, "SomeComponent"}}}
    end

    test "object in object" do
      inner = %{
        "type" => "object",
        "properties" => %{"foo" => %{"type" => "integer"}}
      }

      schema = %{"type" => "object", "properties" => %{"foo" => inner}}

      assert Codegen.parse_type(schema) ==
               {:object, %{"foo" => {:object, %{"foo" => "integer"}}}}
    end

    test "enum" do
      schema = %{"type" => "string", "enum" => ["system", "user", "assistant"]}

      assert Codegen.parse_type(schema) == {:enum, [:system, :user, :assistant]}
    end

    test "oneOf" do
      schema = %{
        "default" => "auto",
        "description" =>
          "The number of epochs to train the model for. An epoch refers to one\nfull cycle through the training dataset.\n",
        "oneOf" => [
          %{"enum" => ["auto"], "type" => "string"},
          %{"maximum" => 50, "minimum" => 1, "type" => "integer"}
        ]
      }

      assert Codegen.parse_type(schema) == {:oneOf, [{:enum, [:auto]}, "integer"]}
    end

    test "array" do
      schema = %{"type" => "array", "items" => %{"type" => "integer"}}

      assert Codegen.parse_type(schema) == {:array, "integer"}
    end

    test "component in array" do
      schema = %{
        "type" => "array",
        "items" => %{"$ref" => "#/components/schemas/SomeComponent"}
      }

      assert Codegen.parse_type(schema) == {:array, {:component, "SomeComponent"}}
    end

    test "array in array" do
      schema = %{
        "type" => "array",
        "items" => %{"type" => "array", "items" => %{"type" => "integer"}}
      }

      assert Codegen.parse_type(schema) == {:array, {:array, "integer"}}
    end

    test "object in array" do
      schema = %{
        "type" => "array",
        "items" => %{
          "type" => "object",
          "properties" => %{"foo" => %{"type" => "integer"}}
        }
      }

      assert Codegen.parse_type(schema) == {:array, {:object, %{"foo" => "integer"}}}
    end

    test "array in object" do
      schema = %{
        "type" => "object",
        "properties" => %{
          "foo" => %{"type" => "array", "items" => %{"type" => "string"}},
          "bar" => %{"type" => "number"}
        }
      }

      assert Codegen.parse_type(schema) ==
               {:object, %{"foo" => {:array, "string"}, "bar" => "number"}}
    end
  end
end
138 |
--------------------------------------------------------------------------------
/test/fixture/vcr_cassettes/responses_basic_usage.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "request": {
4 | "options": [],
5 | "body": "{\"input\":\"tell me a joke\",\"model\":\"gpt-4o-mini\"}",
6 | "url": "https://api.openai.com/v1/responses?",
7 | "headers": {
8 | "Authorization": "***",
9 | "Content-type": "application/json"
10 | },
11 | "method": "post",
12 | "request_body": ""
13 | },
14 | "response": {
15 | "binary": false,
16 | "type": "ok",
17 | "body": "{\n \"id\": \"resp_67d2a82e377c81929d2d0a90d7be62680d257b2afabb8523\",\n \"object\": \"response\",\n \"created_at\": 1741858862,\n \"status\": \"completed\",\n \"error\": null,\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"output\": [\n {\n \"type\": \"message\",\n \"id\": \"msg_67d2a82ea20881929a8fb281c1a95da40d257b2afabb8523\",\n \"status\": \"completed\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"output_text\",\n \"text\": \"Why did the scarecrow win an award? \\n\\nBecause he was outstanding in his field!\",\n \"annotations\": []\n }\n ]\n }\n ],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": null,\n \"reasoning\": {\n \"effort\": null,\n \"generate_summary\": null\n },\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n }\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\": 29,\n \"input_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 19,\n \"output_tokens_details\": {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 48\n },\n \"user\": null,\n \"metadata\": {}\n}",
18 | "headers": {
19 | "Date": "Thu, 13 Mar 2025 09:41:02 GMT",
20 | "Content-Type": "application/json",
21 | "Content-Length": "1240",
22 | "Connection": "keep-alive",
23 | "openai-version": "2020-10-01",
24 | "x-request-id": "req_1a29e9c9f1e0c1d4574b10156590edd3",
25 | "openai-processing-ms": "680",
26 | "strict-transport-security": "max-age=31536000; includeSubDomains; preload",
27 | "cf-cache-status": "DYNAMIC",
28 | "X-Content-Type-Options": "nosniff",
29 | "Server": "cloudflare",
30 | "CF-RAY": "91fa92c03837d74f-NRT",
31 | "alt-svc": "h3=\":443\"; ma=86400"
32 | },
33 | "status_code": 200
34 | }
35 | },
36 | {
37 | "request": {
38 | "options": [],
39 | "body": "",
40 | "url": "https://api.openai.com/v1/responses/resp_67d2a82e377c81929d2d0a90d7be62680d257b2afabb8523?",
41 | "headers": {
42 | "Authorization": "***",
43 | "Content-type": "application/json"
44 | },
45 | "method": "get",
46 | "request_body": ""
47 | },
48 | "response": {
49 | "binary": false,
50 | "type": "ok",
51 | "body": "{\n \"id\": \"resp_67d2a82e377c81929d2d0a90d7be62680d257b2afabb8523\",\n \"object\": \"response\",\n \"created_at\": 1741858862,\n \"status\": \"completed\",\n \"error\": null,\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"output\": [\n {\n \"type\": \"message\",\n \"id\": \"msg_67d2a82ea20881929a8fb281c1a95da40d257b2afabb8523\",\n \"status\": \"completed\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"output_text\",\n \"text\": \"Why did the scarecrow win an award? \\n\\nBecause he was outstanding in his field!\",\n \"annotations\": []\n }\n ]\n }\n ],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": null,\n \"reasoning\": {\n \"effort\": null,\n \"generate_summary\": null\n },\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n }\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\": 29,\n \"input_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 19,\n \"output_tokens_details\": {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 48\n },\n \"user\": null,\n \"metadata\": {}\n}",
52 | "headers": {
53 | "Date": "Thu, 13 Mar 2025 09:41:03 GMT",
54 | "Content-Type": "application/json",
55 | "Content-Length": "1240",
56 | "Connection": "keep-alive",
57 | "openai-version": "2020-10-01",
58 | "x-request-id": "req_bd8bf1abb9306e67f25e219ef6a1ed07",
59 | "openai-processing-ms": "63",
60 | "strict-transport-security": "max-age=31536000; includeSubDomains; preload",
61 | "cf-cache-status": "DYNAMIC",
62 | "X-Content-Type-Options": "nosniff",
63 | "Server": "cloudflare",
64 | "CF-RAY": "91fa92c64db6d74f-NRT",
65 | "alt-svc": "h3=\":443\"; ma=86400"
66 | },
67 | "status_code": 200
68 | }
69 | }
70 | ]
--------------------------------------------------------------------------------
/test/fixture/vcr_cassettes/responses_basic_usage_second_message.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "request": {
4 | "options": [],
5 | "body": "{\"input\":\"Please tell me what I asked you to do in my previous message ok??\",\"model\":\"gpt-4o-mini\",\"previous_response_id\":\"resp_67d2a82e377c81929d2d0a90d7be62680d257b2afabb8523\"}",
6 | "url": "https://api.openai.com/v1/responses?",
7 | "headers": {
8 | "Authorization": "***",
9 | "Content-type": "application/json"
10 | },
11 | "request_body": "",
12 | "method": "post"
13 | },
14 | "response": {
15 | "binary": false,
16 | "type": "ok",
17 | "body": "{\n \"id\": \"resp_67d2afcfd41081929d3cbe871a7645580d257b2afabb8523\",\n \"object\": \"response\",\n \"created_at\": 1741860815,\n \"status\": \"completed\",\n \"error\": null,\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"output\": [\n {\n \"type\": \"message\",\n \"id\": \"msg_67d2afd082b88192ae5b90ff477c7ec50d257b2afabb8523\",\n \"status\": \"completed\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"output_text\",\n \"text\": \"You asked me to tell you a joke. Would you like to hear another one?\",\n \"annotations\": []\n }\n ]\n }\n ],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": \"resp_67d2a82e377c81929d2d0a90d7be62680d257b2afabb8523\",\n \"reasoning\": {\n \"effort\": null,\n \"generate_summary\": null\n },\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n }\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\": 70,\n \"input_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 18,\n \"output_tokens_details\": {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 88\n },\n \"user\": null,\n \"metadata\": {}\n}",
18 | "headers": {
19 | "Date": "Thu, 13 Mar 2025 10:13:36 GMT",
20 | "Content-Type": "application/json",
21 | "Content-Length": "1279",
22 | "Connection": "keep-alive",
23 | "openai-version": "2020-10-01",
24 | "x-request-id": "req_ed9fab4e4998bd734b895f210346ee82",
25 | "openai-processing-ms": "1008",
26 | "strict-transport-security": "max-age=31536000; includeSubDomains; preload",
27 | "cf-cache-status": "DYNAMIC",
28 | "X-Content-Type-Options": "nosniff",
29 | "Server": "cloudflare",
30 | "CF-RAY": "91fac27248aad74f-NRT",
31 | "alt-svc": "h3=\":443\"; ma=86400"
32 | },
33 | "status_code": 200
34 | }
35 | },
36 | {
37 | "request": {
38 | "options": [],
39 | "body": "",
40 | "url": "https://api.openai.com/v1/responses/resp_67d2afcfd41081929d3cbe871a7645580d257b2afabb8523?",
41 | "headers": {
42 | "Authorization": "***",
43 | "Content-type": "application/json"
44 | },
45 | "request_body": "",
46 | "method": "get"
47 | },
48 | "response": {
49 | "binary": false,
50 | "type": "ok",
51 | "body": "{\n \"id\": \"resp_67d2afcfd41081929d3cbe871a7645580d257b2afabb8523\",\n \"object\": \"response\",\n \"created_at\": 1741860815,\n \"status\": \"completed\",\n \"error\": null,\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"output\": [\n {\n \"type\": \"message\",\n \"id\": \"msg_67d2afd082b88192ae5b90ff477c7ec50d257b2afabb8523\",\n \"status\": \"completed\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"output_text\",\n \"text\": \"You asked me to tell you a joke. Would you like to hear another one?\",\n \"annotations\": []\n }\n ]\n }\n ],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": \"resp_67d2a82e377c81929d2d0a90d7be62680d257b2afabb8523\",\n \"reasoning\": {\n \"effort\": null,\n \"generate_summary\": null\n },\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n }\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\": 70,\n \"input_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 18,\n \"output_tokens_details\": {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 88\n },\n \"user\": null,\n \"metadata\": {}\n}",
52 | "headers": {
53 | "Date": "Thu, 13 Mar 2025 10:13:37 GMT",
54 | "Content-Type": "application/json",
55 | "Content-Length": "1279",
56 | "Connection": "keep-alive",
57 | "openai-version": "2020-10-01",
58 | "x-request-id": "req_0e8107c7776d5265f53266763b9d0e18",
59 | "openai-processing-ms": "66",
60 | "strict-transport-security": "max-age=31536000; includeSubDomains; preload",
61 | "cf-cache-status": "DYNAMIC",
62 | "X-Content-Type-Options": "nosniff",
63 | "Server": "cloudflare",
64 | "CF-RAY": "91fac279da48d74f-NRT",
65 | "alt-svc": "h3=\":443\"; ma=86400"
66 | },
67 | "status_code": 200
68 | }
69 | }
70 | ]
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Elixir SDK for OpenAI APIs
2 |
3 | [](https://hex.pm/packages/ex_openai)
4 | [](https://hexdocs.pm/ex_openai)
5 | [](https://hex.pm/packages/ex_openai)
6 |
7 | ExOpenAI is an (unofficial) Elixir SDK for interacting with the [OpenAI APIs](https://platform.openai.com/docs/api-reference/introduction). This SDK is fully auto-generated using [metaprogramming](https://elixirschool.com/en/lessons/advanced/metaprogramming/) and always reflects the latest state of the OpenAI API.
8 |
9 | ## Features
10 |
11 | - Complete implementation of all OpenAI API endpoints
12 | - Auto-generated with strict typing and documentation
13 | - Elixir-style API with required arguments as function parameters and optional arguments as keyword lists
14 | - Support for streaming responses with SSE
15 | - Editor features: autocompletion, typechecking, and inline documentation
16 | - Support for OpenAI-compatible APIs (like OpenRouter)
17 |
18 |
19 |
20 |
21 |
22 | ## Installation
23 |
24 | Add `:ex_openai` as a dependency in your `mix.exs` file:
25 |
26 | ```elixir
27 | def deps do
28 | [
29 | {:ex_openai, "~> 1.8.0"}
30 | ]
31 | end
32 | ```
33 |
34 | ## Quick Start
35 |
36 | ### Configuration
37 |
38 | ```elixir
39 | import Config
40 |
41 | config :ex_openai,
42 | api_key: System.get_env("OPENAI_API_KEY"),
43 | organization_key: System.get_env("OPENAI_ORGANIZATION_KEY"),
44 | # Optional settings
45 | base_url: System.get_env("OPENAI_API_URL"),
46 | http_options: [recv_timeout: 50_000],
47 | http_headers: [{"OpenAI-Beta", "assistants=v2"}]
48 | ```
49 |
50 | ### Basic Usage
51 |
52 | ```elixir
53 | # List available models
54 | {:ok, models} = ExOpenAI.Models.list_models()
55 |
56 | # Create a completion
57 | {:ok, completion} = ExOpenAI.Completions.create_completion("gpt-3.5-turbo-instruct", "The sky is")
58 |
59 | # Chat completion
60 | messages = [
61 | %ExOpenAI.Components.ChatCompletionRequestUserMessage{role: :user, content: "What is the capital of France?"}
62 | ]
63 | {:ok, response} = ExOpenAI.Chat.create_chat_completion(messages, "gpt-4")
64 |
65 | # Responses
66 | {:ok, response} = ExOpenAI.Responses.create_response(
67 | "Tell me a joke about programming",
68 | "gpt-4o-mini"
69 | )
70 |
71 | # Continue the conversation
72 | {:ok, follow_up} = ExOpenAI.Responses.create_response(
73 | "Explain why that joke is funny",
74 | "gpt-4o-mini",
75 | previous_response_id: response.id
76 | )
77 | ```
78 |
79 | More examples in [Examples](docs/examples.md)
80 |
81 | ## API Overview
82 |
83 | ExOpenAI supports all OpenAI API endpoints, organized into logical modules:
84 |
85 | - **Assistants** - Create and manage assistants
86 | - **Audio** - Speech, transcription, and translation
87 | - **Chat** - Chat completions API
88 | - **Completions** - Text completion API
89 | - **Embeddings** - Vector embeddings
90 | - **Files** - File management
91 | - **Images** - Image generation and editing
92 | - **Models** - Model management
93 | - **Responses** - Stateful conversation API
94 | - **Threads** - Thread-based conversations
95 | - **Vector Stores** - Vector database operations
96 |
97 | For detailed documentation on each module, see the [API Documentation](https://hexdocs.pm/ex_openai).
98 |
99 | ## Advanced Usage
100 |
101 | ### Streaming Responses
102 |
103 | ```elixir
104 | # Using a callback function
105 | callback = fn
106 | :finish -> IO.puts "Done"
107 | {:data, data} -> IO.puts "Data: #{inspect(data)}"
108 | {:error, err} -> IO.puts "Error: #{inspect(err)}"
109 | end
110 |
111 | ExOpenAI.Completions.create_completion(
112 | "gpt-3.5-turbo-instruct",
113 | "Tell me a story",
114 | stream: true,
115 | stream_to: callback
116 | )
117 | ```
118 |
119 | For more advanced streaming options, see the [Streaming Guide](docs/streaming.md).
120 |
121 | ### File Uploads
122 |
123 | ```elixir
124 | # Simple file upload
125 | image_data = File.read!("path/to/image.png")
126 | {:ok, result} = ExOpenAI.Images.create_image_variation(image_data)
127 |
128 | # With filename information
129 | audio_data = File.read!("path/to/audio.wav")
130 | {:ok, transcript} = ExOpenAI.Audio.create_transcription({"audio.wav", audio_data}, "whisper-1")
131 | ```
132 |
133 | ## Documentation
134 |
135 | - [Complete API Reference](https://hexdocs.pm/ex_openai)
136 | - [Explanation on codegen](docs/codegen.md)
137 | - [Streaming Guide](docs/streaming.md)
138 | - [Configuration Options](docs/configuration.md)
139 | - [Examples](docs/examples.md)
140 |
141 | ## Contributing
142 |
143 | Contributions are welcome! If you find a bug or want to add a feature, please open an issue or submit a PR.
144 |
145 | To update the SDK when OpenAI changes their API:
146 |
147 | ```bash
148 | mix update_openai_docs
149 | ```
150 |
151 | ## Projects Using ExOpenAI
152 |
153 | - [Elixir ChatGPT](https://github.com/dvcrn/elixir-chatgpt)
154 | - [FixMyJP](https://fixmyjp.d.sh)
155 | - [GPT Slack Bot](https://github.com/dvcrn/gpt-slack-bot)
156 |
157 | _Add yours with a PR!_
158 |
159 | ## License
160 |
161 | Available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
162 |
--------------------------------------------------------------------------------
/docs/configuration.md:
--------------------------------------------------------------------------------
1 | # Configuration Options
2 |
3 | ExOpenAI provides several configuration options to customize its behavior. This document explains all available configuration options and how to use them effectively.
4 |
5 | ## Basic Configuration
6 |
7 | The most basic configuration requires setting your OpenAI API key:
8 |
9 | ```elixir
10 | # In config/config.exs
11 | config :ex_openai,
12 | api_key: System.get_env("OPENAI_API_KEY")
13 | ```
14 |
15 | ## Complete Configuration Options
16 |
17 | Here's a complete list of all configuration options:
18 |
19 | ```elixir
20 | config :ex_openai,
21 | # Required: Your OpenAI API key
22 | api_key: System.get_env("OPENAI_API_KEY"),
23 |
24 | # Optional: Your OpenAI organization ID
25 | organization_key: System.get_env("OPENAI_ORGANIZATION_KEY"),
26 |
27 | # Optional: Base URL for API requests (default: https://api.openai.com/v1)
28 | base_url: System.get_env("OPENAI_API_URL"),
29 |
30 | # Optional: HTTP options passed to HTTPoison
31 | http_options: [recv_timeout: 50_000],
32 |
33 | # Optional: Default request headers
34 | http_headers: [
35 | {"OpenAI-Beta", "assistants=v2"}
36 | ],
37 |
38 | # Optional: HTTP client module (default: ExOpenAI.Client)
39 | http_client: ExOpenAI.Client
40 | ```
41 |
42 | ## Configuration Options Explained
43 |
44 | ### API Key
45 |
46 | The `api_key` is required for authenticating with the OpenAI API. You can find your API key in the [OpenAI dashboard](https://platform.openai.com/account/api-keys).
47 |
48 | ```elixir
49 | config :ex_openai,
50 | api_key: System.get_env("OPENAI_API_KEY")
51 | ```
52 |
53 | ### Organization Key
54 |
55 | If you belong to multiple organizations, you can specify which organization to use with the `organization_key`:
56 |
57 | ```elixir
58 | config :ex_openai,
59 | organization_key: System.get_env("OPENAI_ORGANIZATION_KEY")
60 | ```
61 |
62 | ### Base URL
63 |
64 | By default, ExOpenAI uses `https://api.openai.com/v1` as the base URL. You can override this if you're using a proxy or a different endpoint:
65 |
66 | ```elixir
67 | config :ex_openai,
68 | base_url: "https://your-proxy.example.com/v1"
69 | ```
70 |
71 | ### HTTP Options
72 |
73 | You can customize the HTTP client behavior by passing options to HTTPoison:
74 |
75 | ```elixir
76 | config :ex_openai,
77 | http_options: [
78 | recv_timeout: 50_000, # 50 seconds timeout
79 | ssl: [versions: [:"tlsv1.2"]],
80 | proxy: "http://proxy.example.com:8080"
81 | ]
82 | ```
83 |
84 | Common HTTP options include:
85 |
86 | - `recv_timeout`: Maximum time to wait for a response (milliseconds)
87 | - `timeout`: Connection timeout (milliseconds)
88 | - `ssl`: SSL options
89 | - `proxy`: Proxy server configuration
90 | - `hackney`: Options passed directly to hackney
91 |
92 | ### HTTP Headers
93 |
94 | You can set default headers for all requests:
95 |
96 | ```elixir
97 | config :ex_openai,
98 | http_headers: [
99 | {"OpenAI-Beta", "assistants=v2"},
100 | {"User-Agent", "MyApp/1.0"}
101 | ]
102 | ```
103 |
104 | ### HTTP Client
105 |
106 | For testing or custom HTTP handling, you can specify a different HTTP client module:
107 |
108 | ```elixir
109 | config :ex_openai,
110 | http_client: MyCustomClient
111 | ```
112 |
113 | The custom client must implement the same interface as `ExOpenAI.Client`.
114 |
115 | ## Per-Request Configuration
116 |
117 | You can override configuration options on a per-request basis by passing them as options to API calls:
118 |
119 | ```elixir
120 | ExOpenAI.Models.list_models(
121 | openai_api_key: "different-api-key",
122 | openai_organization_key: "different-org",
123 | base_url: "https://different-api-endpoint.com/v1"
124 | )
125 | ```
126 |
127 | This is useful for applications that need to switch between different API keys or organizations.
128 |
129 | ## Environment Variables
130 |
131 | ExOpenAI respects the following environment variables:
132 |
133 | - `OPENAI_API_KEY`: Your OpenAI API key
134 | - `OPENAI_ORGANIZATION_KEY`: Your OpenAI organization ID
135 | - `OPENAI_API_URL` or `OPENAI_API_BASE`: Base URL for API requests
136 |
137 | These can be set in your environment or through a `.env` file with a package like [dotenvy](https://hex.pm/packages/dotenvy).
138 |
139 | ## Testing Configuration
140 |
141 | For testing, you might want to use a mock client:
142 |
143 | ```elixir
144 | # In config/test.exs
145 | config :ex_openai,
146 | http_client: MyApp.MockOpenAIClient
147 | ```
148 |
149 | Then implement your mock client:
150 |
151 | ```elixir
152 | defmodule MyApp.MockOpenAIClient do
153 | def request(method, url, body, headers, options) do
154 | # Return mock responses based on the request
155 | case {method, url} do
156 | {:get, "/models"} ->
157 | {:ok, %{status_code: 200, body: ~s({"data": [{"id": "gpt-4", "object": "model"}]})}}
158 | _ ->
159 | {:error, %{reason: "Not implemented in mock"}}
160 | end
161 | end
162 | end
163 | ```
164 |
165 | ## Configuration Best Practices
166 |
167 | 1. **Use environment variables** for sensitive information like API keys
168 | 2. **Set reasonable timeouts** based on your application's needs
169 | 3. **Consider using different configurations** for development, testing, and production
170 | 4. **Use per-request overrides** sparingly, for special cases only
171 | 5. **Keep your API keys secure** and rotate them regularly
--------------------------------------------------------------------------------
/test/ex_openai/codegen/type_spec_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.Codegen.TypeSpecTest do
2 |   use ExUnit.Case, async: true
3 | 
4 |   describe "type_to_spec" do # type_to_spec/1 turns parsed OpenAPI types into quoted typespec AST
5 |     test "basic types" do
6 |       assert ExOpenAI.Codegen.type_to_spec("number") == {:float, [], []}
7 |       assert ExOpenAI.Codegen.type_to_spec(:number) == {:float, [], []}
8 |       assert ExOpenAI.Codegen.type_to_spec("integer") == {:integer, [], []}
9 |       assert ExOpenAI.Codegen.type_to_spec(:integer) == {:integer, [], []}
10 |       assert ExOpenAI.Codegen.type_to_spec("boolean") == {:boolean, [], []}
11 |       assert ExOpenAI.Codegen.type_to_spec(:boolean) == {:boolean, [], []}
12 | 
13 |       assert ExOpenAI.Codegen.type_to_spec("pid") == {:pid, [], []}
14 |       assert ExOpenAI.Codegen.type_to_spec(:pid) == {:pid, [], []}
15 | 
16 |       assert ExOpenAI.Codegen.type_to_spec("string") ==
17 |                {{:., [], [{:__aliases__, [alias: false], [:String]}, :t]}, [], []}
18 | 
19 |       # bitstrings are either just bitstring, or a tuple of {filename, bitstring}
20 |       assert ExOpenAI.Codegen.type_to_spec("bitstring") == {
21 |                :|,
22 |                [],
23 |                [
24 |                  {:bitstring, [], []},
25 |                  {{{:., [], [{:__aliases__, [alias: false], [:String]}, :t]}, [], []},
26 |                   {:bitstring, [], []}}
27 |                ]
28 |              }
29 | 
30 |       assert ExOpenAI.Codegen.type_to_spec(:bitstring) == {
31 |                :|,
32 |                [],
33 |                [
34 |                  {:bitstring, [], []},
35 |                  {{{:., [], [{:__aliases__, [alias: false], [:String]}, :t]}, [], []},
36 |                   {:bitstring, [], []}}
37 |                ]
38 |              }
39 |     end
40 | 
41 |     test "enum" do # union (:|) AST nests right-to-left, so members read reversed
42 |       assert ExOpenAI.Codegen.type_to_spec({:enum, [:hello, :world, :again]}) ==
43 |                {:|, [], [:again, {:|, [], [:world, :hello]}]}
44 |     end
45 | 
46 |     test "array" do # typed arrays become a one-element list wrapping the inner spec
47 |       assert ExOpenAI.Codegen.type_to_spec("array") == {:list, [], []}
48 |       assert ExOpenAI.Codegen.type_to_spec({:array, "number"}) == [{:float, [], []}]
49 |     end
50 | 
51 |     test "object" do # typed objects become %{} AST with atom keys
52 |       assert ExOpenAI.Codegen.type_to_spec("object") == {:map, [], []}
53 | 
54 |       assert ExOpenAI.Codegen.type_to_spec({:object, %{"a" => "number"}}) ==
55 |                {:%{}, [], [{:a, {:float, [], []}}]}
56 |     end
57 | 
58 |     test "array in object" do
59 |       assert ExOpenAI.Codegen.type_to_spec({:object, %{"a" => {:array, "integer"}}}) ==
60 |                {:%{}, [], [{:a, [{:integer, [], []}]}]}
61 |     end
62 | 
63 |     test "component" do # component refs resolve to ExOpenAI.Components.<Name>.t()
64 |       assert ExOpenAI.Codegen.type_to_spec({:component, "Foobar"}) ==
65 |                {{:., [], [{:__aliases__, [alias: false], [:ExOpenAI, :Components, :Foobar]}, :t]},
66 |                 [], []}
67 |     end
68 | 
69 |     test "oneOf" do # oneOf/anyOf both map to a plain union type
70 |       assert ExOpenAI.Codegen.type_to_spec({:oneOf, [{:enum, [:auto]}, "integer"]}) ==
71 |                {:|, [], [{:integer, [], []}, :auto]}
72 |     end
73 | 
74 |     test "anyOf" do # same spec shape as oneOf
75 |       assert ExOpenAI.Codegen.type_to_spec({:anyOf, [{:enum, [:auto]}, "integer"]}) ==
76 |                {:|, [], [{:integer, [], []}, :auto]}
77 |     end
78 | 
79 |     test "allOf" do # allOf is also flattened into a union in the typespec
80 |       assert ExOpenAI.Codegen.type_to_spec(
81 |                {:allOf, [{:component, "AssistantsApiResponseFormatOption"}, "string"]}
82 |              ) ==
83 |                {:|, [],
84 |                 [
85 |                   {{:., [], [{:__aliases__, [alias: false], [:String]}, :t]}, [], []},
86 |                   {{:., [],
87 |                     [
88 |                       {:__aliases__, [alias: false],
89 |                        [:ExOpenAI, :Components, :AssistantsApiResponseFormatOption]},
90 |                       :t
91 |                     ]}, [], []}
92 |                 ]}
93 |     end
94 | 
95 |     test "complex nesting" do
96 |       sp =
97 |         {:object,
98 |          %{
99 |            "a" =>
100 |              {:object,
101 |               %{
102 |                 "b" => {:array, "string"},
103 |                 "c" => {:array, {:component, "Foo"}}
104 |               }}
105 |          }}
106 | 
107 |       assert ExOpenAI.Codegen.type_to_spec(sp) ==
108 |                {
109 |                  :%{},
110 |                  [],
111 |                  [
112 |                    a:
113 |                      {:%{}, [],
114 |                       [
115 |                         b: [{{:., [], [{:__aliases__, [alias: false], [:String]}, :t]}, [], []}],
116 |                         c: [
117 |                           {{:., [],
118 |                             [{:__aliases__, [alias: false], [:ExOpenAI, :Components, :Foo]}, :t]},
119 |                            [], []}
120 |                         ]
121 |                       ]}
122 |                  ]
123 |                }
124 |     end
125 |   end
126 | 
127 |   test "string_to_component" do # resolves a schema name to its Components module
128 |     assert ExOpenAI.Codegen.string_to_component("Hello") == ExOpenAI.Components.Hello
129 |   end
130 | 
131 |   test "keys_to_atoms" do # converts string keys to atoms, recursively
132 |     assert ExOpenAI.Codegen.keys_to_atoms(%{
133 |              "a" => 123,
134 |              "b" => %{
135 |                "c" => 23,
136 |                "d" => 456
137 |              }
138 |            }) == %{
139 |              a: 123,
140 |              b: %{
141 |                c: 23,
142 |                d: 456
143 |              }
144 |            }
145 |   end
146 | end
147 | 
--------------------------------------------------------------------------------
/docs/streaming.md:
--------------------------------------------------------------------------------
1 | # Streaming Guide
2 |
3 | ExOpenAI supports streaming responses from OpenAI's API, which is particularly useful for chat and completion endpoints. This guide explains how to use streaming effectively in your applications.
4 |
5 | ## Streaming Options
6 |
7 | ExOpenAI provides two methods for handling streaming responses:
8 |
9 | 1. **Callback Function** - Pass a function that processes each chunk as it arrives
10 | 2. **Streaming Client** - Create a dedicated process to handle the stream
11 |
12 | ## Streaming with a Callback Function
13 |
14 | The simplest way to handle streaming is to pass a callback function to the `stream_to` parameter:
15 |
16 | ```elixir
17 | callback = fn
18 | :finish -> IO.puts "Stream finished"
19 | {:data, data} -> IO.puts "Received data: #{inspect(data)}"
20 | {:error, err} -> IO.puts "Error: #{inspect(err)}"
21 | end
22 |
23 | ExOpenAI.Completions.create_completion(
24 | "gpt-3.5-turbo-instruct",
25 | "Tell me a story about a robot",
26 | stream: true,
27 | stream_to: callback
28 | )
29 | ```
30 |
31 | The callback function will be called with:
32 | - `{:data, data}` for each chunk of data received
33 | - `{:error, error}` if an error occurs
34 | - `:finish` when the stream completes
35 |
36 | ## Streaming with a Dedicated Process
37 |
38 | For more complex applications, you can create a dedicated process to handle the stream:
39 |
40 | 1. First, create a module that implements the `ExOpenAI.StreamingClient` behaviour:
41 |
42 | ```elixir
43 | defmodule MyStreamingClient do
44 | use ExOpenAI.StreamingClient
45 |
46 | @impl true
47 | def handle_data(data, state) do
48 | IO.puts("Received data: #{inspect(data)}")
49 | # Process the data chunk
50 | {:noreply, state}
51 | end
52 |
53 | @impl true
54 | def handle_error(error, state) do
55 | IO.puts("Error: #{inspect(error)}")
56 | # Handle the error
57 | {:noreply, state}
58 | end
59 |
60 | @impl true
61 | def handle_finish(state) do
62 | IO.puts("Stream finished")
63 | # Clean up or finalize processing
64 | {:noreply, state}
65 | end
66 | end
67 | ```
68 |
69 | 2. Then, start the client and pass its PID to the API call:
70 |
71 | ```elixir
72 | {:ok, pid} = MyStreamingClient.start_link(initial_state)
73 |
74 | ExOpenAI.Chat.create_chat_completion(
75 | messages,
76 | "gpt-4",
77 | stream: true,
78 | stream_to: pid
79 | )
80 | ```
81 |
82 | ## Example: Building a Chat Interface
83 |
84 | Here's a more complete example of using streaming to build a simple chat interface:
85 |
86 | ```elixir
87 | defmodule ChatInterface do
88 | use ExOpenAI.StreamingClient
89 |
90 | def start_chat(initial_prompt) do
91 | {:ok, pid} = __MODULE__.start_link(%{buffer: "", complete_message: ""})
92 |
93 | messages = [
94 | %ExOpenAI.Components.ChatCompletionRequestUserMessage{
95 | role: :user,
96 | content: initial_prompt
97 | }
98 | ]
99 |
100 | ExOpenAI.Chat.create_chat_completion(
101 | messages,
102 | "gpt-4",
103 | stream: true,
104 | stream_to: pid
105 | )
106 |
107 | pid
108 | end
109 |
110 | @impl true
111 | def handle_data(data, state) do
112 | # Extract the content from the delta if it exists
113 | content = case data do
114 | %{choices: [%{"delta" => %{"content" => content}}]} when is_binary(content) ->
115 | content
116 | _ ->
117 | ""
118 | end
119 |
120 | # Update the buffer and print the new content
121 | if content != "" do
122 | IO.write(content)
123 | {:noreply, %{state | buffer: state.buffer <> content, complete_message: state.complete_message <> content}}
124 | else
125 | {:noreply, state}
126 | end
127 | end
128 |
129 | @impl true
130 | def handle_error(error, state) do
131 | IO.puts("\nError: #{inspect(error)}")
132 | {:noreply, state}
133 | end
134 |
135 | @impl true
136 | def handle_finish(state) do
137 | IO.puts("\n\nChat response complete.")
138 | # The complete message is now available in state.complete_message
139 | {:noreply, state}
140 | end
141 |
142 | # Function to get the complete message after streaming is done
143 | def get_complete_message(pid) do
144 | :sys.get_state(pid).complete_message
145 | end
146 | end
147 | ```
148 |
149 | Usage:
150 |
151 | ```elixir
152 | # Start a chat
153 | pid = ChatInterface.start_chat("Tell me about quantum computing")
154 |
155 | # After the stream completes, get the full message
156 | complete_response = ChatInterface.get_complete_message(pid)
157 | ```
158 |
159 | ## Streaming with Different Endpoints
160 |
161 | Most OpenAI endpoints that support streaming work in a similar way, but the structure of the streamed data may differ:
162 |
163 | ### Chat Completions
164 |
165 | ```elixir
166 | ExOpenAI.Chat.create_chat_completion(
167 | messages,
168 | "gpt-4",
169 | stream: true,
170 | stream_to: callback_or_pid
171 | )
172 | ```
173 |
174 | The streamed data will contain delta updates to the assistant's message.
175 |
176 | ### Text Completions
177 |
178 | ```elixir
179 | ExOpenAI.Completions.create_completion(
180 | "gpt-3.5-turbo-instruct",
181 | prompt,
182 | stream: true,
183 | stream_to: callback_or_pid
184 | )
185 | ```
186 |
187 | The streamed data will contain text fragments.
188 |
189 | ## Caveats and Limitations
190 |
191 | - Type information for streamed data is not always accurate in the current version
192 | - Return types for streaming requests may not match the actual returned data
193 | - Streaming increases the total number of tokens used slightly compared to non-streaming requests
194 | - Error handling in streaming contexts requires special attention
195 |
196 | ## Best Practices
197 |
198 | 1. **Buffer Management**: Always maintain a buffer to reconstruct the complete response
199 | 2. **Error Handling**: Implement robust error handling in your streaming clients
200 | 3. **Timeouts**: Consider implementing timeouts for long-running streams
201 | 4. **Testing**: Test your streaming code with both short and very long responses
202 | 5. **State Management**: Design your streaming client's state to handle all the information you need
--------------------------------------------------------------------------------
/test/ex_openai/codegen/convert_response_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.Codegen.ConvertResponseTest do
2 |   use ExUnit.Case, async: false # :meck swaps the Codegen module globally, so no async
3 |   alias ExOpenAI.Codegen
4 | 
5 |   # Define test components in a way that they won't be mocked
6 |   defmodule TestComponent do
7 |     defstruct [:id, :name, :value]
8 | 
9 |     def unpack_ast, do: nil
10 |   end
11 | 
12 |   defmodule AnotherTestComponent do
13 |     defstruct [:id, :type, :data]
14 | 
15 |     def unpack_ast, do: nil
16 |   end
17 | 
18 |   # Create a test-specific version of string_to_component
19 |   defp test_string_to_component("TestComponent"), do: TestComponent
20 |   defp test_string_to_component("AnotherTestComponent"), do: AnotherTestComponent
21 |   defp test_string_to_component(other), do: Module.concat(ExOpenAI.Components, other)
22 | 
23 |   # Create a wrapper for convert_response that uses our test_string_to_component
24 |   defp test_convert_response(response, response_type) do
25 |     # NOTE: no need to capture the original function here —
26 |     # :meck.unload/1 in the `after` clause restores the real module.
27 | 
28 |     try do
29 |       # Replace with our test function
30 |       :meck.new(Codegen, [:passthrough])
31 |       :meck.expect(Codegen, :string_to_component, &test_string_to_component/1)
32 | 
33 |       # Call the function
34 |       Codegen.convert_response(response, response_type)
35 |     after
36 |       # Clean up
37 |       :meck.unload(Codegen)
38 |     end
39 |   end
40 | 
41 |   describe "convert_response/2" do
42 |     test "handles response with 'response' and 'type' keys" do
43 |       response = {:ok, %{"response" => %{"id" => "123", "name" => "test"}, "type" => "some_type"}}
44 |       result = test_convert_response(response, nil)
45 |       assert result == {:ok, %{"id" => "123", "name" => "test"}}
46 |     end
47 | 
48 |     test "passes through reference values unchanged" do
49 |       ref = make_ref()
50 |       response = {:ok, ref}
51 |       result = test_convert_response(response, nil)
52 |       assert result == {:ok, ref}
53 |     end
54 | 
55 |     test "returns original response when response_type is nil" do
56 |       response = {:ok, %{"id" => "123", "name" => "test"}}
57 |       result = test_convert_response(response, nil)
58 |       assert result == {:ok, %{"id" => "123", "name" => "test"}}
59 |     end
60 | 
61 |     test "converts response to component struct when keys match" do
62 |       response = {:ok, %{"id" => "123", "name" => "test", "value" => 42}}
63 |       result = test_convert_response(response, {:component, "TestComponent"})
64 | 
65 |       # Check that we got a TestComponent struct with the expected values
66 |       assert match?({:ok, %TestComponent{}}, result)
67 |       {:ok, struct} = result
68 |       assert struct.id == "123"
69 |       assert struct.name == "test"
70 |       assert struct.value == 42
71 |     end
72 | 
73 |     test "returns original response when no keys match the component" do
74 |       response = {:ok, %{"foo" => "bar", "baz" => "qux"}}
75 |       result = test_convert_response(response, {:component, "TestComponent"})
76 |       assert result == {:ok, %{"foo" => "bar", "baz" => "qux"}}
77 |     end
78 | 
79 |     test "handles oneOf with multiple possible components - best match wins" do
80 |       # This response has more keys matching TestComponent than AnotherTestComponent
81 |       response = {:ok, %{"id" => "123", "name" => "test", "value" => 42, "extra" => "ignored"}}
82 |       result = test_convert_response(response, {:oneOf, [{:component, "TestComponent"}, {:component, "AnotherTestComponent"}]})
83 | 
84 |       assert match?({:ok, %TestComponent{}}, result)
85 |       {:ok, struct} = result
86 |       assert struct.id == "123"
87 |       assert struct.name == "test"
88 |       assert struct.value == 42
89 | 
90 |       # This response has more keys matching AnotherTestComponent than TestComponent
91 |       response = {:ok, %{"id" => "123", "type" => "test", "data" => %{}, "extra" => "ignored"}}
92 |       result = test_convert_response(response, {:oneOf, [{:component, "TestComponent"}, {:component, "AnotherTestComponent"}]})
93 | 
94 |       assert match?({:ok, %AnotherTestComponent{}}, result)
95 |       {:ok, struct} = result
96 |       assert struct.id == "123"
97 |       assert struct.type == "test"
98 |       assert struct.data == %{}
99 |     end
100 | 
101 |     test "handles oneOf when no components match" do
102 |       response = {:ok, %{"foo" => "bar", "baz" => "qux"}}
103 |       result = test_convert_response(response, {:oneOf, [{:component, "TestComponent"}, {:component, "AnotherTestComponent"}]})
104 |       assert result == {:ok, %{"foo" => "bar", "baz" => "qux"}}
105 |     end
106 | 
107 |     test "passes through error tuples unchanged" do
108 |       response = {:error, "Something went wrong"}
109 |       result = test_convert_response(response, {:component, "TestComponent"})
110 |       assert result == {:error, "Something went wrong"}
111 |     end
112 | 
113 |     test "handles nested data structures" do
114 |       response = {:ok, %{
115 |         "id" => "123",
116 |         "name" => "test",
117 |         "value" => %{"nested" => "data"},
118 |         "array" => [%{"item" => 1}, %{"item" => 2}]
119 |       }}
120 | 
121 |       result = test_convert_response(response, {:component, "TestComponent"})
122 | 
123 |       assert match?({:ok, %TestComponent{}}, result)
124 |       {:ok, struct} = result
125 |       assert struct.id == "123"
126 |       assert struct.name == "test"
127 |       assert is_map(struct.value)
128 |       assert struct.value.nested == "data"
129 |     end
130 | 
131 |     test "handles string keys by converting them to atoms" do
132 |       response = {:ok, %{"id" => "123", "name" => "test"}}
133 |       result = test_convert_response(response, {:component, "TestComponent"})
134 | 
135 |       assert match?({:ok, %TestComponent{}}, result)
136 |       {:ok, struct} = result
137 |       assert struct.id == "123"
138 |       assert struct.name == "test"
139 |       assert struct.value == nil
140 |     end
141 | 
142 |     test "handles unknown response types by returning original response" do
143 |       response = {:ok, %{"id" => "123", "name" => "test"}}
144 |       result = test_convert_response(response, {:unknown_type, "something"})
145 |       assert result == {:ok, %{"id" => "123", "name" => "test"}}
146 |     end
147 |   end
148 | end
--------------------------------------------------------------------------------
/test/ex_openai/codegen/finalize_schema_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.Codegen.FinalizeSchemaTest do
2 |   use ExUnit.Case, async: true
3 | 
4 |   describe "finalize_schema" do # flattens allOf sub-component references into one component
5 |     test "finalizes schema with correct sub-components" do
6 |       # some nested components
7 |       # CreateModelResponseProperties embeds ModelResponseProperties
8 |       component_mapping = %{
9 |         "CreateModelResponseProperties" => %{
10 |           kind: :allOf,
11 |           components: [component: "ModelResponseProperties"],
12 |           required_props: [],
13 |           optional_props: [],
14 |           required_prop_keys: []
15 |         },
16 |         "ModelResponseProperties" => %{
17 |           description: "",
18 |           kind: :component,
19 |           required_props: [],
20 |           optional_props: [
21 |             %{
22 |               name: "model",
23 |               type:
24 |                 {:anyOf,
25 |                  [
26 |                    "string",
27 |                    {:enum,
28 |                     [
29 |                       :"o3-mini",
30 |                       :"o3-mini-2025-01-31",
31 |                       :o1
32 |                     ]}
33 |                  ]},
34 |               description:
35 |                 "Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI\noffers a wide range of models with different capabilities, performance\ncharacteristics, and price points. Refer to the [model guide](/docs/models)\nto browse and compare available models.\n",
36 |               required: false
37 |             }
38 |           ]
39 |         }
40 |       }
41 | 
42 |       test_schema = %{
43 |         kind: :allOf,
44 |         components: [
45 |           {:component, "CreateModelResponseProperties"},
46 |           %{
47 |             description: "",
48 |             kind: :component,
49 |             required_props: [
50 |               %{
51 |                 name: "messages",
52 |                 type: {:array, {:component, "ChatCompletionRequestMessage"}},
53 |                 description:
54 |                   "A list of messages comprising the conversation so far. Depending on the\n[model](/docs/models) you use, different message types (modalities) are\nsupported, like [text](/docs/guides/text-generation),\n[images](/docs/guides/vision), and [audio](/docs/guides/audio).\n",
55 |                 example: ""
56 |               }
57 |             ],
58 |             optional_props: [
59 |               %{
60 |                 name: "tools",
61 |                 type: {:array, {:component, "ChatCompletionTool"}},
62 |                 description:
63 |                   "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.\n",
64 |                 example: ""
65 |               },
66 |               %{
67 |                 name: "top_logprobs",
68 |                 type: "integer",
69 |                 description:
70 |                   "An integer between 0 and 20 specifying the number of most likely tokens to\nreturn at each token position, each with an associated log probability.\n`logprobs` must be set to `true` if this parameter is used.\n",
71 |                 example: ""
72 |               }
73 |             ]
74 |           }
75 |         ],
76 |         required_props: [],
77 |         optional_props: [],
78 |         required_prop_keys: ["model", "messages"]
79 |       }
80 | 
81 |       expected =
82 |         {"Something",
83 |          %{
84 |            description: "",
85 |            kind: :component,
86 |            required_props: [
87 |              %{
88 |                name: "messages",
89 |                type: {:array, {:component, "ChatCompletionRequestMessage"}},
90 |                description:
91 |                  "A list of messages comprising the conversation so far. Depending on the\n[model](/docs/models) you use, different message types (modalities) are\nsupported, like [text](/docs/guides/text-generation),\n[images](/docs/guides/vision), and [audio](/docs/guides/audio).\n",
92 |                example: ""
93 |              },
94 |              %{
95 |                name: "model",
96 |                type:
97 |                  {:anyOf,
98 |                   [
99 |                     "string",
100 |                     {:enum,
101 |                      [
102 |                        :"o3-mini",
103 |                        :"o3-mini-2025-01-31",
104 |                        :o1
105 |                      ]}
106 |                   ]},
107 |                description:
108 |                  "Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI\noffers a wide range of models with different capabilities, performance\ncharacteristics, and price points. Refer to the [model guide](/docs/models)\nto browse and compare available models.\n",
109 |                required: false
110 |              }
111 |            ],
112 |            optional_props: [
113 |              %{
114 |                name: "tools",
115 |                type: {:array, {:component, "ChatCompletionTool"}},
116 |                description:
117 |                  "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.\n",
118 |                example: ""
119 |              },
120 |              %{
121 |                name: "top_logprobs",
122 |                type: "integer",
123 |                description:
124 |                  "An integer between 0 and 20 specifying the number of most likely tokens to\nreturn at each token position, each with an associated log probability.\n`logprobs` must be set to `true` if this parameter is used.\n",
125 |                example: ""
126 |              }
127 |            ]
128 |          }}
129 | 
130 |       actual = ExOpenAI.Codegen.finalize_schema({"Something", test_schema}, component_mapping)
131 | 
132 |       # the final should be a combined map of all the nested objects that are referenced
133 |       # the final optional_props and required_props reflect what was provided in
134 |       # required_prop_keys: ["model", "messages"]
135 |       assert actual == expected
136 | 
137 |       {name, content} = actual
138 | 
139 |       # should keep existing name
140 |       assert name == "Something"
141 |       # required should be 2 elements now, because ["model", "messages"] are 2
142 |       assert Enum.count(content.required_props) == 2
143 | 
144 |       # this one should be "model"
145 |       assert Enum.find(content.required_props, fn prop -> prop.name == "model" end) != nil
146 |       # this one should be "messages"
147 |       assert Enum.find(content.required_props, fn prop -> prop.name == "messages" end) != nil
148 | 
149 |       # check that none of the required fields are in options
150 |       required_prop_names = content.required_props |> Enum.map(& &1.name)
151 |       optional_prop_names = content.optional_props |> Enum.map(& &1.name)
152 | 
153 |       intersection =
154 |         MapSet.intersection(MapSet.new(required_prop_names), MapSet.new(optional_prop_names))
155 | 
156 |       assert Enum.count(intersection) == 0
157 |     end
158 |   end
159 | end
160 | 
--------------------------------------------------------------------------------
/mix.lock:
--------------------------------------------------------------------------------
1 | %{
2 | "bunt": {:hex, :bunt, "0.2.1", "e2d4792f7bc0ced7583ab54922808919518d0e57ee162901a16a1b6664ef3b14", [:mix], [], "hexpm", "a330bfb4245239787b15005e66ae6845c9cd524a288f0d141c148b02603777a5"},
3 | "certifi": {:hex, :certifi, "2.14.0", "ed3bef654e69cde5e6c022df8070a579a79e8ba2368a00acf3d75b82d9aceeed", [:rebar3], [], "hexpm", "ea59d87ef89da429b8e905264fdec3419f84f2215bb3d81e07a18aac919026c3"},
4 | "credo": {:hex, :credo, "1.7.1", "6e26bbcc9e22eefbff7e43188e69924e78818e2fe6282487d0703652bc20fd62", [:mix], [{:bunt, "~> 0.2.1", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2.8", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "e9871c6095a4c0381c89b6aa98bc6260a8ba6addccf7f6a53da8849c748a58a2"},
5 | "dialyxir": {:hex, :dialyxir, "1.4.1", "a22ed1e7bd3a3e3f197b68d806ef66acb61ee8f57b3ac85fc5d57354c5482a93", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "84b795d6d7796297cca5a3118444b80c7d94f7ce247d49886e7c291e1ae49801"},
6 | "earmark_parser": {:hex, :earmark_parser, "1.4.39", "424642f8335b05bb9eb611aa1564c148a8ee35c9c8a8bba6e129d51a3e3c6769", [:mix], [], "hexpm", "06553a88d1f1846da9ef066b87b57c6f605552cfbe40d20bd8d59cc6bde41944"},
7 | "erlex": {:hex, :erlex, "0.2.6", "c7987d15e899c7a2f34f5420d2a2ea0d659682c06ac607572df55a43753aa12e", [:mix], [], "hexpm", "2ed2e25711feb44d52b17d2780eabf998452f6efda104877a3881c2f8c0c0c75"},
8 | "ex_doc": {:hex, :ex_doc, "0.34.1", "9751a0419bc15bc7580c73fde506b17b07f6402a1e5243be9e0f05a68c723368", [:mix], [{:earmark_parser, "~> 1.4.39", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "d441f1a86a235f59088978eff870de2e815e290e44a8bd976fe5d64470a4c9d2"},
9 | "exactor": {:hex, :exactor, "2.2.4", "5efb4ddeb2c48d9a1d7c9b465a6fffdd82300eb9618ece5d34c3334d5d7245b1", [:mix], [], "hexpm", "1222419f706e01bfa1095aec9acf6421367dcfab798a6f67c54cf784733cd6b5"},
10 | "exjsx": {:hex, :exjsx, "4.0.0", "60548841e0212df401e38e63c0078ec57b33e7ea49b032c796ccad8cde794b5c", [:mix], [{:jsx, "~> 2.8.0", [hex: :jsx, repo: "hexpm", optional: false]}], "hexpm", "32e95820a97cffea67830e91514a2ad53b888850442d6d395f53a1ac60c82e07"},
11 | "exvcr": {:hex, :exvcr, "0.16.0", "11579f43c88ae81f57c82ce4f09e3ebda4c40117c859ed39e61a653c3a0b4ff4", [:mix], [{:exjsx, "~> 4.0", [hex: :exjsx, repo: "hexpm", optional: false]}, {:finch, "~> 0.16", [hex: :finch, repo: "hexpm", optional: true]}, {:httpoison, "~> 1.0 or ~> 2.0", [hex: :httpoison, repo: "hexpm", optional: true]}, {:httpotion, "~> 3.1", [hex: :httpotion, repo: "hexpm", optional: true]}, {:ibrowse, "4.4.0", [hex: :ibrowse, repo: "hexpm", optional: true]}, {:meck, "~> 0.9", [hex: :meck, repo: "hexpm", optional: false]}], "hexpm", "8f576af22369942f7a1482baff1f31e2f45983cf6fac45d49d2bd2e84b4d5be8"},
12 | "file_system": {:hex, :file_system, "0.2.10", "fb082005a9cd1711c05b5248710f8826b02d7d1784e7c3451f9c1231d4fc162d", [:mix], [], "hexpm", "41195edbfb562a593726eda3b3e8b103a309b733ad25f3d642ba49696bf715dc"},
13 | "hackney": {:hex, :hackney, "1.23.0", "55cc09077112bcb4a69e54be46ed9bc55537763a96cd4a80a221663a7eafd767", [:rebar3], [{:certifi, "~> 2.14.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~> 6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~> 1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~> 1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.4.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "6cd1c04cd15c81e5a493f167b226a15f0938a84fc8f0736ebe4ddcab65c0b44e"},
14 | "httpoison": {:hex, :httpoison, "2.2.2", "15420e9e5bbb505b931b2f589dc8be0c3b21e2a91a2c6ba882d99bf8f3ad499d", [:mix], [{:hackney, "~> 1.21", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm", "de7ac49fe2ffd89219972fdf39b268582f6f7f68d8cd29b4482dacca1ce82324"},
15 | "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"},
16 | "jason": {:hex, :jason, "1.4.1", "af1504e35f629ddcdd6addb3513c3853991f694921b1b9368b0bd32beb9f1b63", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "fbb01ecdfd565b56261302f7e1fcc27c4fb8f32d56eab74db621fc154604a7a1"},
17 | "json": {:hex, :json, "1.4.1", "8648f04a9439765ad449bc56a3ff7d8b11dd44ff08ffcdefc4329f7c93843dfa", [:mix], [], "hexpm", "9abf218dbe4ea4fcb875e087d5f904ef263d012ee5ed21d46e9dbca63f053d16"},
18 | "jsx": {:hex, :jsx, "2.8.3", "a05252d381885240744d955fbe3cf810504eb2567164824e19303ea59eef62cf", [:mix, :rebar3], [], "hexpm", "fc3499fed7a726995aa659143a248534adc754ebd16ccd437cd93b649a95091f"},
19 | "makeup": {:hex, :makeup, "1.1.2", "9ba8837913bdf757787e71c1581c21f9d2455f4dd04cfca785c70bbfff1a76a3", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "cce1566b81fbcbd21eca8ffe808f33b221f9eee2cbc7a1706fc3da9ff18e6cac"},
20 | "makeup_elixir": {:hex, :makeup_elixir, "0.16.2", "627e84b8e8bf22e60a2579dad15067c755531fea049ae26ef1020cad58fe9578", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "41193978704763f6bbe6cc2758b84909e62984c7752b3784bd3c218bb341706b"},
21 | "makeup_erlang": {:hex, :makeup_erlang, "1.0.0", "6f0eff9c9c489f26b69b61440bf1b238d95badae49adac77973cbacae87e3c2e", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "ea7a9307de9d1548d2a72d299058d1fd2339e3d398560a0e46c27dab4891e4d2"},
22 | "meck": {:hex, :meck, "0.9.2", "85ccbab053f1db86c7ca240e9fc718170ee5bda03810a6292b5306bf31bae5f5", [:rebar3], [], "hexpm", "81344f561357dc40a8344afa53767c32669153355b626ea9fcbc8da6b3045826"},
23 | "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"},
24 | "mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"},
25 | "mimerl": {:hex, :mimerl, "1.3.0", "d0cd9fc04b9061f82490f6581e0128379830e78535e017f7780f37fea7545726", [:rebar3], [], "hexpm", "a1e15a50d1887217de95f0b9b0793e32853f7c258a5cd227650889b38839fe9d"},
26 | "mix_test_watch": {:hex, :mix_test_watch, "1.2.0", "1f9acd9e1104f62f280e30fc2243ae5e6d8ddc2f7f4dc9bceb454b9a41c82b42", [:mix], [{:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}], "hexpm", "278dc955c20b3fb9a3168b5c2493c2e5cffad133548d307e0a50c7f2cfbf34f6"},
27 | "mock": {:hex, :mock, "0.3.8", "7046a306b71db2488ef54395eeb74df0a7f335a7caca4a3d3875d1fc81c884dd", [:mix], [{:meck, "~> 0.9.2", [hex: :meck, repo: "hexpm", optional: false]}], "hexpm", "7fa82364c97617d79bb7d15571193fc0c4fe5afd0c932cef09426b3ee6fe2022"},
28 | "nimble_parsec": {:hex, :nimble_parsec, "1.4.0", "51f9b613ea62cfa97b25ccc2c1b4216e81df970acd8e16e8d1bdc58fef21370d", [:mix], [], "hexpm", "9c565862810fb383e9838c1dd2d7d2c437b3d13b267414ba6af33e50d2d1cf28"},
29 | "parse_trans": {:hex, :parse_trans, "3.4.1", "6e6aa8167cb44cc8f39441d05193be6e6f4e7c2946cb2759f015f8c56b76e5ff", [:rebar3], [], "hexpm", "620a406ce75dada827b82e453c19cf06776be266f5a67cff34e1ef2cbb60e49a"},
30 | "poison": {:hex, :poison, "5.0.0", "d2b54589ab4157bbb82ec2050757779bfed724463a544b6e20d79855a9e43b24", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "11dc6117c501b80c62a7594f941d043982a1bd05a1184280c0d9166eb4d8d3fc"},
31 | "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"},
32 | "unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", "25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"},
33 | "yamerl": {:hex, :yamerl, "0.10.0", "4ff81fee2f1f6a46f1700c0d880b24d193ddb74bd14ef42cb0bcf46e81ef2f8e", [:rebar3], [], "hexpm", "346adb2963f1051dc837a2364e4acf6eb7d80097c0f53cbdc3046ec8ec4b4e6e"},
34 | "yaml_elixir": {:hex, :yaml_elixir, "2.11.0", "9e9ccd134e861c66b84825a3542a1c22ba33f338d82c07282f4f1f52d847bd50", [:mix], [{:yamerl, "~> 0.10", [hex: :yamerl, repo: "hexpm", optional: false]}], "hexpm", "53cc28357ee7eb952344995787f4bb8cc3cecbf189652236e9b163e8ce1bc242"},
35 | }
36 |
--------------------------------------------------------------------------------
/lib/ex_openai/client.ex:
--------------------------------------------------------------------------------
defmodule ExOpenAI.Client do
  @moduledoc false
  use HTTPoison.Base
  alias ExOpenAI.Config

  # Prepends the configured API base URL (or the per-request override) to the path.
  def add_base_url(url, base_url), do: Config.api_url(base_url) <> url

  # JSON-decodes the response body when possible. Non-JSON bodies (e.g. the
  # binary audio returned by the audio/speech endpoint) are left as-is.
  def process_response_body(body) do
    case Jason.decode(body) do
      {:ok, decoded_json} -> {:ok, decoded_json}
      # audio/speech endpoint returns binary data, so leave as is
      _ -> {:ok, body}
    end
  end

  # Normalizes an HTTPoison result into {:ok, body} | {:error, reason}.
  # Async (streaming) requests surface as AsyncResponse and return {:ok, ref}.
  def handle_response(httpoison_response) do
    case httpoison_response do
      {:ok, %HTTPoison.Response{status_code: 200, body: {:ok, body}}} ->
        {:ok, body}

      {:ok, %HTTPoison.Response{status_code: 200, body: body}} ->
        {:ok, body}

      # any non-200 status: the decoded body carries the API error details
      {:ok, %HTTPoison.Response{body: {:ok, body}}} ->
        {:error, body}

      {:ok, %HTTPoison.AsyncResponse{id: ref}} ->
        {:ok, ref}

      {:error, %HTTPoison.Error{reason: reason}} ->
        {:error, reason}
    end
  end

  # Adds the Authorization header, preferring an explicit api_key over the
  # globally configured one.
  @spec add_bearer_header(list(), String.t() | nil) :: list()
  def add_bearer_header(headers, api_key \\ nil) do
    if is_nil(api_key) do
      [{"Authorization", "Bearer #{Config.api_key()}"} | headers]
    else
      [{"Authorization", "Bearer #{api_key}"} | headers]
    end
  end

  # Adds the OpenAI-Organization header when an org key is passed or configured;
  # otherwise leaves the headers untouched.
  @spec add_organization_header(list(), String.t() | nil) :: list()
  def add_organization_header(headers, org_key \\ nil) do
    if is_nil(org_key) do
      if Config.org_key() do
        [{"OpenAI-Organization", Config.org_key()} | headers]
      else
        headers
      end
    else
      [{"OpenAI-Organization", org_key} | headers]
    end
  end

  @spec add_json_request_headers(list()) :: list()
  def add_json_request_headers(headers) do
    [{"Content-type", "application/json"} | headers]
  end

  @spec add_multipart_request_headers(list()) :: list()
  def add_multipart_request_headers(headers) do
    [{"Content-type", "multipart/form-data"} | headers]
  end

  def request_options(), do: Config.http_options()

  def default_headers(), do: Config.http_headers()

  # Resolves streaming options: when `stream: true` and a `stream_to`
  # pid/function are both present, spawns a StreamingClient that parses SSE
  # chunks and forwards them to `stream_to`; otherwise disables streaming.
  def stream_options(request_options, convert_response) do
    with {:ok, stream_val} <- Keyword.fetch(request_options, :stream),
         {:ok, stream_to} when is_pid(stream_to) or is_function(stream_to) <-
           Keyword.fetch(request_options, :stream_to),
         true <- stream_val do
      # spawn a new StreamingClient and tell it to forward data to `stream_to`
      {:ok, sse_client_pid} = ExOpenAI.StreamingClient.start_link(stream_to, convert_response)
      [stream_to: sse_client_pid]
    else
      _ ->
        [stream_to: nil]
    end
  end

  # Shared per-request setup: merges configured defaults with per-call options
  # and resolves streaming. Returns {keyword_options_for_httpoison, options_map}
  # (the map view is used for key lookups like :base_url and key overrides).
  defp prepare_request_options(request_options, convert_response) do
    request_options = Keyword.merge(request_options(), request_options)
    stream_options = stream_options(request_options, convert_response)

    request_options =
      Map.merge(Enum.into(request_options, %{}), Enum.into(stream_options, %{}))
      |> Map.to_list()

    {request_options, Enum.into(request_options, %{})}
  end

  # Builds the final header list: configured defaults + content-type +
  # optional per-request organization / API key overrides.
  defp build_headers(request_options_map, add_content_type_fx) do
    default_headers()
    |> add_content_type_fx.()
    |> add_organization_header(Map.get(request_options_map, :openai_organization_key, nil))
    |> add_bearer_header(Map.get(request_options_map, :openai_api_key, nil))
  end

  # Removes bookkeeping keys that must not be serialized into a request body.
  defp strip_params(params) do
    params
    # remove stream_to from params as PID messes with Jason
    |> Map.drop([:stream_to, :openai_organization_key, :openai_api_key, :base_url])
  end

  def api_get(url, request_options \\ [], convert_response) do
    {request_options, request_options_map} =
      prepare_request_options(request_options, convert_response)

    headers = build_headers(request_options_map, &add_json_request_headers/1)

    url
    |> add_base_url(Map.get(request_options_map, :base_url))
    |> get(headers, request_options)
    |> handle_response()
    |> convert_response.()
  end

  def api_post(url, params \\ [], request_options \\ [], convert_response) do
    body =
      params
      |> Enum.into(%{})
      |> strip_params()
      |> Jason.encode()
      |> elem(1)

    {request_options, request_options_map} =
      prepare_request_options(request_options, convert_response)

    headers = build_headers(request_options_map, &add_json_request_headers/1)

    url
    |> add_base_url(Map.get(request_options_map, :base_url))
    |> post(body, headers, request_options)
    |> handle_response()
    |> convert_response.()
  end

  def api_delete(url, request_options \\ [], convert_response) do
    {request_options, request_options_map} =
      prepare_request_options(request_options, convert_response)

    headers = build_headers(request_options_map, &add_json_request_headers/1)

    url
    |> add_base_url(Map.get(request_options_map, :base_url))
    |> delete(headers, request_options)
    |> handle_response()
    |> convert_response.()
  end

  # Converts one {name, {filename, content}} param into an HTTPoison multipart
  # part. Non-printable binary content is treated as a file upload with a
  # Content-Type derived from the filename.
  defp multipart_param({name, {filename, content}}) do
    strname = Atom.to_string(name)
    # Ensure filename is a string
    strfilename = to_string(filename)

    cond do
      # Strings can be valid bitstreams and bitstreams are valid binaries
      # Using String.valid? for comparison instead
      is_bitstring(content) and not String.valid?(content) ->
        # Use MIME library to determine Content-Type
        mime_type = MIME.from_path(strfilename)
        # Add Content-Type header
        {"file", content, {"form-data", [name: strname, filename: strfilename]},
         [{"Content-Type", mime_type}]}

      # Handle cases where content might not be a binary file needing mime type (e.g., other form fields passed in this format)
      true ->
        {strname, {strfilename, content}}
    end
  end

  defp multipart_param({name, content}) do
    # This clause handles regular form fields, not file uploads
    {Atom.to_string(name), content}
  end

  def api_multipart_post(url, params \\ [], request_options \\ [], convert_response) do
    {request_options, request_options_map} =
      prepare_request_options(request_options, convert_response)

    multipart_body =
      {:multipart,
       params
       |> Enum.into(%{})
       |> strip_params()
       |> Map.to_list()
       |> Enum.map(&multipart_param/1)}

    headers = build_headers(request_options_map, &add_multipart_request_headers/1)

    url
    |> add_base_url(Map.get(request_options_map, :base_url))
    |> post(multipart_body, headers, request_options)
    |> handle_response()
    |> convert_response.()
  end

  @callback api_call(
              method :: atom(),
              url :: String.t(),
              params :: Keyword.t(),
              request_content_type :: Keyword.t(),
              request_options :: Keyword.t(),
              convert_response :: any()
            ) :: {:ok, res :: term()} | {:error, res :: term()}
  def api_call(:get, url, _params, _request_content_type, request_options, convert_response),
    do: api_get(url, request_options, convert_response)

  def api_call(:post, url, params, :"multipart/form-data", request_options, convert_response),
    do: api_multipart_post(url, params, request_options, convert_response)

  def api_call(:post, url, params, _request_content_type, request_options, convert_response),
    do: api_post(url, params, request_options, convert_response)

  def api_call(:delete, url, _params, _request_content_type, request_options, convert_response),
    do: api_delete(url, request_options, convert_response)
end
252 |
--------------------------------------------------------------------------------
/test/streaming_client_test.exs:
--------------------------------------------------------------------------------
defmodule ExOpenAI.StreamingClientTest do
  @moduledoc """
  Tests for `ExOpenAI.StreamingClient`.

  The client under test is started with the test process as the receiver, so
  forwarded events arrive here as raw `GenServer.cast` messages — hence the
  `{:"$gen_cast", ...}` shape in the assertions.
  """
  use ExUnit.Case, async: true

  alias ExOpenAI.StreamingClient

  require Logger

  # Mock implementation of a streaming client for testing
  defmodule TestStreamingClient do
    use ExOpenAI.StreamingClient

    def handle_data(data, state) do
      send(state.test_pid, {:data, data})
      {:noreply, state}
    end

    def handle_error(error, state) do
      send(state.test_pid, {:error, error})
      {:noreply, state}
    end

    def handle_finish(state) do
      send(state.test_pid, :finish)
      {:noreply, state}
    end
  end

  setup do
    # Create a simple conversion function for testing
    # (identity pass-through; production code converts into response structs)
    convert_fx = fn
      {:ok, data} -> {:ok, data}
      {:error, err} -> {:error, err}
    end

    # Start the streaming client with the test process as the receiver
    {:ok, client} = StreamingClient.start_link(self(), convert_fx)

    %{client: client}
  end

  describe "handle_info for AsyncChunk" do
    test "handles complete JSON error response", %{client: client} do
      # Create an error response similar to the one in the example
      error_chunk = %HTTPoison.AsyncChunk{
        chunk: ~s({
          "error": {
            "message": "Incorrect API key provided",
            "type": "invalid_request_error",
            "param": null,
            "code": "invalid_api_key"
          }
        }\n),
        id: make_ref()
      }

      # Send the chunk to the client
      send(client, error_chunk)

      # Assert that the error was forwarded to the test process
      assert_receive {:"$gen_cast", {:error, error_data}}, 500
      assert error_data["message"] == "Incorrect API key provided"
      assert error_data["code"] == "invalid_api_key"
    end

    test "handles SSE formatted messages", %{client: client} do
      # Create an SSE formatted chunk
      sse_chunk = %HTTPoison.AsyncChunk{
        chunk: ~s(data: {"id":"chatcmpl-123","object":"chat.completion.chunk","choices":[{"delta":{"content":"Hello"},"index":0}]}\n\n),
        id: make_ref()
      }

      # Send the chunk to the client
      send(client, sse_chunk)

      # Assert that the data was extracted and forwarded
      assert_receive {:"$gen_cast", {:data, data}}, 500
      assert data["choices"]
    end

    test "handles multiple SSE messages in one chunk", %{client: client} do
      # Create a chunk with multiple SSE messages
      multi_sse_chunk = %HTTPoison.AsyncChunk{
        chunk: ~s(data: {"id":"chatcmpl-123","choices":[{"delta":{"content":"Hello"},"index":0}]}\n\n) <>
               ~s(data: {"id":"chatcmpl-123","choices":[{"delta":{"content":" world"},"index":0}]}\n\n),
        id: make_ref()
      }

      # Send the chunk to the client
      send(client, multi_sse_chunk)

      # Assert that both messages were processed
      assert_receive {:"$gen_cast", {:data, _data1}}, 500
      assert_receive {:"$gen_cast", {:data, _data2}}, 500
    end

    test "handles [DONE] message", %{client: client} do
      # Create a chunk with a [DONE] message
      done_chunk = %HTTPoison.AsyncChunk{
        chunk: ~s(data: [DONE]\n\n),
        id: make_ref()
      }

      # Send the chunk to the client
      send(client, done_chunk)

      # Assert that the finish message was sent
      assert_receive {:"$gen_cast", :finish}, 500
    end

    test "handles incomplete chunks and buffers them", %{client: client} do
      # Send an incomplete chunk (no trailing \n\n, so it must be buffered)
      incomplete_chunk1 = %HTTPoison.AsyncChunk{
        chunk: ~s(data: {"id":"chatcmpl-123","choices":[{"delta":{"content":"Hello),
        id: make_ref()
      }

      send(client, incomplete_chunk1)

      # No data should be received yet
      refute_receive {:"$gen_cast", {:data, _}}, 100

      # Send the rest of the chunk
      incomplete_chunk2 = %HTTPoison.AsyncChunk{
        chunk: ~s("},"index":0}]}\n\n),
        id: make_ref()
      }

      send(client, incomplete_chunk2)

      # Now we should receive the complete data
      assert_receive {:"$gen_cast", {:data, _data}}, 500
    end
  end

  describe "handle_info for other messages" do
    test "handles HTTPoison.Error", %{client: client} do
      # Create an error message
      error_msg = %HTTPoison.Error{reason: "test error reason"}

      # Send the error to the client
      send(client, error_msg)

      # Assert that the error was forwarded
      assert_receive {:"$gen_cast", {:error, "test error reason"}}, 500
    end

    test "handles HTTPoison.AsyncStatus with error code", %{client: client} do
      # Create a status message with an error code
      status_msg = %HTTPoison.AsyncStatus{code: 401, id: make_ref()}

      # Send the status to the client
      send(client, status_msg)

      # Assert that the error was forwarded
      assert_receive {:"$gen_cast", {:error, "received error status code: 401"}}, 500
    end

    test "handles HTTPoison.AsyncStatus with success code", %{client: client} do
      # Create a status message with a success code
      status_msg = %HTTPoison.AsyncStatus{code: 200, id: make_ref()}

      # Send the status to the client
      send(client, status_msg)

      # No error should be forwarded
      refute_receive {:"$gen_cast", {:error, _}}, 100
    end
  end

  test "integration with TestStreamingClient" do
    # Start a TestStreamingClient with the test process
    {:ok, test_client} = TestStreamingClient.start_link(%{test_pid: self()})

    # Send a data message
    GenServer.cast(test_client, {:data, "test data"})

    # Assert that the data was handled
    assert_receive {:data, "test data"}, 500

    # Send an error message
    GenServer.cast(test_client, {:error, "test error"})

    # Assert that the error was handled
    assert_receive {:error, "test error"}, 500

    # Send a finish message
    GenServer.cast(test_client, :finish)

    # Assert that the finish was handled
    assert_receive :finish, 500
  end

  # Test for the buffer handling with complete messages
  test "handles complete messages correctly when buffer ends with \\n\\n", %{client: client} do
    # Create a chunk with a complete message (ending with \n\n)
    complete_chunk = %HTTPoison.AsyncChunk{
      chunk: ~s(data: {"id":"chatcmpl-123","choices":[{"delta":{"content":"Complete message"},"index":0}]}\n\n),
      id: make_ref()
    }

    # Send the chunk to the client
    send(client, complete_chunk)

    # Assert that the message was processed
    assert_receive {:"$gen_cast", {:data, data}}, 500
    assert get_in(data, ["choices", Access.at(0), "delta", "content"]) == "Complete message"
  end

  # Test for handling a real-world error response
  test "handles real-world API error response", %{client: client} do
    # Create an error response based on the example
    error_chunk = %HTTPoison.AsyncChunk{
      chunk: ~s({\n  "error": {\n    "message": "Incorrect API key provided: sk-or-v1*************************************************************78b6. You can find your API key at https://platform.openai.com/account/api-keys.",\n    "type": "invalid_request_error",\n    "param": null,\n    "code": "invalid_api_key"\n  }\n}\n),
      id: make_ref()
    }

    # Send the chunk to the client
    send(client, error_chunk)

    # Assert that the error was forwarded to the test process
    assert_receive {:"$gen_cast", {:error, error_data}}, 500
    assert error_data["code"] == "invalid_api_key"
    assert String.contains?(error_data["message"], "Incorrect API key provided")
  end

  # Test for handling a deprecated model error
  test "handles deprecated model error", %{client: client} do
    # Create an error response for a deprecated model
    error_chunk = %HTTPoison.AsyncChunk{
      chunk: ~s({\n  "error": {\n    "message": "The model `text-davinci-003` has been deprecated, learn more here: https://platform.openai.com/docs/deprecations",\n    "type": "invalid_request_error",\n    "param": null,\n    "code": "model_not_found"\n  }\n}\n),
      id: make_ref()
    }

    # Send the chunk to the client
    send(client, error_chunk)

    # Assert that the error was forwarded to the test process
    assert_receive {:"$gen_cast", {:error, error_data}}, 500
    assert error_data["code"] == "model_not_found"
    assert String.contains?(error_data["message"], "text-davinci-003")
    assert String.contains?(error_data["message"], "deprecated")
  end
end
--------------------------------------------------------------------------------
/lib/ex_openai/streaming_client.ex:
--------------------------------------------------------------------------------
defmodule ExOpenAI.StreamingClient do
  @moduledoc """
  GenServer that consumes HTTPoison async messages (SSE streams from the API)
  and forwards decoded events to a receiver.

  The receiver (`stream_to`) may be a PID — events are delivered via
  `GenServer.cast/2` as `{:data, data}`, `{:error, err}` and `:finish` — or a
  1-arity callback function invoked with the same tuples.

  `use ExOpenAI.StreamingClient` in your own GenServer to get `handle_cast/2`
  clauses that translate those casts into the behaviour callbacks below.
  """
  use GenServer

  require Logger

  @callback handle_data(any(), any()) :: {:noreply, any()}
  @callback handle_finish(any()) :: {:noreply, any()}
  @callback handle_error(any(), any()) :: {:noreply, any()}

  defmacro __using__(_opts) do
    quote do
      @behaviour ExOpenAI.StreamingClient

      def start_link(init_args, opts \\ []) do
        GenServer.start_link(__MODULE__, init_args, opts)
      end

      def init(init_args) do
        {:ok, init_args}
      end

      # Translate the cast protocol used by forward_response/2 into the
      # behaviour callbacks.
      def handle_cast({:data, data}, state) do
        handle_data(data, state)
      end

      def handle_cast({:error, e}, state) do
        handle_error(e, state)
      end

      def handle_cast(:finish, state) do
        handle_finish(state)
      end
    end
  end

  def start_link(stream_to_pid, convert_response_fx) do
    GenServer.start_link(__MODULE__,
      stream_to: stream_to_pid,
      convert_response_fx: convert_response_fx
    )
  end

  def init(stream_to: pid, convert_response_fx: fx) do
    # `buffer` accumulates partial SSE chunks until a complete message arrives
    {:ok, %{stream_to: pid, convert_response_fx: fx, buffer: ""}}
  end

  @doc """
  Forwards the given response back to the receiver
  If receiver is a PID, will use GenServer.cast to send
  If receiver is a function, will call the function directly
  """
  def forward_response(pid, data) when is_pid(pid) do
    GenServer.cast(pid, data)
  end

  def forward_response(callback_fx, data) when is_function(callback_fx) do
    callback_fx.(data)
  end

  # Handles a single, already-isolated SSE payload (without the "data: "
  # prefix): dispatches [DONE], ignores event/keep-alive lines, and JSON
  # decodes + converts everything else before forwarding.
  def handle_chunk(
        chunk,
        %{stream_to: pid_or_fx, convert_response_fx: convert_fx}
      ) do
    chunk
    |> String.trim()
    |> case do
      "[DONE]" ->
        Logger.debug("Received [DONE]")
        forward_response(pid_or_fx, :finish)

      "event: " <> event_type ->
        Logger.debug("Received event: #{inspect(event_type)}")

      ": OPENROUTER PROCESSING" <> event_type ->
        Logger.debug("Received event: #{inspect(event_type)}")

      etc ->
        Logger.debug("Received event payload: #{inspect(etc)}")

        json =
          Jason.decode(etc)
          |> convert_fx.()

        case json do
          {:ok, res} ->
            forward_response(pid_or_fx, {:data, res})

          {:error, err} ->
            Logger.warning("Received something that isn't JSON in stream: #{inspect(etc)}")
            forward_response(pid_or_fx, {:error, err})
        end
    end
  end

  # Fast path: a chunk that is exactly the terminating [DONE] event.
  def handle_info(
        %HTTPoison.AsyncChunk{chunk: "data: [DONE]\n\n"} = chunk,
        state
      ) do
    chunk.chunk
    |> String.replace("data: ", "")
    |> handle_chunk(state)

    {:noreply, state}
  end

  # OpenRouter emits keep-alive comment stamps; ignore them.
  def handle_info(
        %HTTPoison.AsyncChunk{chunk: ": OPENROUTER PROCESSING\n\n"},
        state
      ) do
    Logger.debug("received : OPENROUTER PROCESSING stamp")
    {:noreply, state}
  end

  # General chunk handler: buffers partial data, splits complete SSE messages
  # on "\n\n", and processes each one.
  def handle_info(%HTTPoison.AsyncChunk{chunk: chunk}, state) do
    Logger.debug("Received AsyncChunk (partial): #{inspect(chunk)}")

    # Combine the existing buffer with the new chunk
    new_buffer = state.buffer <> chunk

    # Check if the buffer contains a complete JSON object (for error cases)
    case is_complete_json(new_buffer) do
      {:ok, json_obj} ->
        # We have a complete JSON object, process it directly
        process_complete_json(json_obj, state)
        # Clear the buffer since we've processed the JSON
        {:noreply, %{state | buffer: ""}}

      :incomplete ->
        # Process SSE format properly
        # First split by double newlines which separate SSE messages
        sse_messages = String.split(new_buffer, "\n\n")

        # Check if the buffer ends with "\n\n" to determine if the last message is complete
        buffer_complete = String.ends_with?(new_buffer, "\n\n")

        # If the buffer ends with "\n\n", all messages are complete
        {messages, incomplete_buffer} =
          case {sse_messages, buffer_complete} do
            {[], _} ->
              {[], ""}

            {messages, true} ->
              # All messages are complete, including the last one
              {messages, ""}

            {messages, false} ->
              # The last message might be incomplete; keep it buffered
              last_idx = length(messages) - 1
              {Enum.take(messages, last_idx), Enum.at(messages, last_idx, "")}
          end

        # Process each complete SSE message
        state_after_parse =
          Enum.reduce(messages, state, fn message, st ->
            if String.trim(message) == "" do
              st
            else
              # Parse the SSE message
              message_parts = String.split(message, "\n")

              # Extract event type and data
              {event_type, data} = extract_sse_parts(message_parts)

              case event_type do
                "response.created" ->
                  process_sse_data(data, st)

                "response.in_progress" ->
                  process_sse_data(data, st)

                "response.final" ->
                  process_sse_data(data, st)

                "response.completed" ->
                  process_sse_data(data, st)

                "[DONE]" ->
                  forward_response(st.stream_to, :finish)
                  st

                _ ->
                  # Unknown event type, try to process data anyway
                  process_sse_data(data, st)
              end
            end
          end)

        # Update the buffer in the new state
        new_state = %{state_after_parse | buffer: incomplete_buffer}
        {:noreply, new_state}
    end
  end

  def handle_info(%HTTPoison.Error{reason: reason}, state) do
    Logger.error("Error: #{inspect(reason)}")

    forward_response(state.stream_to, {:error, reason})
    {:noreply, state}
  end

  # Status line of the async response; 4xx/5xx are forwarded as errors.
  def handle_info(%HTTPoison.AsyncStatus{code: code} = status, state) do
    Logger.debug("Connection status: #{inspect(status)}")

    if code >= 400 do
      forward_response(state.stream_to, {:error, "received error status code: #{code}"})
    end

    {:noreply, state}
  end

  def handle_info(%HTTPoison.AsyncEnd{}, state) do
    # :finish is already sent when data ends
    # TODO: may need a separate event for this
    # forward_response(state.stream_to, :finish)

    {:noreply, state}
  end

  def handle_info(%HTTPoison.AsyncHeaders{} = headers, state) do
    Logger.debug("Connection headers: #{inspect(headers)}")
    {:noreply, state}
  end

  def handle_info(info, state) do
    Logger.debug("Unhandled info: #{inspect(info)}")
    {:noreply, state}
  end

  # Helper function to extract event type and data from SSE message parts.
  # Returns {event_type | nil, data | nil}.
  defp extract_sse_parts(message_parts) do
    Enum.reduce(message_parts, {nil, nil}, fn part, {event, data} ->
      cond do
        String.starts_with?(part, "event: ") ->
          {String.replace_prefix(part, "event: ", ""), data}

        String.starts_with?(part, "data: ") ->
          {event, String.replace_prefix(part, "data: ", "")}

        true ->
          {event, data}
      end
    end)
  end

  # Process the data part of an SSE message; always returns the (unchanged) state.
  defp process_sse_data(nil, state), do: state

  defp process_sse_data(data, state) do
    case data do
      "[DONE]" ->
        forward_response(state.stream_to, :finish)
        state

      _ ->
        case Jason.decode(data) do
          {:ok, decoded} ->
            # Check if the decoded JSON contains an error.
            # NOTE: Jason.decode/1 produces string keys, so the payload must be
            # read with decoded["error"] — the previous `decoded.error` raised
            # a KeyError (no :error atom key) instead of forwarding the error.
            if Map.has_key?(decoded, "error") do
              Logger.warning("Received error in stream: #{inspect(decoded["error"])}")
              forward_response(state.stream_to, {:error, decoded["error"]})
              state
            else
              case state.convert_response_fx.({:ok, decoded}) do
                {:ok, message} ->
                  forward_response(state.stream_to, {:data, message})

                e ->
                  Logger.warning(
                    "Something went wrong trying to decode the response: #{inspect(e)}"
                  )
              end

              state
            end

          {:error, _} ->
            Logger.warning("Received something that isn't valid JSON in stream: #{inspect(data)}")
            state
        end
    end
  end

  # Helper function to check if a string contains a complete JSON *error*
  # object; anything else (including valid non-error JSON) is :incomplete so
  # it flows through the regular SSE path.
  defp is_complete_json(str) do
    # Try to parse the string as JSON
    case Jason.decode(str) do
      {:ok, decoded} ->
        # If it's a complete JSON with an error field, return it
        if is_map(decoded) && Map.has_key?(decoded, "error") do
          {:ok, decoded}
        else
          :incomplete
        end

      {:error, _} ->
        :incomplete
    end
  end

  # Process a complete JSON object (typically an error); returns the state.
  defp process_complete_json(json_obj, state) do
    if Map.has_key?(json_obj, "error") do
      error_data = Map.get(json_obj, "error")
      Logger.warning("Received error in stream: #{inspect(error_data)}")
      forward_response(state.stream_to, {:error, error_data})
    else
      # Handle other types of complete JSON objects if needed
      case state.convert_response_fx.({:ok, json_obj}) do
        {:ok, message} ->
          forward_response(state.stream_to, {:data, message})

        e ->
          Logger.warning("Something went wrong trying to decode the response: #{inspect(e)}")
      end
    end

    state
  end
end
324 |
--------------------------------------------------------------------------------
/test/fixture/vcr_cassettes/math_assistant.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "request": {
4 | "options": {
5 | "recv_timeout": 30000
6 | },
7 | "body": "{\"name\":\"Math Teacher\",\"tools\":[{\"type\":\"code_interpreter\"}],\"model\":\"gpt-4o\"}",
8 | "url": "https://api.openai.com/v1/assistants?",
9 | "headers": {
10 | "Authorization": "***",
11 | "OpenAI-Organization": "***",
12 | "Content-type": "application/json",
13 | "OpenAI-Beta": "assistants=v2"
14 | },
15 | "method": "post",
16 | "request_body": ""
17 | },
18 | "response": {
19 | "binary": false,
20 | "type": "ok",
21 | "body": "{\n \"id\": \"asst_pF5jY1T3YTFO9zfVgLCLJXen\",\n \"object\": \"assistant\",\n \"created_at\": 1720838525,\n \"name\": \"Math Teacher\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": []\n }\n },\n \"metadata\": {},\n \"response_format\": \"auto\"\n}",
22 | "headers": {
23 | "Date": "Sat, 13 Jul 2024 02:42:05 GMT",
24 | "Content-Type": "application/json",
25 | "Content-Length": "418",
26 | "Connection": "keep-alive",
27 | "openai-version": "2020-10-01",
28 | "openai-organization": "mercari-8",
29 | "x-request-id": "req_04688bd2b9fca7c9f40ed301e0dc087c",
30 | "openai-processing-ms": "75",
31 | "strict-transport-security": "max-age=15552000; includeSubDomains; preload",
32 | "CF-Cache-Status": "DYNAMIC",
33 | "Set-Cookie": "__cf_bm=stJu94NaiHURTrn75MVEIMnwn_ooZ2wqVrdhB7ZPobg-1720838525-1.0.1.1-WpDGfydEkh2qf5fuhw6Ht3b8Q9xAPuJajgYh9uaHfYxEHsS8fGtPPOG8dqSR4GvPyMcmY1pjz3DMyjZfFHPSCw; path=/; expires=Sat, 13-Jul-24 03:12:05 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
34 | "X-Content-Type-Options": "nosniff",
35 | "Server": "cloudflare",
36 | "CF-RAY": "8a25eaef2da1795f-NRT",
37 | "alt-svc": "h3=\":443\"; ma=86400"
38 | },
39 | "status_code": 200
40 | }
41 | },
42 | {
43 | "request": {
44 | "options": {
45 | "recv_timeout": 30000
46 | },
47 | "body": "{}",
48 | "url": "https://api.openai.com/v1/threads?",
49 | "headers": {
50 | "Authorization": "***",
51 | "OpenAI-Organization": "***",
52 | "Content-type": "application/json",
53 | "OpenAI-Beta": "assistants=v2"
54 | },
55 | "method": "post",
56 | "request_body": ""
57 | },
58 | "response": {
59 | "binary": false,
60 | "type": "ok",
61 | "body": "{\n \"id\": \"thread_DhBICESO7TvfyhBUXiqCPnFl\",\n \"object\": \"thread\",\n \"created_at\": 1720838525,\n \"metadata\": {},\n \"tool_resources\": {}\n}",
62 | "headers": {
63 | "Date": "Sat, 13 Jul 2024 02:42:05 GMT",
64 | "Content-Type": "application/json",
65 | "Content-Length": "137",
66 | "Connection": "keep-alive",
67 | "openai-version": "2020-10-01",
68 | "openai-organization": "mercari-8",
69 | "x-request-id": "req_c1f6a84ea16fe564021fda33ccee7888",
70 | "openai-processing-ms": "30",
71 | "strict-transport-security": "max-age=15552000; includeSubDomains; preload",
72 | "CF-Cache-Status": "DYNAMIC",
73 | "Set-Cookie": "__cf_bm=SPfQrKqa79woAewqeJbZ5oK6VzDHDh.K8ZgsBj0t0nw-1720838525-1.0.1.1-xQDfFZ4LKhUZGcxfxkdxkZv97qMO.0CUwWUXbnuTQkIvTnmb91iVQmfVVfDDwemLecJ8dt4wJ6VUOEM6SMok5Q; path=/; expires=Sat, 13-Jul-24 03:12:05 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
74 | "X-Content-Type-Options": "nosniff",
75 | "Server": "cloudflare",
76 | "CF-RAY": "8a25eaf0df82795f-NRT",
77 | "alt-svc": "h3=\":443\"; ma=86400"
78 | },
79 | "status_code": 200
80 | }
81 | },
82 | {
83 | "request": {
84 | "options": {
85 | "recv_timeout": 30000
86 | },
87 | "body": "{\"role\":\"user\",\"content\":\"I need to solve the equation `3x + 11 = 14`. Can you help me?\"}",
88 | "url": "https://api.openai.com/v1/threads/thread_DhBICESO7TvfyhBUXiqCPnFl/messages?",
89 | "headers": {
90 | "Authorization": "***",
91 | "OpenAI-Organization": "***",
92 | "Content-type": "application/json",
93 | "OpenAI-Beta": "assistants=v2"
94 | },
95 | "method": "post",
96 | "request_body": ""
97 | },
98 | "response": {
99 | "binary": false,
100 | "type": "ok",
101 | "body": "{\n \"id\": \"msg_fp1mk5184X7Ah5Qjl6wCVetA\",\n \"object\": \"thread.message\",\n \"created_at\": 1720838525,\n \"assistant_id\": null,\n \"thread_id\": \"thread_DhBICESO7TvfyhBUXiqCPnFl\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"I need to solve the equation `3x + 11 = 14`. Can you help me?\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}",
102 | "headers": {
103 | "Date": "Sat, 13 Jul 2024 02:42:05 GMT",
104 | "Content-Type": "application/json",
105 | "Content-Length": "435",
106 | "Connection": "keep-alive",
107 | "openai-version": "2020-10-01",
108 | "openai-organization": "mercari-8",
109 | "x-request-id": "req_247f1976132c9d8f21696c82d0df8228",
110 | "openai-processing-ms": "49",
111 | "strict-transport-security": "max-age=15552000; includeSubDomains; preload",
112 | "CF-Cache-Status": "DYNAMIC",
113 | "Set-Cookie": "__cf_bm=9h9lB92e3prfhSYS55lezSv1PYw8wD.r2tR_C6Y0Ivg-1720838525-1.0.1.1-iHfuQplXvE1BZiGaMQdRwha2wixDOoysvByfQHkmaaZ3ltvPKAnZ24QeK_KEccuzshVdzVBMu5BuzjTsETVSbg; path=/; expires=Sat, 13-Jul-24 03:12:05 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
114 | "X-Content-Type-Options": "nosniff",
115 | "Server": "cloudflare",
116 | "CF-RAY": "8a25eaf228e2795f-NRT",
117 | "alt-svc": "h3=\":443\"; ma=86400"
118 | },
119 | "status_code": 200
120 | }
121 | },
122 | {
123 | "request": {
124 | "options": {
125 | "recv_timeout": 30000
126 | },
127 | "body": "{\"assistant_id\":\"asst_pF5jY1T3YTFO9zfVgLCLJXen\"}",
128 | "url": "https://api.openai.com/v1/threads/thread_DhBICESO7TvfyhBUXiqCPnFl/runs?",
129 | "headers": {
130 | "Authorization": "***",
131 | "OpenAI-Organization": "***",
132 | "Content-type": "application/json",
133 | "OpenAI-Beta": "assistants=v2"
134 | },
135 | "method": "post",
136 | "request_body": ""
137 | },
138 | "response": {
139 | "binary": false,
140 | "type": "ok",
141 | "body": "{\n \"id\": \"run_5j4vPClNuTQjTxC45Bk1IJTk\",\n \"object\": \"thread.run\",\n \"created_at\": 1720838526,\n \"assistant_id\": \"asst_pF5jY1T3YTFO9zfVgLCLJXen\",\n \"thread_id\": \"thread_DhBICESO7TvfyhBUXiqCPnFl\",\n \"status\": \"queued\",\n \"started_at\": null,\n \"expires_at\": 1720839126,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"required_action\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {},\n \"metadata\": {},\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_completion_tokens\": null,\n \"max_prompt_tokens\": null,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"incomplete_details\": null,\n \"usage\": null,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}",
142 | "headers": {
143 | "Date": "Sat, 13 Jul 2024 02:42:06 GMT",
144 | "Content-Type": "application/json",
145 | "Content-Length": "847",
146 | "Connection": "keep-alive",
147 | "openai-version": "2020-10-01",
148 | "openai-organization": "mercari-8",
149 | "x-request-id": "req_1d8d8495e7f7580926b77498d64c9934",
150 | "openai-processing-ms": "240",
151 | "strict-transport-security": "max-age=15552000; includeSubDomains; preload",
152 | "CF-Cache-Status": "DYNAMIC",
153 | "Set-Cookie": "__cf_bm=vFAM3bVZAboIBuxQIZ4SQWCWKTs0GrGlcimRSp9mKTw-1720838526-1.0.1.1-pRzzuXwdWmVqfO04K2J_bTZyObvhHTf0HD6LIRTG0UHuuC4vRSDZSyxqDwJvZ42ECTPZNmLpj11qdkdqy8HrXw; path=/; expires=Sat, 13-Jul-24 03:12:06 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
154 | "X-Content-Type-Options": "nosniff",
155 | "Server": "cloudflare",
156 | "CF-RAY": "8a25eaf39a6c795f-NRT",
157 | "alt-svc": "h3=\":443\"; ma=86400"
158 | },
159 | "status_code": 200
160 | }
161 | },
162 | {
163 | "request": {
164 | "options": {
165 | "recv_timeout": 30000
166 | },
167 | "body": "",
168 | "url": "https://api.openai.com/v1/threads/thread_DhBICESO7TvfyhBUXiqCPnFl/messages?",
169 | "headers": {
170 | "Authorization": "***",
171 | "OpenAI-Organization": "***",
172 | "Content-type": "application/json",
173 | "OpenAI-Beta": "assistants=v2"
174 | },
175 | "method": "get",
176 | "request_body": ""
177 | },
178 | "response": {
179 | "binary": false,
180 | "type": "ok",
181 | "body": "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"msg_tC2xiR0mJbTzTzYrIYDj1l21\",\n \"object\": \"thread.message\",\n \"created_at\": 1720838527,\n \"assistant_id\": \"asst_pF5jY1T3YTFO9zfVgLCLJXen\",\n \"thread_id\": \"thread_DhBICESO7TvfyhBUXiqCPnFl\",\n \"run_id\": \"run_5j4vPClNuTQjTxC45Bk1IJTk\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Of course! Let's solve the equation \\\\( 3x + 11 = 14 \\\\).\\n\\nHere are the steps:\\n\\n1. Subtract 11 from both sides of the equation to isolate the term with the variable \\\\( x \\\\).\\n\\\\[ 3x + 11 - 11 = 14 - 11 \\\\]\\n\\\\[ 3x = 3 \\\\]\\n\\n2. Divide both sides by 3 to solve for \\\\( x \\\\).\\n\\\\[ \\\\frac{3x}{3} = \\\\frac{3}{3} \\\\]\\n\\\\[ x = 1 \\\\]\\n\\nSo, the solution to the equation \\\\( 3x + 11 = 14 \\\\) is \\\\( x = 1 \\\\).\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n },\n {\n \"id\": \"msg_fp1mk5184X7Ah5Qjl6wCVetA\",\n \"object\": \"thread.message\",\n \"created_at\": 1720838525,\n \"assistant_id\": null,\n \"thread_id\": \"thread_DhBICESO7TvfyhBUXiqCPnFl\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"I need to solve the equation `3x + 11 = 14`. Can you help me?\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n }\n ],\n \"first_id\": \"msg_tC2xiR0mJbTzTzYrIYDj1l21\",\n \"last_id\": \"msg_fp1mk5184X7Ah5Qjl6wCVetA\",\n \"has_more\": false\n}",
182 | "headers": {
183 | "Date": "Sat, 13 Jul 2024 02:42:11 GMT",
184 | "Content-Type": "application/json",
185 | "Content-Length": "1589",
186 | "Connection": "keep-alive",
187 | "openai-version": "2020-10-01",
188 | "openai-organization": "mercari-8",
189 | "x-request-id": "req_87699a46cb06eb749af47052275f9a24",
190 | "openai-processing-ms": "44",
191 | "strict-transport-security": "max-age=15552000; includeSubDomains; preload",
192 | "CF-Cache-Status": "DYNAMIC",
193 | "Set-Cookie": "__cf_bm=as8VQDJ4zYzmVzpooRFo7to_CPkuyiuIl6XxPLTIimM-1720838531-1.0.1.1-MpNlalQX3TjGpkLYT_HPVYclTfOJmavKR5a.eup.BHeHPmPvXwrdbN1bMAlR_htId7G2JzqyKrR_DUahMge0aA; path=/; expires=Sat, 13-Jul-24 03:12:11 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
194 | "X-Content-Type-Options": "nosniff",
195 | "Server": "cloudflare",
196 | "CF-RAY": "8a25eb158a14795f-NRT",
197 | "alt-svc": "h3=\":443\"; ma=86400"
198 | },
199 | "status_code": 200
200 | }
201 | }
202 | ]
--------------------------------------------------------------------------------
/lib/ex_openai.ex:
--------------------------------------------------------------------------------
defmodule ExOpenAI do
  @moduledoc """
  Auto-generated SDK for OpenAI APIs
  See https://platform.openai.com/docs/api-reference/introduction for further info on REST endpoints
  Make sure to refer to the README on Github to see what is implemented and what isn't yet
  """

  use Application

  alias ExOpenAI.Config

  # OTP application callback: boots a supervisor that owns the Config server.
  @impl true
  def start(_type, _args) do
    # TODO: find something more elegant for doing this —
    # force-allocate all possible keys/atoms within the available components
    # so that String.to_existing_atom can be used without worrying that the
    # atoms aren't allocated yet. Sketch:
    #
    #   with {:ok, mods} <- :application.get_key(:ex_openai, :modules) do
    #     mods
    #     |> Enum.filter(&(&1 |> Module.split() |> Enum.at(1) == "Components"))
    #     |> Enum.map(& &1.unpack_ast)
    #   end
    Supervisor.start_link([Config], strategy: :one_for_one, name: ExOpenAI.Supervisor)
  end
end
30 |
# --------------------------------------------------------------------------
# Compile-time code generation (runs while this file compiles).
# `docs` is the parsed OpenAI OpenAPI document; from it we first emit one
# module per schema component (ExOpenAI.Components.*), and further below
# one module per API group.
# --------------------------------------------------------------------------
31 | docs = ExOpenAI.Codegen.get_documentation()
32 |
33 | # Generate structs from schema
34 | docs
35 | |> Map.get(:components)
36 | # generate module name: ExOpenAI.Components.X
37 | |> Enum.map(fn {name, c} ->
38 | {name
39 | |> ExOpenAI.Codegen.string_to_component(), c}
40 | end)
41 | # ignore stuff that's overwritten
42 | |> Enum.filter(fn {name, _c} -> name not in ExOpenAI.Codegen.module_overwrites() end)
43 | |> Enum.each(fn {name, component} ->
# struct_fields is a two-element list [required_fields_map, optional_fields_map];
# each map goes from field name (atom) to that field's quoted typespec AST.
# Optional fields get `| nil` appended to their spec.
44 | struct_fields =
45 | [{:required, component.required_props}, {:optional, component.optional_props}]
46 | |> Enum.map(fn {kind, i} ->
47 | Enum.reduce(
48 | i,
49 | %{},
50 | fn item, acc ->
51 | name = item.name
52 | type = item.type
53 |
54 | case kind do
55 | :required ->
56 | Map.merge(acc, %{
57 | String.to_atom(name) => quote(do: unquote(ExOpenAI.Codegen.type_to_spec(type)))
58 | })
59 |
60 | :optional ->
61 | Map.merge(acc, %{
62 | String.to_atom(name) =>
63 | quote(do: unquote(ExOpenAI.Codegen.type_to_spec(type)) | nil)
64 | })
65 | end
66 | end
67 | )
68 | end)
69 |
70 | # module start
71 | defmodule name do
72 | use ExOpenAI.Jason
73 |
74 | docstring_head = """
75 | Schema representing a #{Module.split(name) |> List.last()} within the OpenAI API
76 | """
77 |
# Only enforce keys when the component actually has required props:
# List.first/1 picks the required-fields map built above.
78 | with l <- List.first(struct_fields),
79 | is_empty? <- Enum.empty?(l),
80 | false <- is_empty? do
81 | @enforce_keys Map.keys(l)
82 | end
83 |
# Struct carries both required and optional field names.
84 | defstruct(struct_fields |> Enum.map(&Map.keys(&1)) |> List.flatten())
85 |
86 | # components can either be 'full' components, so they have properties
87 | # or 'oneOf' components which link to other components but don't have properties themselves
88 | # for typespec, normal ones should just have full typespecs, but oneOf just a Comp1 | Comp2 | Comp3 spec
89 | case component.kind do
90 | :component ->
91 | @type t :: %__MODULE__{
92 | unquote_splicing(
93 | struct_fields
94 | |> Enum.map(&Map.to_list(&1))
95 | |> Enum.reduce(&Kernel.++/2)
96 | )
97 | }
98 |
99 | # Inlining the typespec here to have it available during PROD builds, as spec definitions will get stripped
100 | @typespec quote(
101 | do: %__MODULE__{
102 | unquote_splicing(
103 | struct_fields
104 | |> Enum.map(&Map.to_list(&1))
105 | |> Enum.reduce(&Kernel.++/2)
106 | )
107 | }
108 | )
109 |
110 | @moduledoc "#{docstring_head}
111 | "
112 |
113 | :oneOf ->
114 | @type t :: unquote(ExOpenAI.Codegen.type_to_spec({:oneOf, component.components}))
115 | @typespec quote(
116 | do: unquote(ExOpenAI.Codegen.type_to_spec({:oneOf, component.components}))
117 | )
118 |
119 | @moduledoc "#{docstring_head}
120 |
121 | Use any of these components: #{inspect(component.components)}"
122 |
123 | :enum ->
# NOTE(review): @type uses {:oneOf, component.enum} while the inlined @typespec
# below uses {:enum, component.enum} — confirm this asymmetry is intentional in
# ExOpenAI.Codegen.type_to_spec/1.
124 | @type t :: unquote(ExOpenAI.Codegen.type_to_spec({:oneOf, component.enum}))
125 | @typespec quote(do: unquote(ExOpenAI.Codegen.type_to_spec({:enum, component.enum})))
126 |
127 | @moduledoc "#{docstring_head}
128 |
129 | This is an enum with values: #{inspect(component.enum |> Enum.map(&String.to_atom/1))}"
130 |
# allOf is represented with the same union spec as oneOf.
131 | :allOf ->
132 | @type t :: unquote(ExOpenAI.Codegen.type_to_spec({:oneOf, component.components}))
133 | @typespec quote(
134 | do: unquote(ExOpenAI.Codegen.type_to_spec({:oneOf, component.components}))
135 | )
136 |
137 | @moduledoc "#{docstring_head}
138 |
139 | Use any of these components: #{inspect(component.components |> Enum.map(&Kernel.elem(&1, 1)))}"
140 | end
141 |
# Embeds helpers for re-hydrating this module's typespec AST at runtime.
142 | use ExOpenAI.Codegen.AstUnpacker
143 | end
144 |
145 | # module end
146 | end)
147 |
148 | # generate modules
# For each API group in the OpenAPI document, define one module
# (e.g. ExOpenAI.Chat) containing one generated function per endpoint.
149 | docs
150 | |> Map.get(:functions)
151 | # group all the functions by their 'group', to cluster them into Module.Group
152 | |> Enum.reduce(%{}, fn fx, acc ->
153 | Map.put(acc, fx.group, [fx | Map.get(acc, fx.group, [])])
154 | end)
155 | |> Enum.each(fn {name, functions} ->
# Module name from group name, e.g. "fine-tune" -> ExOpenAI.FineTune.
156 | modname =
157 | name
158 | |> String.replace("-", "_")
159 | |> Macro.camelize()
160 | |> String.to_atom()
161 | |> (&Module.concat(ExOpenAI, &1)).()
162 |
163 | defmodule modname do
164 | @moduledoc """
165 | Modules for interacting with the `#{name}` group of OpenAI APIs
166 |
167 | API Reference: https://platform.openai.com/docs/api-reference/#{name}
168 | """
169 |
170 | functions
171 | |> Enum.each(fn fx ->
172 | %{
173 | name: function_name,
174 | summary: summary,
175 | arguments: args,
176 | endpoint: endpoint,
177 | deprecated?: deprecated,
178 | method: method,
179 | response_type: response_type,
180 | group: group
181 | } = fx
182 |
183 | name = String.to_atom(function_name)
184 |
# Request content type, defaulting to application/json when the
# endpoint declares no request body.
185 | content_type =
186 | with body when not is_nil(body) <- Map.get(fx, :request_body, %{}),
187 | ct <- Map.get(body, :content_type, :"application/json") do
188 | ct
189 | end
190 |
# Required args = required positional/URL args, plus (for POST only)
# the required properties of the request body schema.
191 | merged_required_args =
192 | case method do
193 | # POST methods have body arguments on top of positional URL ones
194 | :post ->
195 | Enum.filter(args, &Map.get(&1, :required?)) ++
196 | if(is_nil(get_in(fx, [:request_body, :request_schema, :required_props])),
197 | do: [],
198 | else: fx.request_body.request_schema.required_props
199 | )
200 |
201 | :get ->
202 | Enum.filter(args, &Map.get(&1, :required?))
203 |
204 | :delete ->
205 | Enum.filter(args, &Map.get(&1, :required?))
206 | end
207 |
# Markdown bullet list of required args for the generated @doc below.
208 | required_args_docstring =
209 | Enum.map_join(merged_required_args, "\n\n", fn i ->
210 | s = "- `#{i.name}`"
211 | s = if Map.has_key?(i, :description), do: "#{s}: #{Map.get(i, :description)}", else: s
212 |
213 | s =
214 | if i.name == "file",
215 | do:
216 | "#{s}(Pass in a file object created with something like File.open!, or a {filename, file object} tuple to preserve the filename information, eg `{\"filename.ext\", File.open!(\"/tmp/file.ext\")}`)",
217 | else: s
218 |
219 | s =
220 | if Map.get(i, :example, "") != "",
221 | do: "#{s}\n\n*Example*: `#{Map.get(i, :example)}`",
222 | else: s
223 |
224 | s
225 | end)
226 |
# Optional args = non-required args (plus optional body props for POST),
# extended with the streaming option and library-wide extra options.
227 | merged_optional_args =
228 | case method do
229 | :post ->
230 | Enum.filter(args, &(!Map.get(&1, :required?))) ++
231 | if(is_nil(get_in(fx, [:request_body, :request_schema, :optional_props])),
232 | do: [],
233 | else: fx.request_body.request_schema.optional_props
234 | )
235 |
236 | :get ->
237 | Enum.filter(args, &(!Map.get(&1, :required?)))
238 |
239 | :delete ->
240 | Enum.filter(args, &(!Map.get(&1, :required?)))
241 | end
242 | |> ExOpenAI.Codegen.add_stream_to_opts_args()
243 | |> Kernel.++(ExOpenAI.Codegen.extra_opts_args())
244 |
# Markdown bullet list of optional args for the generated @doc below.
245 | optional_args_docstring =
246 | Enum.map_join(merged_optional_args, "\n\n", fn i ->
247 | s = "- `#{i.name}`"
248 |
249 | s =
250 | if Map.has_key?(i, :description),
251 | do: "#{s}: #{inspect(Map.get(i, :description))}",
252 | else: s
253 |
254 | s =
255 | if Map.get(i, :example, "") != "",
256 | do: "#{s}\n\n*Example*: `#{inspect(Map.get(i, :example))}`",
257 | else: s
258 |
259 | s
260 | end)
261 |
262 | # convert non-optional args into [arg1, arg2, arg3] representation
263 | arg_names =
264 | merged_required_args
265 | |> Enum.map(&(Map.get(&1, :name) |> String.to_atom() |> Macro.var(nil)))
266 |
267 | # convert non-optional args into spec definition [String.t(), String.t(), etc.] representation
268 | spec =
269 | merged_required_args
270 | |> Enum.map(fn item -> quote do: unquote(ExOpenAI.Codegen.type_to_spec(item.type)) end)
271 |
272 | # construct response spec
273 | # for list types, instead of {:ok, list1 | list2}, we want {:ok, list1} | {:ok, list2}
274 | response_spec =
275 | case response_type do
276 | {:oneOf, c} ->
277 | Enum.map(c, fn comp -> {:ok, ExOpenAI.Codegen.type_to_spec(comp)} end)
278 | |> Enum.reduce(&{:|, [], [&1, &2]})
279 |
280 | {:component, _} = comp ->
281 | {:ok, ExOpenAI.Codegen.type_to_spec(comp)}
282 |
283 | etc ->
284 | {:ok, ExOpenAI.Codegen.type_to_spec(etc)}
285 | end
286 |
# Keyword-list typespec for opts, folded into a union
# {:foo, spec} | {:bar, spec} — empty list when there are no optional args.
287 | optional_args =
288 | merged_optional_args
289 | |> Enum.reduce([], fn item, acc ->
290 | name = item.name
291 | type = item.type
292 |
293 | case acc do
294 | [] ->
295 | quote do:
296 | {unquote(String.to_atom(name)),
297 | unquote(ExOpenAI.Codegen.type_to_spec(type))}
298 |
299 | val ->
300 | quote do:
301 | {unquote(String.to_atom(name)),
302 | unquote(ExOpenAI.Codegen.type_to_spec(type))}
303 | | unquote(val)
304 | end
305 | end)
306 | |> case do
307 | [] -> []
308 | e -> [e]
309 | end
310 |
311 | @doc """
312 | #{summary |> ExOpenAI.Codegen.fix_openai_links()}
313 |
314 | Endpoint: `https://api.openai.com/v1#{endpoint}`
315 |
316 | Method: #{Atom.to_string(method) |> String.upcase()}
317 |
318 | Docs: https://platform.openai.com/docs/api-reference/#{group}
319 |
320 | ---
321 |
322 | ### Required Arguments:
323 |
324 | #{required_args_docstring |> ExOpenAI.Codegen.fix_openai_links()}
325 |
326 |
327 | ### Optional Arguments:
328 |
329 | #{optional_args_docstring |> ExOpenAI.Codegen.fix_openai_links()}
330 | """
331 | if deprecated, do: @deprecated("Deprecated by OpenAI")
332 |
333 | # fx without opts
334 | @spec unquote(name)(unquote_splicing(spec)) ::
335 | unquote(response_spec) | {:error, any()}
336 |
337 | # fx with opts
338 | @spec unquote(name)(unquote_splicing(spec), unquote(optional_args)) ::
339 | unquote(response_spec) | {:error, any()}
340 |
# The generated endpoint function. At runtime it:
# 1. substitutes "path" args into the endpoint URL,
# 2. appends "query" args as the query string,
# 3. sends the remaining args as the request body,
# 4. dispatches via the configured HTTP client, converting the
#    response into the matching Components struct.
341 | def unquote(name)(unquote_splicing(arg_names), opts \\ []) do
342 | # store binding so we can access the caller-supplied args of the function later
343 | binding = binding()
344 |
345 | required_arguments = unquote(Macro.escape(merged_required_args))
346 | optional_arguments = unquote(Macro.escape(merged_optional_args))
347 | arguments = required_arguments ++ optional_arguments
348 | url = "#{unquote(endpoint)}"
349 | method = unquote(method)
350 | request_content_type = unquote(content_type)
351 |
352 | # merge all passed args together, so opts + passed
353 | all_passed_args = Keyword.merge(binding, opts) |> Keyword.drop([:opts])
354 |
355 | # replace all args in the URL that are specified as 'path'
356 | # for example: /model/{model_id} -> /model/123
357 | url =
358 | arguments
359 | |> Enum.filter(&Kernel.==(Map.get(&1, :in, ""), "path"))
360 | |> Enum.reduce(
361 | url,
362 | &String.replace(
363 | &2,
364 | "{#{&1.name}}",
365 | Keyword.get(all_passed_args, String.to_atom(&1.name))
366 | )
367 | )
368 |
369 | # iterate over all other arguments marked with in: "query", and append them to the query
370 | # for example /model/123?foo=bar
371 | query =
372 | Enum.filter(arguments, &Kernel.==(Map.get(&1, :in, ""), "query"))
373 | |> Enum.filter(&(!is_nil(Keyword.get(all_passed_args, String.to_atom(&1.name)))))
374 | |> Enum.reduce(%{}, fn item, acc ->
375 | Map.put(acc, item.name, Keyword.get(all_passed_args, String.to_atom(item.name)))
376 | end)
377 | |> URI.encode_query()
378 |
379 | url = url <> "?" <> query
380 |
381 | # construct body with the remaining args
382 | body_params =
383 | arguments
384 | # filter by all the rest, so neither query nor path
385 | |> Enum.filter(&Kernel.==(Map.get(&1, :in, ""), ""))
386 | |> Enum.filter(&(!is_nil(Keyword.get(all_passed_args, String.to_atom(&1.name)))))
387 | |> Enum.reduce(
388 | [],
389 | &Keyword.merge(&2, [
390 | {
391 | String.to_atom(&1.name),
392 | Keyword.get(all_passed_args, String.to_atom(&1.name))
393 | }
394 | ])
395 | )
396 |
397 | # function to convert the response back into a struct
398 | # passed into the client to get applied onto the response
399 | convert_response = fn response ->
400 | ExOpenAI.Codegen.convert_response(response, unquote(response_type))
401 | end
402 |
403 | ExOpenAI.Config.http_client().api_call(
404 | method,
405 | url,
406 | body_params,
407 | request_content_type,
408 | opts,
409 | convert_response
410 | )
411 | end
412 | end)
413 | end
414 | end)
415 |
--------------------------------------------------------------------------------
/docs/examples.md:
--------------------------------------------------------------------------------
1 | # ExOpenAI Usage Examples
2 |
3 | This document provides practical examples of using ExOpenAI for various common tasks.
4 |
5 | ## Table of Contents
6 |
7 | - [Chat Completions](#chat-completions)
8 | - [Assistants API](#assistants-api)
9 | - [Image Generation](#image-generation)
10 | - [Audio Processing](#audio-processing)
11 | - [Embeddings](#embeddings)
12 | - [File Management](#file-management)
13 | - [Responses API](#responses-api)
14 | - [Streaming Examples](#streaming-examples)
15 |
16 | ## Chat Completions
17 |
18 | ### Basic Chat Completion
19 |
20 | ```elixir
21 | messages = [
22 | %ExOpenAI.Components.ChatCompletionRequestUserMessage{
23 | role: :user,
24 | content: "What is the capital of France?"
25 | }
26 | ]
27 |
28 | {:ok, response} = ExOpenAI.Chat.create_chat_completion(messages, "gpt-4")
29 |
30 | # Extract the assistant's response
31 | assistant_message = response.choices |> List.first() |> Map.get(:message) |> Map.get(:content)
32 | IO.puts("Assistant: #{assistant_message}")
33 | ```
34 |
35 | ### Multi-turn Conversation
36 |
37 | ```elixir
38 | messages = [
39 | %ExOpenAI.Components.ChatCompletionRequestSystemMessage{
40 | role: :system,
41 | content: "You are a helpful assistant that speaks like a pirate."
42 | },
43 | %ExOpenAI.Components.ChatCompletionRequestUserMessage{
44 | role: :user,
45 | content: "Tell me about the weather today."
46 | },
47 | %ExOpenAI.Components.ChatCompletionRequestAssistantMessage{
48 | role: :assistant,
49 | content: "Arr matey! The skies be clear and the winds be favorable today!"
50 | },
51 | %ExOpenAI.Components.ChatCompletionRequestUserMessage{
52 | role: :user,
53 | content: "What should I wear?"
54 | }
55 | ]
56 |
57 | {:ok, response} = ExOpenAI.Chat.create_chat_completion(messages, "gpt-4")
58 | ```
59 |
60 | ### Using Function Calling
61 |
62 | ```elixir
63 | messages = [
64 | %ExOpenAI.Components.ChatCompletionRequestUserMessage{
65 | role: :user,
66 | content: "What's the weather like in San Francisco?"
67 | }
68 | ]
69 |
70 | tools = [
71 | %{
72 | type: "function",
73 | function: %{
74 | name: "get_weather",
75 | description: "Get the current weather in a given location",
76 | parameters: %{
77 | type: "object",
78 | properties: %{
79 | location: %{
80 | type: "string",
81 | description: "The city and state, e.g. San Francisco, CA"
82 | },
83 | unit: %{
84 | type: "string",
85 | enum: ["celsius", "fahrenheit"],
86 | description: "The temperature unit to use"
87 | }
88 | },
89 | required: ["location"]
90 | }
91 | }
92 | }
93 | ]
94 |
95 | {:ok, response} =
96 | ExOpenAI.Chat.create_chat_completion(
97 | messages,
98 | "gpt-4",
99 | tools: tools,
100 | tool_choice: "auto"
101 | )
102 |
103 | # Handle the function call
104 | case response.choices |> List.first() |> Map.get(:message) do
105 | %{:tool_calls => tool_calls} ->
106 | # Process tool calls
107 | Enum.each(tool_calls, fn tool_call ->
108 | IO.puts("Tool call: #{inspect(tool_call)}")
109 |
110 | function_name = tool_call.function.name
111 | arguments = Jason.decode!(tool_call.function.arguments)
112 |
113 | IO.puts("function arguments: #{inspect(arguments)}")
114 |
115 | # Call your actual function
116 | weather_data = %{"weather" => "its very very very hot"}
117 |
118 | # Add the function response to messages
119 | updated_messages =
120 | messages ++
121 | [
122 | %ExOpenAI.Components.ChatCompletionRequestAssistantMessage{
123 | role: :assistant,
124 | tool_calls: tool_calls
125 | },
126 | %ExOpenAI.Components.ChatCompletionRequestToolMessage{
127 | role: :tool,
128 | tool_call_id: tool_call.id,
129 | content: Jason.encode!(weather_data)
130 | }
131 | ]
132 |
133 | # Get the final response
134 | {:ok, final_response} =
135 | ExOpenAI.Chat.create_chat_completion(
136 | updated_messages,
137 | "gpt-4"
138 | )
139 |
140 | IO.puts(
141 | "Final response: #{final_response.choices |> List.first() |> Map.get(:message) |> Map.get(:content)}"
142 | )
143 | end)
144 |
145 | e ->
146 | # Regular message response
147 | IO.inspect(e)
148 |
149 | IO.puts(
150 | "Response: #{response.choices |> List.first() |> Map.get(:message) |> Map.get(:content)}"
151 | )
152 | end
153 | ```
154 |
155 | ## Assistants API
156 |
157 | ### Creating and Using an Assistant
158 |
159 | ```elixir
160 | # Create an assistant
161 | IO.puts("Creating assistant")
162 |
163 | {:ok, assistant} =
164 | ExOpenAI.Assistants.create_assistant(
165 | :"gpt-4o",
166 | name: "Research Assistant",
167 | instructions: "You help users with research questions. Be thorough and cite sources.",
168 | tools: [%{type: "file_search"}]
169 | )
170 |
171 | # Create a thread
172 | IO.puts("Creating thread")
173 | {:ok, thread} = ExOpenAI.Threads.create_thread()
174 |
175 | # Add a message to the thread
176 | IO.puts("Creating message")
177 |
178 | {:ok, message} =
179 | ExOpenAI.Threads.create_message(
180 | thread.id,
181 | "Can you explain the basics of quantum computing?",
182 | "user"
183 | )
184 |
185 | # Run the assistant on the thread
186 | IO.puts("Running assistant #{inspect(assistant.id)} with thread #{inspect(thread.id)}")
187 |
188 | {:ok, run} =
189 | ExOpenAI.Threads.create_run(
190 | thread.id,
191 | assistant.id
192 | )
193 |
194 | # Poll for completion
195 | check_run_status = fn run_id, thread_id ->
196 | {:ok, run_status} = ExOpenAI.Threads.get_run(thread_id, run_id)
197 | run_status.status
198 | end
199 |
200 | # Wait for completion (in a real app, use a better polling mechanism)
201 | run_id = run.id
202 | thread_id = thread.id
203 |
204 | # simple recursive poll loop: repeat until the run reaches a terminal status ("completed"/"failed")
205 | wait_for_completion = fn wait_func, run_id, thread_id ->
206 | case check_run_status.(run_id, thread_id) do
207 | "completed" ->
208 | # Get the messages
209 | {:ok, messages} = ExOpenAI.Threads.list_messages(thread_id)
210 | latest_message = messages.data |> List.first()
211 |
212 | IO.puts(
213 | "Assistant response: #{latest_message.content |> List.first() |> Map.get("text")}"
214 | )
215 |
216 | IO.inspect(latest_message)
217 |
218 | "failed" ->
219 | IO.puts("Run failed")
220 |
221 | "queued" ->
222 | IO.puts("Run is queued... ")
223 | Process.sleep(2000)
224 | wait_func.(wait_func, run_id, thread_id)
225 |
226 | "in_progress" ->
227 | IO.puts("Run is still in progress, waiting 2s")
228 | Process.sleep(2000)
229 | wait_func.(wait_func, run_id, thread_id)
230 |
231 | "requires_action" ->
232 | # Handle tool calls if needed
233 | {:ok, run_details} = ExOpenAI.Threads.get_run(thread_id, run_id)
234 |
235 | # tool_outputs =
236 | # process_tool_calls(run_details.required_action.submit_tool_outputs.tool_calls)
237 |
238 | {:ok, _updated_run} =
239 | ExOpenAI.Threads.submit_tool_ouputs_to_run(
240 | thread_id,
241 | run_id,
242 | %{}
243 | # tool_outputs
244 | )
245 |
246 | status ->
247 | IO.puts("Run is still in progress: #{status}")
248 | end
249 | end
250 |
251 | wait_for_completion.(wait_for_completion, run_id, thread_id)
252 | ```
253 |
254 | ## Image Generation
255 |
256 | ### Generate an Image
257 |
258 | ```elixir
259 | {:ok, response} = ExOpenAI.Images.create_image(
260 | "A serene lake surrounded by mountains at sunset",
261 | n: 1,
262 | size: "1024x1024"
263 | )
264 |
265 | # Get the image URL
266 | image_url = response.data |> List.first() |> IO.inspect
267 | ```
268 |
269 | ### Edit an Image
270 |
271 | ```elixir
272 | # Read the image and mask files
273 | image_data = File.read!("path/to/image.png")
274 | mask_data = File.read!("path/to/mask.png")
275 |
276 | {:ok, response} = ExOpenAI.Images.create_image_edit(
277 | image_data,
278 | mask_data,
279 | "Replace the masked area with a cat",
280 | n: 1,
281 | size: "1024x1024"
282 | )
283 |
284 | # Get the edited image URL
285 | edited_image_url = response.data |> List.first() |> IO.inspect
286 | ```
287 |
288 | ### Create Image Variations
289 |
290 | ```elixir
291 | image_data = File.read!("path/to/image.png")
292 |
293 | {:ok, response} = ExOpenAI.Images.create_image_variation(
294 | image_data,
295 | n: 3,
296 | size: "1024x1024"
297 | )
298 |
299 | # Get all variation URLs
300 | IO.inspect(response)
301 | ```
302 |
303 | ## Audio Processing
304 |
305 | ### Transcribe Audio
306 |
307 | ```elixir
308 | audio_data = File.read!("path/to/audio.mp3")
309 |
310 | {:ok, transcription} = ExOpenAI.Audio.create_transcription(
311 | {"audio.mp3", audio_data},
312 | "whisper-1"
313 | )
314 |
315 | IO.inspect(transcription)
316 | ```
317 |
318 | ### Translate Audio
319 |
320 | ```elixir
321 | audio_data = File.read!("path/to/french_audio.mp3")
322 |
323 | {:ok, translation} = ExOpenAI.Audio.create_translation(
324 | {"french_audio.mp3", audio_data},
325 | "whisper-1"
326 | )
327 |
328 | IO.inspect(translation)
329 | ```
330 |
331 | ### Generate Speech
332 |
333 | ```elixir
334 | {:ok, speech_data} = ExOpenAI.Audio.create_speech(
335 | "Hello world! This is a test of the text-to-speech API.",
336 | "tts-1",
337 | :alloy,
338 | response_format: "mp3"
339 | )
340 |
341 | IO.inspect(speech_data)
342 |
343 | # Save the audio file
344 | File.write!("output.mp3", speech_data)
345 | ```
346 |
347 | ## Embeddings
348 |
349 | ### Create Embeddings for Text
350 |
351 | ```elixir
352 | {:ok, response} = ExOpenAI.Embeddings.create_embedding(
353 | "The food was delicious and the service was excellent.",
354 | "text-embedding-ada-002"
355 | )
356 |
357 | # Get the embedding vector
358 | embedding_vector = response.data |> List.first() |> IO.inspect
359 | ```
360 |
361 | ### Create Embeddings for Multiple Texts
362 |
363 | ```elixir
364 | texts = [
365 | "The food was delicious and the service was excellent.",
366 | "The restaurant was too noisy and the food was mediocre.",
367 | "I would definitely recommend this place to my friends."
368 | ]
369 |
370 | {:ok, response} = ExOpenAI.Embeddings.create_embedding(
371 | texts,
372 | "text-embedding-ada-002"
373 | )
374 |
375 | # Get all embedding vectors
376 | IO.inspect(response)
377 | ```
378 |
379 | ## File Management
380 |
381 | ### Upload a File
382 |
383 | ```elixir
384 | file_content = File.read!("path/to/data.jsonl")
385 |
386 | {:ok, file} = ExOpenAI.Files.create_file(
387 | file_content,
388 | "fine-tune"
389 | )
390 |
391 | IO.puts("File ID: #{file.id}")
392 | ```
393 |
394 | ### List Files
395 |
396 | ```elixir
397 | {:ok, files} = ExOpenAI.Files.list_files()
398 |
399 | Enum.each(files.data, fn file ->
400 | IO.puts("File ID: #{file["id"]}, Filename: #{file["filename"]}, Purpose: #{file["purpose"]}")
401 | end)
402 | ```
403 |
404 | ### Retrieve File Content
405 |
406 | ```elixir
407 | {:ok, content} = ExOpenAI.Files.download_file(file_id)
408 | ```
409 |
410 | ### Delete a File
411 |
412 | ```elixir
413 | {:ok, result} = ExOpenAI.Files.delete_file(file_id)
414 | IO.puts("File deleted: #{result.deleted}")
415 | ```
416 |
417 | ## Responses API
418 |
419 | ### Create a Response
420 |
421 | ```elixir
422 | {:ok, response} = ExOpenAI.Responses.create_response(
423 | "Tell me a joke about programming",
424 | "gpt-4o-mini"
425 | )
426 |
427 | # Get the assistant's message
428 | output = List.first(response.output)
429 | content = output.content |> List.first() |> Map.get(:text)
430 | IO.puts("Assistant's response: #{content}")
431 | ```
432 |
433 | ### Continue a Conversation
434 |
435 | ```elixir
436 | # Initial response
437 | {:ok, response} = ExOpenAI.Responses.create_response(
438 | "Tell me a joke about programming",
439 | "gpt-4o-mini"
440 | )
441 |
442 | # Continue the conversation
443 | {:ok, follow_up} = ExOpenAI.Responses.create_response(
444 | "Explain why that joke is funny",
445 | "gpt-4o-mini",
446 | previous_response_id: response.id
447 | )
448 |
449 | # Get the follow-up response
450 | follow_up_content = follow_up.output
451 | |> List.first()
452 | |> Map.get(:content)
453 | |> List.first()
454 | |> Map.get(:text)
455 |
456 | IO.puts("Follow-up response: #{follow_up_content}")
457 | ```
458 |
459 | ## Streaming Examples
460 |
461 | ### Streaming Chat Completion
462 |
463 | ```elixir
464 | defmodule ChatStreamer do
465 | use ExOpenAI.StreamingClient
466 |
467 | def start(messages, model) do
468 | {:ok, pid} = __MODULE__.start_link(%{text: ""})
469 |
470 | ExOpenAI.Chat.create_chat_completion(
471 | messages,
472 | model,
473 | stream: true,
474 | stream_to: pid
475 | )
476 |
477 | pid
478 | end
479 |
480 | @impl true
481 | def handle_data(data, state) do
482 | content = case data do
483 | %{choices: [%{"delta" => %{"content" => content}}]} when is_binary(content) ->
484 | content
485 | _ ->
486 | ""
487 | end
488 |
489 | if content != "" do
490 | IO.write(content)
491 | {:noreply, %{state | text: state.text <> content}}
492 | else
493 | {:noreply, state}
494 | end
495 | end
496 |
497 | @impl true
498 | def handle_error(error, state) do
499 | IO.puts("\nError: #{inspect(error)}")
500 | {:noreply, state}
501 | end
502 |
503 | @impl true
504 | def handle_finish(state) do
505 | IO.puts("\n\nDone!")
506 | {:noreply, state}
507 | end
508 |
509 | def get_full_text(pid) do
510 | :sys.get_state(pid).text
511 | end
512 | end
513 |
514 | # Usage
515 | messages = [
516 | %ExOpenAI.Components.ChatCompletionRequestUserMessage{
517 | role: :user,
518 | content: "Write a short poem about coding"
519 | }
520 | ]
521 |
522 | pid = ChatStreamer.start(messages, "gpt-4")
523 |
524 | # After streaming completes
525 | full_text = ChatStreamer.get_full_text(pid)
526 | ```
527 |
528 | ### Streaming with a Callback Function
529 |
530 | ```elixir
531 | # Rebinding `buffer` inside the callback would NOT update the outer binding
532 | # (closures capture values in Elixir), so accumulate the text in an Agent.
533 | {:ok, buffer_pid} = Agent.start_link(fn -> "" end)
534 |
535 | callback = fn
536 |   :finish ->
537 |     IO.puts("\n\nDone! Full text: #{Agent.get(buffer_pid, & &1)}")
538 |
539 |   {:data, data} ->
540 |     content = case data do
541 |       %{choices: [%{"delta" => %{"content" => content}}]} when is_binary(content) ->
542 |         content
543 |       _ ->
544 |         ""
545 |     end
546 |
547 |     if content != "" do
548 |       IO.write(content)
549 |       Agent.update(buffer_pid, &(&1 <> content))
550 |     end
551 |
552 |   {:error, err} ->
553 |     IO.puts("\nError: #{inspect(err)}")
554 | end
555 |
556 | messages = [
557 |   %ExOpenAI.Components.ChatCompletionRequestUserMessage{
558 |     role: :user,
559 |     content: "Explain quantum computing briefly"
560 |   }
561 | ]
562 |
563 | ExOpenAI.Chat.create_chat_completion(
564 |   messages,
565 |   "gpt-4",
566 |   stream: true,
567 |   stream_to: callback
568 | )
569 |
570 | # Read the accumulated text at any time with Agent.get(buffer_pid, & &1)
570 | ```
571 |
--------------------------------------------------------------------------------
/docs/codegen.md:
--------------------------------------------------------------------------------
1 | # Understanding ex_openai Code Generation Architecture
2 |
3 | ## Overview
4 |
5 | The `ex_openai` library is an Elixir SDK for the OpenAI API that leverages metaprogramming to auto-generate most of its code directly from OpenAI's API documentation. This design ensures the SDK remains current with OpenAI's API without requiring manual updates for each API change.
6 |
7 | ## Code Generation Process - Step by Step
8 |
9 | ### 1. Reading and Parsing "docs.yaml"
10 |
11 | The process begins with parsing OpenAI's API documentation, stored as a YAML file at `lib/ex_openai/docs/docs.yaml`:
12 |
13 | ```elixir
14 | def get_documentation do
15 | {:ok, yml} =
16 | File.read!("#{__DIR__}/docs/docs.yaml")
17 | |> YamlElixir.read_from_string()
18 |
19 | # Process components and functions...
20 | end
21 | ```
22 |
23 | **Short Example:**
24 |
25 | In your "docs.yaml," suppose you have:
26 |
27 | ```yaml
28 | paths:
29 | /completions:
30 | post:
31 | operationId: createCompletion
32 | summary: Create a new completion
33 | requestBody:
34 | required: true
35 | content:
36 | application/json:
37 | schema:
38 | $ref: '#/components/schemas/CompletionRequest'
39 | responses:
40 | "200":
41 | description: OK
42 | content:
43 | application/json:
44 | schema:
45 | $ref: '#/components/schemas/CompletionResponse'
46 |
47 | components:
48 | schemas:
49 | CompletionRequest:
50 | type: object
51 | required:
52 | - model
53 | - prompt
54 | properties:
55 | model:
56 | type: string
57 | prompt:
58 | type: string
59 | max_tokens:
60 | type: integer
61 | CompletionResponse:
62 | type: object
63 | properties:
64 | id:
65 | type: string
66 | choices:
67 | type: array
68 | items:
69 | type: string
70 | ```
71 |
72 | Your code calls `ExOpenAI.Codegen.get_documentation/0` to parse this file, returning a structure like:
73 |
74 | ```elixir
75 | %{
76 | components: %{
77 | "CompletionRequest" => %{kind: :component, required_props: [...], optional_props: [...]},
78 | "CompletionResponse" => %{kind: :component, ...}
79 | },
80 | functions: [
81 | %{
82 | endpoint: "/completions",
83 | name: "create_completion",
84 | method: :post,
85 | request_body: %{...},
86 | response_type: {:component, "CompletionResponse"},
87 | group: "completions"
88 | }
89 | ]
90 | }
91 | ```
92 |
93 | ### 2. Converting Raw JSON Schema Types (parse_type/1)
94 |
95 | The type system converts OpenAI's type definitions to an intermediate Elixir representation:
96 |
97 | ```elixir
98 | def parse_type(%{"type" => "string", "enum" => enum_entries}),
99 | do: {:enum, Enum.map(enum_entries, &String.to_atom/1)}
100 |
101 | def parse_type(%{"type" => "array", "items" => items}),
102 | do: {:array, parse_type(items)}
103 |
104 | def parse_type(%{"$ref" => ref}),
105 | do: {:component, String.replace(ref, "#/components/schemas/", "")}
106 |
107 | def parse_type(%{"type" => type}),
108 | do: type
109 | ```
110 |
111 | **Short Example:**
112 |
113 | If the docs.yaml contains:
114 |
115 | ```json
116 | {
117 | "type": "array",
118 | "items": {
119 | "type": "string"
120 | }
121 | }
122 | ```
123 |
124 | The `parse_type` function sees `"type": "array"` and `"items": {"type": "string"}`. It transforms this into an intermediate representation:
125 |
126 | ```elixir
127 | {:array, "string"}
128 | ```
129 |
130 | Similarly, if you have:
131 |
132 | ```json
133 | {
134 | "type": "object",
135 | "properties": {
136 | "role": {
137 | "type": "string",
138 | "enum": ["system", "user"]
139 | }
140 | }
141 | }
142 | ```
143 |
144 | `parse_type` will return:
145 |
146 | ```elixir
147 | {:object, %{"role" => {:enum, [:system, :user]}}}
148 | ```
149 |
150 | This representation is used later for building Elixir typespecs or struct fields.
151 |
152 | ### 3. Creating a Normalized Component Schema (parse_component_schema/1)
153 |
154 | Component schemas are transformed into a normalized Elixir representation:
155 |
156 | ```elixir
157 | def parse_component_schema(%{"properties" => props, "required" => required} = full_schema) do
158 | # Process properties and separate required vs optional
159 | %{
160 | description: Map.get(full_schema, "description", ""),
161 | kind: :component,
162 | required_props: parse_properties(required_props),
163 | optional_props: parse_properties(optional_props)
164 | }
165 | end
166 | ```
167 |
168 | **Short Example:**
169 |
170 | Consider a component:
171 |
172 | ```yaml
173 | CompletionRequest:
174 | type: object
175 | required:
176 | - "model"
177 | properties:
178 | model:
179 | type: "string"
180 | max_tokens:
181 | type: "integer"
182 | ```
183 |
184 | `parse_component_schema/1` generates something like:
185 |
186 | ```elixir
187 | %{
188 | kind: :component,
189 | description: "...",
190 | required_props: [
191 | %{
192 | name: "model",
193 | type: "string",
194 | description: "",
195 | example: ""
196 | }
197 | ],
198 | optional_props: [
199 | %{
200 | name: "max_tokens",
201 | type: "integer",
202 | description: "",
203 | example: ""
204 | }
205 | ]
206 | }
207 | ```
208 |
209 | Notice how the function separates `required_props` from `optional_props`, based on the "required" array in the YAML definition.
210 |
211 | The function also handles special cases like `oneOf` and `allOf` types. Both of these are handled similarly, creating a type representation that allows for multiple possible type variants.
212 |
213 | ### 4. Extracting Endpoint Definitions (parse_path/3)
214 |
215 | For each API endpoint, the library processes HTTP methods and parameters:
216 |
217 | ```elixir
218 | def parse_path(
219 | path,
220 | %{
221 | "post" =>
222 | %{
223 | "operationId" => id,
224 | "summary" => summary,
225 | "requestBody" => body,
226 | "responses" => responses,
227 | "x-oaiMeta" => _meta
228 | } = args
229 | },
230 | component_mapping
231 | ) do
232 | # Extract endpoint data
233 | end
234 | ```
235 |
236 | The library handles GET, POST, and DELETE methods. If it encounters an unsupported HTTP verb or path definition, it logs "unhandled path: [path] - [args]". This helps users identify when an OpenAI API endpoint isn't implemented in the library, which can be useful for troubleshooting or requesting new features.
237 |
238 | **Short Example:**
239 |
240 | If your docs.yaml has:
241 |
242 | ```yaml
243 | /completions:
244 | post:
245 | operationId: createCompletion
246 | summary: Create a new completion
247 | requestBody:
248 | required: true
249 | content:
250 | application/json:
251 | schema:
252 | $ref: '#/components/schemas/CompletionRequest'
253 | responses:
254 | "200":
255 | description: OK
256 | content:
257 | application/json:
258 | schema:
259 | $ref: '#/components/schemas/CompletionResponse'
260 | ```
261 |
262 | The `parse_path` function produces a data structure such as:
263 |
264 | ```elixir
265 | %{
266 | endpoint: "/completions",
267 | name: "create_completion",
268 | summary: "Create a new completion",
269 | deprecated?: false,
270 | arguments: [], # no explicit path/query parameters in this example
271 | method: :post,
272 |   request_body: %{
273 |     required?: true,
274 |     content_type: :"application/json",
275 |     request_schema: %{...} # the parsed CompletionRequest component schema
276 |   },
277 | group: "completions",
278 | response_type: {:component, "CompletionResponse"}
279 | }
280 | ```
281 |
282 | ### 5. Type Spec Generation
283 |
284 | After parsing types, they're converted to Elixir typespecs:
285 |
286 | ```elixir
287 | def type_to_spec("string"), do: quote(do: String.t())
288 | def type_to_spec("integer"), do: quote(do: integer())
289 | def type_to_spec("number"), do: quote(do: float())
290 | def type_to_spec("boolean"), do: quote(do: boolean())
291 | def type_to_spec("bitstring"), do: quote(do: bitstring() | {String.t(), bitstring()})
292 |
293 | def type_to_spec({:array, nested}), do: quote(do: unquote([type_to_spec(nested)]))
294 |
295 | def type_to_spec({:enum, l}) when is_list(l), do:
296 | Enum.reduce(l, &{:|, [], [&1, &2]})
297 |
298 | def type_to_spec({:component, component}) when is_binary(component) do
299 | mod = string_to_component(component) |> Module.split() |> Enum.map(&String.to_atom/1)
300 | {{:., [], [{:__aliases__, [alias: false], mod}, :t]}, [], []}
301 | end
302 | ```
303 |
304 | This generates proper Elixir typespecs for documentation and dialyzer analysis.
305 |
306 | ### 6. Generating Modules for Components
307 |
308 | For each component schema, a corresponding Elixir module with a struct is generated:
309 |
310 | ```elixir
311 | defmodule name do
312 | use ExOpenAI.Jason
313 |
314 | @enforce_keys Map.keys(l)
315 | defstruct(struct_fields |> Enum.map(&Map.keys(&1)) |> List.flatten())
316 |
317 | @type t :: %__MODULE__{
318 | unquote_splicing(
319 | struct_fields
320 | |> Enum.map(&Map.to_list(&1))
321 | |> Enum.reduce(&Kernel.++/2)
322 | )
323 | }
324 |
325 | use ExOpenAI.Codegen.AstUnpacker
326 | end
327 | ```
328 |
329 | Before generating these modules, the code checks against `ExOpenAI.Codegen.module_overwrites()`, which returns a list of modules that should NOT be auto-generated (currently only `ExOpenAI.Components.Model`). These modules are provided manually instead, allowing for customization beyond what the OpenAI docs specify.
330 |
331 | **Short Example:**
332 |
333 | Given the parsed schema for "CompletionRequest," the code dynamically builds an Elixir module:
334 |
335 | ```elixir
336 | defmodule ExOpenAI.Components.CompletionRequest do
337 | @enforce_keys [:model] # from required props
338 | defstruct [:model, :max_tokens]
339 |
340 | @type t :: %__MODULE__{
341 | model: String.t(),
342 | max_tokens: integer() | nil
343 | }
344 |
345 | # Includes AST unpacking for atom allocation
346 | use ExOpenAI.Codegen.AstUnpacker
347 | end
348 | ```
349 |
350 | This happens for each "component" in docs.yaml. Components with allOf / oneOf / anyOf become specialized union types.
351 |
352 | ### 7. Generating Endpoint Modules and Functions
353 |
354 | API functions are grouped by their path prefix, and a module is generated for each group:
355 |
356 | ```elixir
357 | defmodule modname do
358 | @moduledoc """
359 | Modules for interacting with the `#{name}` group of OpenAI APIs
360 | """
361 |
362 | # Function definitions...
363 | end
364 | ```
365 |
366 | **Short Example:**
367 |
368 | Given a single parsed path data structure:
369 |
370 | ```elixir
371 | %{
372 | endpoint: "/completions",
373 | name: "create_completion",
374 | method: :post,
375 | group: "completions",
376 |   request_body: %{
377 |     request_schema: %{...} # the parsed CompletionRequest component schema
378 |   },
379 | response_type: {:component, "CompletionResponse"}
380 | }
381 | ```
382 |
383 | ExOpenAI creates a module `ExOpenAI.Completions` with a function `create_completion/2`:
384 |
385 | ```elixir
386 | defmodule ExOpenAI.Completions do
387 | @doc """
388 | Create a new completion
389 |
390 | Required Arguments:
391 | - model: string
392 |
393 | Optional Arguments:
394 | - max_tokens: integer
395 |
396 | Endpoint: POST /completions
397 | Docs: https://platform.openai.com/docs/api-reference/completions
398 | """
399 | @spec create_completion(String.t(), keyword()) ::
400 | {:ok, ExOpenAI.Components.CompletionResponse.t()} | {:error, any()}
401 | def create_completion(model, opts \\ []) do
402 | # Construct body from arguments
403 | body_params = [model: model] ++ opts
404 | # Delegates to the client
405 | ExOpenAI.Config.http_client().api_call(
406 | :post,
407 | "/completions",
408 | body_params,
409 | :"application/json",
410 | opts,
411 | &convert_response(&1, {:component, "CompletionResponse"})
412 | )
413 | end
414 | end
415 | ```
416 |
417 | For each function, the generator creates:
418 | - Documentation with parameter descriptions and examples
419 | - Type specifications for both required and optional arguments
420 | - Return type specifications
421 | - Function implementation that handles the API request
422 |
423 | Additionally, `ExOpenAI.Codegen.extra_opts_args()` injects additional standard options into every generated function's options:
424 | - `openai_api_key`: Overrides the global API key config
425 | - `openai_organization_key`: Overrides the global organization key config
426 | - `base_url`: Customizes which API endpoint to use as base
427 |
428 | For functions that support streaming, the code uses `ExOpenAI.Codegen.add_stream_to_opts_args()` to inject a `stream_to` parameter, allowing users to specify a PID or function to receive streaming content.
429 |
430 | ### 8. Request Processing
431 |
432 | The generated functions create properly formatted API calls by:
433 |
434 | 1. Extracting required parameters from function arguments
435 | 2. Extracting optional parameters from the keyword list
436 | 3. Constructing the API URL by injecting path parameters
437 | 4. Adding query parameters to the URL
438 | 5. Building the request body for POST requests
439 | 6. Handling the response and converting it to the appropriate Elixir types
440 |
441 | ```elixir
442 | # Constructing URL with path parameters
443 | url =
444 | arguments
445 | |> Enum.filter(&Kernel.==(Map.get(&1, :in, ""), "path"))
446 | |> Enum.reduce(
447 | url,
448 | &String.replace(
449 | &2,
450 | "{#{&1.name}}",
451 | Keyword.get(all_passed_args, String.to_atom(&1.name))
452 | )
453 | )
454 |
455 | # Adding query parameters
456 | query =
457 | Enum.filter(arguments, &Kernel.==(Map.get(&1, :in, ""), "query"))
458 |   # keep only query params that the caller actually passed (drop nils)
459 |   |> Enum.filter(&(!is_nil(Keyword.get(all_passed_args, String.to_atom(&1.name)))))
460 | |> Enum.reduce(%{}, fn item, acc ->
461 | Map.put(acc, item.name, Keyword.get(all_passed_args, String.to_atom(item.name)))
462 | end)
463 | |> URI.encode_query()
464 |
465 | # Calling the API
466 | ExOpenAI.Config.http_client().api_call(
467 | method,
468 | url,
469 | body_params,
470 | request_content_type,
471 | opts,
472 | convert_response
473 | )
474 | ```
475 |
476 | ### 9. Client Implementation
477 |
478 | The API client (`ExOpenAI.Client`) handles the actual HTTP requests:
479 |
480 | ```elixir
481 | def api_post(url, params \\ [], request_options \\ [], convert_response) do
482 | body =
483 | params
484 | |> Enum.into(%{})
485 | |> strip_params()
486 | |> Jason.encode()
487 | |> elem(1)
488 |
489 | # Set up headers, options, etc.
490 |
491 | url
492 | |> add_base_url(base_url)
493 | |> post(body, headers, request_options)
494 | |> handle_response()
495 | |> convert_response.()
496 | end
497 | ```
498 |
499 | Key features:
500 | 1. Support for different HTTP methods (GET, POST, DELETE)
501 | 2. Proper header handling for authorization
502 | 3. Support for multipart forms for file uploads
503 | 4. Response processing
504 |
505 | ### 10. Streaming Support
506 |
507 | The library implements sophisticated streaming support for OpenAI's Server-Sent Events:
508 |
509 | ```elixir
510 | def stream_options(request_options, convert_response) do
511 | with {:ok, stream_val} <- Keyword.fetch(request_options, :stream),
512 | {:ok, stream_to} when is_pid(stream_to) or is_function(stream_to) <-
513 | Keyword.fetch(request_options, :stream_to),
514 | true <- stream_val do
515 | # spawn a new StreamingClient and tell it to forward data to `stream_to`
516 | {:ok, sse_client_pid} = ExOpenAI.StreamingClient.start_link(stream_to, convert_response)
517 | [stream_to: sse_client_pid]
518 | else
519 | _ ->
520 | [stream_to: nil]
521 | end
522 | end
523 | ```
524 |
525 | This supports:
526 | 1. Callback functions for streaming data processing
527 | 2. Streaming to separate processes
528 | 3. Proper error handling and cleanup
529 |
530 | For any endpoint that supports streaming, the `add_stream_to_opts_args` function automatically injects the `stream_to` parameter option, making it available to users without manual implementation for each function.
531 |
532 | ### 11. AST Unpacking for Atom Allocation
533 |
534 | A crucial part of the system is ensuring atoms are pre-allocated:
535 |
536 | ```elixir
537 | defmodule AstUnpacker do
538 | defmacro __using__(_opts) do
539 | quote do
540 | def unpack_ast(partial_tree \\ %{}) do
541 | resolved_mods = Map.get(partial_tree, :resolved_mods, [])
542 | partial_tree = Map.put(partial_tree, :resolved_mods, resolved_mods)
543 |
544 | case Enum.member?(resolved_mods, __MODULE__) do
545 | true ->
546 | partial_tree
547 | false ->
548 | # Walk through the AST and find all components
549 | # Recursively unpack their AST
550 | end
551 | end
552 | end
553 | end
554 | end
555 | ```
556 |
557 | Every generated component module uses `use ExOpenAI.Codegen.AstUnpacker`, which injects the `unpack_ast` function. This function is crucial for:
558 |
559 | 1. Walking recursively through all component references in a type
560 | 2. Pre-allocating atoms for every field and nested field that might be used in API responses
561 | 3. Ensuring `String.to_existing_atom/1` can be safely used in response handling
562 |
563 | The `keys_to_atoms` function is responsible for converting JSON response keys from strings to atoms:
564 |
565 | ```elixir
566 | def keys_to_atoms(string_key_map) when is_map(string_key_map) do
567 | for {key, val} <- string_key_map,
568 | into: %{},
569 | do: {
570 | try do
571 | String.to_existing_atom(key)
572 | rescue
573 | ArgumentError ->
574 | Logger.debug(
575 | "Warning! Found non-existing atom returning by OpenAI API: :#{key}.\n" <>
576 | "This may mean that OpenAI has updated it's API..."
577 | )
578 | String.to_atom(key)
579 | end,
580 | keys_to_atoms(val)
581 | }
582 | end
583 | ```
584 |
585 | This function tries to convert every key to an existing atom first, and if that fails, it:
586 | 1. Logs a warning (this is important for identifying API changes from OpenAI)
587 | 2. Creates the atom anyway (with a caution about potential memory leaks)
588 | 3. Recursively processes nested maps and lists
589 |
590 | This combination of AST unpacking and careful atom handling ensures the library safely handles JSON responses from the OpenAI API.
591 |
592 | ## Contributing to the Code Generation
593 |
594 | To add or extend functionality:
595 |
596 | 1. Update docs.yaml under the correct OpenAI endpoints and components.
597 | 2. If a component includes new or unusual fields (e.g., new "image" type), modify `parse_type/1` or `parse_property/1` in codegen.ex to translate to the correct Elixir structure.
598 | 3. If an endpoint returns a brand-new top-level schema, reference it in the responses field so `parse_path/3` can link it.
599 | 4. If you need custom logic for certain endpoints (e.g., special streaming behavior), you can override how the code is generated (e.g., by hooking into or modifying the final expansions in lib/ex_openai.ex).
600 | 5. If you need to provide a custom implementation for a component, add it to `ExOpenAI.Codegen.module_overwrites()` to prevent auto-generation.
601 | 6. For new global options, consider adding them to `ExOpenAI.Codegen.extra_opts_args()`.
602 |
--------------------------------------------------------------------------------
/test/ex_openai/codegen/parse_path_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExOpenAI.Codegen.ParsePathTest do
2 | use ExUnit.Case, async: true
3 |
4 | describe "parse_path" do
5 | test "simple path" do
6 | handler_schema =
7 | ~S"
8 | get:
9 | operationId: mypath
10 | deprecated: true
11 | summary: some summary
12 | parameters:
13 | - in: path
14 | name: arg1
15 | required: true
16 | schema:
17 | type: string
18 | example:
19 | davinci
20 | description: &engine_id_description >
21 | The ID of the engine to use for this request
22 | responses:
23 | \"200\":
24 | description: OK
25 | content:
26 | application/json:
27 | schema:
28 | type: 'string'
29 | x-oaiMeta:
30 | group: somegroup"
31 | |> YamlElixir.read_all_from_string!()
32 | |> List.first()
33 |
34 | expected = %{
35 | arguments: [
36 | %{example: "davinci", in: "path", name: "arg1", required?: true, type: "string"}
37 | ],
38 | deprecated?: true,
39 | endpoint: "/foo/${engine_id}",
40 | group: "foo",
41 | method: :get,
42 | name: "mypath",
43 | response_type: :string,
44 | summary: "some summary"
45 | }
46 |
47 | assert ExOpenAI.Codegen.parse_path("/foo/${engine_id}", handler_schema, %{}) == expected
48 | end
49 |
50 | test "path with 'query in'" do
51 | handler_schema =
52 | ~S"""
53 | post:
54 | operationId: createRun
55 | tags:
56 | - Assistants
57 | summary: Create a run.
58 | parameters:
59 | - in: path
60 | name: thread_id
61 | required: true
62 | schema:
63 | type: string
64 | description: The ID of the thread to run.
65 | - name: include[]
66 | in: query
67 | description: >
68 | A list of additional fields to include in the response. Currently
69 | the only supported value is
70 | `step_details.tool_calls[*].file_search.results[*].content` to fetch
71 | the file search result content.
72 |
73 |
74 | See the [file search tool
75 | documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
76 | for more information.
77 | schema:
78 | type: array
79 | items:
80 | type: string
81 | enum:
82 | - step_details.tool_calls[*].file_search.results[*].content
83 | requestBody:
84 | required: true
85 | content:
86 | application/json:
87 | schema:
88 | $ref: "#/components/schemas/CreateRunRequest"
89 | responses:
90 | "200":
91 | description: OK
92 | content:
93 | application/json:
94 | schema:
95 | $ref: "#/components/schemas/RunObject"
96 | x-oaiMeta:
97 | name: Create run
98 | group: threads
99 | beta: true
100 | returns: A [run](/docs/api-reference/runs/object) object.
101 | examples:
102 | - title: Default
103 | request:
104 | curl: |
105 | curl https://api.openai.com/v1/threads/thread_abc123/runs \
106 | -H "Authorization: Bearer $OPENAI_API_KEY" \
107 | -H "Content-Type: application/json" \
108 | -H "OpenAI-Beta: assistants=v2" \
109 | -d '{
110 | "assistant_id": "asst_abc123"
111 | }'
112 | python: |
113 | from openai import OpenAI
114 | client = OpenAI()
115 |
116 | run = client.beta.threads.runs.create(
117 | thread_id="thread_abc123",
118 | assistant_id="asst_abc123"
119 | )
120 |
121 | print(run)
122 | node.js: |
123 | import OpenAI from "openai";
124 |
125 | const openai = new OpenAI();
126 |
127 | async function main() {
128 | const run = await openai.beta.threads.runs.create(
129 | "thread_abc123",
130 | { assistant_id: "asst_abc123" }
131 | );
132 |
133 | console.log(run);
134 | }
135 |
136 | main();
137 | response: |
138 | {
139 | "id": "run_abc123",
140 | "object": "thread.run",
141 | "created_at": 1699063290,
142 | "assistant_id": "asst_abc123",
143 | "thread_id": "thread_abc123",
144 | "status": "queued",
145 | "started_at": 1699063290,
146 | "expires_at": null,
147 | "cancelled_at": null,
148 | "failed_at": null,
149 | "completed_at": 1699063291,
150 | "last_error": null,
151 | "model": "gpt-4o",
152 | "instructions": null,
153 | "incomplete_details": null,
154 | "tools": [
155 | {
156 | "type": "code_interpreter"
157 | }
158 | ],
159 | "metadata": {},
160 | "usage": null,
161 | "temperature": 1.0,
162 | "top_p": 1.0,
163 | "max_prompt_tokens": 1000,
164 | "max_completion_tokens": 1000,
165 | "truncation_strategy": {
166 | "type": "auto",
167 | "last_messages": null
168 | },
169 | "response_format": "auto",
170 | "tool_choice": "auto",
171 | "parallel_tool_calls": true
172 | }
173 | - title: Streaming
174 | request:
175 | curl: |
176 | curl https://api.openai.com/v1/threads/thread_123/runs \
177 | -H "Authorization: Bearer $OPENAI_API_KEY" \
178 | -H "Content-Type: application/json" \
179 | -H "OpenAI-Beta: assistants=v2" \
180 | -d '{
181 | "assistant_id": "asst_123",
182 | "stream": true
183 | }'
184 | python: |
185 | from openai import OpenAI
186 | client = OpenAI()
187 |
188 | stream = client.beta.threads.runs.create(
189 | thread_id="thread_123",
190 | assistant_id="asst_123",
191 | stream=True
192 | )
193 |
194 | for event in stream:
195 | print(event)
196 | node.js: |
197 | import OpenAI from "openai";
198 |
199 | const openai = new OpenAI();
200 |
201 | async function main() {
202 | const stream = await openai.beta.threads.runs.create(
203 | "thread_123",
204 | { assistant_id: "asst_123", stream: true }
205 | );
206 |
207 | for await (const event of stream) {
208 | console.log(event);
209 | }
210 | }
211 |
212 | main();
213 | response: >
214 | event: thread.run.created
215 |
216 | data:
217 | {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}}
218 |
219 |
220 | event: thread.run.queued
221 |
222 | data:
223 | {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}}
224 |
225 |
226 | event: thread.run.in_progress
227 |
228 | data:
229 | {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}}
230 |
231 |
232 | event: thread.run.step.created
233 |
234 | data:
235 | {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null}
236 |
237 |
238 | event: thread.run.step.in_progress
239 |
240 | data:
241 | {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null}
242 |
243 |
244 | event: thread.message.created
245 |
246 | data:
247 | {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}}
248 |
249 |
250 | event: thread.message.in_progress
251 |
252 | data:
253 | {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}}
254 |
255 |
256 | event: thread.message.delta
257 |
258 | data:
259 | {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}}
260 |
261 |
262 | ...
263 |
264 |
265 | event: thread.message.delta
266 |
267 | data:
268 | {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"
269 | today"}}]}}
270 |
271 |
272 | event: thread.message.delta
273 |
274 | data:
275 | {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}}
276 |
277 |
278 | event: thread.message.completed
279 |
280 | data:
281 | {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710330642,"role":"assistant","content":[{"type":"text","text":{"value":"Hello!
282 | How can I assist you today?","annotations":[]}}],"metadata":{}}
283 |
284 |
285 | event: thread.run.step.completed
286 |
287 | data:
288 | {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}}
289 |
290 |
291 | event: thread.run.completed
292 |
293 | data:
294 | {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}}
295 |
296 |
297 | event: done
298 |
299 | data: [DONE]
300 | - title: Streaming with Functions
301 | request:
302 | curl: >
303 | curl https://api.openai.com/v1/threads/thread_abc123/runs \
304 | -H "Authorization: Bearer $OPENAI_API_KEY" \
305 | -H "Content-Type: application/json" \
306 | -H "OpenAI-Beta: assistants=v2" \
307 | -d '{
308 | "assistant_id": "asst_abc123",
309 | "tools": [
310 | {
311 | "type": "function",
312 | "function": {
313 | "name": "get_current_weather",
314 | "description": "Get the current weather in a given location",
315 | "parameters": {
316 | "type": "object",
317 | "properties": {
318 | "location": {
319 | "type": "string",
320 | "description": "The city and state, e.g. San Francisco, CA"
321 | },
322 | "unit": {
323 | "type": "string",
324 | "enum": ["celsius", "fahrenheit"]
325 | }
326 | },
327 | "required": ["location"]
328 | }
329 | }
330 | }
331 | ],
332 | "stream": true
333 | }'
334 | python: >
335 | from openai import OpenAI
336 |
337 | client = OpenAI()
338 |
339 |
340 | tools = [
341 | {
342 | "type": "function",
343 | "function": {
344 | "name": "get_current_weather",
345 | "description": "Get the current weather in a given location",
346 | "parameters": {
347 | "type": "object",
348 | "properties": {
349 | "location": {
350 | "type": "string",
351 | "description": "The city and state, e.g. San Francisco, CA",
352 | },
353 | "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
354 | },
355 | "required": ["location"],
356 | },
357 | }
358 | }
359 | ]
360 |
361 |
362 | stream = client.beta.threads.runs.create(
363 | thread_id="thread_abc123",
364 | assistant_id="asst_abc123",
365 | tools=tools,
366 | stream=True
367 | )
368 |
369 |
370 | for event in stream:
371 | print(event)
372 | node.js: >
373 | import OpenAI from "openai";
374 |
375 |
376 | const openai = new OpenAI();
377 |
378 |
379 | const tools = [
380 | {
381 | "type": "function",
382 | "function": {
383 | "name": "get_current_weather",
384 | "description": "Get the current weather in a given location",
385 | "parameters": {
386 | "type": "object",
387 | "properties": {
388 | "location": {
389 | "type": "string",
390 | "description": "The city and state, e.g. San Francisco, CA",
391 | },
392 | "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
393 | },
394 | "required": ["location"],
395 | },
396 | }
397 | }
398 | ];
399 |
400 |
401 | async function main() {
402 | const stream = await openai.beta.threads.runs.create(
403 | "thread_abc123",
404 | {
405 | assistant_id: "asst_abc123",
406 | tools: tools,
407 | stream: true
408 | }
409 | );
410 |
411 | for await (const event of stream) {
412 | console.log(event);
413 | }
414 | }
415 |
416 |
417 | main();
418 | response: >
419 | event: thread.run.created
420 |
421 | data:
422 | {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}}
423 |
424 |
425 | event: thread.run.queued
426 |
427 | data:
428 | {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}}
429 |
430 |
431 | event: thread.run.in_progress
432 |
433 | data:
434 | {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}}
435 |
436 |
437 | event: thread.run.step.created
438 |
439 | data:
440 | {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null}
441 |
442 |
443 | event: thread.run.step.in_progress
444 |
445 | data:
446 | {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null}
447 |
448 |
449 | event: thread.message.created
450 |
451 | data:
452 | {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}}
453 |
454 |
455 | event: thread.message.in_progress
456 |
457 | data:
458 | {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}}
459 |
460 |
461 | event: thread.message.delta
462 |
463 | data:
464 | {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}}
465 |
466 |
467 | ...
468 |
469 |
470 | event: thread.message.delta
471 |
472 | data:
473 | {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"
474 | today"}}]}}
475 |
476 |
477 | event: thread.message.delta
478 |
479 | data:
480 | {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}}
481 |
482 |
483 | event: thread.message.completed
484 |
485 | data:
486 | {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello!
487 | How can I assist you today?","annotations":[]}}],"metadata":{}}
488 |
489 |
490 | event: thread.run.step.completed
491 |
492 | data:
493 | {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}}
494 |
495 |
496 | event: thread.run.completed
497 |
498 | data:
499 | {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}}
500 |
501 |
502 | event: done
503 |
504 | data: [DONE]
505 | """
506 | |> YamlElixir.read_all_from_string!()
507 | |> List.first()
508 |
509 | expected = %{
510 | name: "create_run",
511 | group: "foo",
512 | arguments: [
513 | %{
514 | in: "path",
515 | name: "thread_id",
516 | type: "string",
517 | required?: true,
518 | example: ""
519 | },
520 | %{
521 | in: "query",
522 | name: "include[]",
523 | type: "array",
524 | required?: false,
525 | example: ""
526 | }
527 | ],
528 | deprecated?: false,
529 | endpoint: "/foo/${engine_id}",
530 | method: :post,
531 | response_type: {:component, "RunObject"},
532 | summary: "Create a run.",
533 | request_body: %{
534 | required?: true,
535 | content_type: :"application/json",
536 | request_schema: nil
537 | }
538 | }
539 |
540 | actual = ExOpenAI.Codegen.parse_path("/foo/${engine_id}", handler_schema, %{})
541 |
542 | assert actual == expected
543 | end
544 |
545 | test "get path with response component" do
546 | handler_schema =
547 | ~S"
548 | get:
549 | operationId: retrieveEngine
550 | deprecated: true
551 | tags:
552 | - OpenAI
553 | summary: Retrieves a model instance, providing basic information about it such as the owner and availability.
554 | parameters:
555 | - in: path
556 | name: engine_id
557 | required: true
558 | schema:
559 | type: string
560 | example:
561 | davinci
562 | description: &engine_id_description >
563 | The ID of the engine to use for this request
564 | responses:
565 | \"200\":
566 | description: OK
567 | content:
568 | application/json:
569 | schema:
570 | $ref: '#/components/schemas/Engine'
571 | x-oaiMeta:
572 | name: Retrieve engine
573 | group: engines
574 | path: retrieve"
575 | |> YamlElixir.read_all_from_string!()
576 | |> List.first()
577 |
578 | expected = %{
579 | arguments: [
580 | %{example: "davinci", in: "path", name: "engine_id", required?: true, type: "string"}
581 | ],
582 | deprecated?: true,
583 | endpoint: "/foo/${engine_id}",
584 | group: "foo",
585 | method: :get,
586 | name: "retrieve_engine",
587 | response_type: {:component, "Engine"},
588 | summary:
589 | "Retrieves a model instance, providing basic information about it such as the owner and availability."
590 | }
591 |
592 | assert ExOpenAI.Codegen.parse_path("/foo/${engine_id}", handler_schema, %{}) == expected
593 | end
594 |
595 | test "post path with request component" do
596 | handler_schema =
597 | ~S"
598 | post:
599 | operationId: retrieveEngine
600 | deprecated: true
601 | tags:
602 | - OpenAI
603 | summary: summary
604 | requestBody:
605 | required: true
606 | content:
607 | application/json:
608 | schema:
609 | $ref: '#/components/schemas/CreateSearchRequest'
610 | parameters:
611 | - in: path
612 | name: engine_id
613 | required: true
614 | schema:
615 | type: string
616 | example:
617 | davinci
618 | description: &engine_id_description >
619 | The ID of the engine to use for this request
620 | responses:
621 | \"200\":
622 | description: OK
623 | content:
624 | application/json:
625 | schema:
626 | type: 'number'
627 | x-oaiMeta:
628 | name: Retrieve engine
629 | group: engines
630 | path: retrieve"
631 | |> YamlElixir.read_all_from_string!()
632 | |> List.first()
633 |
634 | # CreateSearchRequest inside comp_mapping will get expanded into request_schema key
635 | comp_mapping = %{
636 | "CreateSearchRequest" => %{
637 | "type" => "object",
638 | "properties" => %{
639 | "foo" => %{
640 | "type" => "string"
641 | }
642 | }
643 | }
644 | }
645 |
646 | expected = %{
647 | arguments: [
648 | %{example: "davinci", in: "path", name: "engine_id", required?: true, type: "string"}
649 | ],
650 | deprecated?: true,
651 | endpoint: "/foo/${engine_id}",
652 | group: "foo",
653 | method: :post,
654 | name: "retrieve_engine",
655 | response_type: :number,
656 | summary: "summary",
657 | request_body: %{
658 | content_type: :"application/json",
659 | request_schema: %{"properties" => %{"foo" => %{"type" => "string"}}, "type" => "object"},
660 | required?: true
661 | }
662 | }
663 |
664 | assert ExOpenAI.Codegen.parse_path("/foo/${engine_id}", handler_schema, comp_mapping) ==
665 | expected
666 | end
667 |
    # Verifies that a multipart upload endpoint (image edit) keeps the
    # multipart/form-data content type and expands the referenced request
    # schema from the component mapping.
    test "post with multipart/form-data" do
      # OpenAPI fragment where both the request body and the 200 response
      # schemas are $refs; only the request-side ref has a mapping entry below.
      handler_schema =
        ~S"
post:
  operationId: createImageEdit
  tags:
  - OpenAI
  summary: Creates an edited or extended image given an original image and a prompt.
  requestBody:
    required: true
    content:
      multipart/form-data:
        schema:
          $ref: '#/components/schemas/CreateImageEditRequest'
  responses:
    \"200\":
      description: OK
      content:
        application/json:
          schema:
            $ref: '#/components/schemas/ImagesResponse'
  x-oaiMeta:
    name: Create image edit
    group: images
    path: create-edit
    beta: true"
        |> YamlElixir.read_all_from_string!()
        |> List.first()

      # CreateImageEditRequest inside comp_mapping will get expanded into the
      # request_body.request_schema key; ImagesResponse is not in the mapping,
      # so it stays a symbolic {:component, _} response_type.
      comp_mapping = %{
        "CreateImageEditRequest" => %{
          "type" => "object",
          "properties" => %{
            "image" => %{
              "type" => "bitstring"
            },
            "mask" => %{
              "type" => "bitstring"
            }
          }
        }
      }

      expected = %{
        arguments: [],
        deprecated?: false,
        endpoint: "/foo/${engine_id}",
        group: "foo",
        method: :post,
        name: "create_image_edit",
        response_type: {:component, "ImagesResponse"},
        summary: "Creates an edited or extended image given an original image and a prompt.",
        request_body: %{
          content_type: :"multipart/form-data",
          request_schema: %{
            "type" => "object",
            "properties" => %{
              "image" => %{"type" => "bitstring"},
              "mask" => %{"type" => "bitstring"}
            }
          },
          required?: true
        }
      }

      assert ExOpenAI.Codegen.parse_path("/foo/${engine_id}", handler_schema, comp_mapping) ==
               expected
    end
737 | end
738 | end
739 |
--------------------------------------------------------------------------------