├── .gitattributes ├── .github └── workflows │ ├── lib.json │ └── lib.yml ├── .gitignore ├── LIBRARY_TEST.md ├── LICENSE ├── README.md ├── examples ├── ChatCompletion │ └── ChatCompletion.ino ├── Completions │ └── Completions.ino ├── Edit │ └── Edit.ino ├── ImageGeneration │ └── ImageGeneration.ino └── Moderation │ └── Moderation.ino ├── keywords.txt ├── library.properties └── src ├── OpenAI.cpp └── OpenAI.h /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.github/workflows/lib.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "source-path": "./", 4 | "exclude_targets": [], 5 | "sketch_path": [ 6 | "examples" 7 | ] 8 | } 9 | ] -------------------------------------------------------------------------------- /.github/workflows/lib.yml: -------------------------------------------------------------------------------- 1 | name: Library Compilation Test 2 | 3 | # The workflow will run on push to master branch (create result file) and on pull requests (adding results comment) 4 | on: 5 | # Run when new changes are pushed to master branch 6 | push: 7 | branches: 8 | - 'master' 9 | 10 | pull_request: 11 | types: [opened, reopened, synchronize, labeled] 12 | 13 | env: 14 | # It's convenient to set variables for values used multiple times in the workflow 15 | SKETCHES_REPORTS_PATH: libraries-report 16 | SKETCHES_REPORTS_ARTIFACT_NAME: libraries-report 17 | RESULT_LIBRARY_TEST_FILE: LIBRARY_TEST.md 18 | JSON_LIBRARY_LIST_FILE: .github/workflows/lib.json 19 | 20 | jobs: 21 | compile-sketch: 22 | if: | 23 | github.event_name == 'pull_request' || 24 | (github.event_name == 'push' && github.repository == 'me-no-dev/OpenAI-ESP32') 25 | runs-on: ubuntu-latest 26 | 27 | env: 28 | # Options of how will be the ESP32 Arduino core 
installed 29 | BOARD_MANAGER: | 30 | - name: "espressif:esp32" 31 | source-url: https://raw.githubusercontent.com/espressif/arduino-esp32/gh-pages/package_esp32_dev_index.json 32 | version: 2.0.7 33 | 34 | GITHUB_URL: | 35 | - name: "espressif:esp32" 36 | source-url: https://github.com/espressif/arduino-esp32.git 37 | 38 | strategy: 39 | matrix: 40 | target: 41 | - esp32 42 | - esp32s2 43 | - esp32c3 44 | - esp32s3 45 | 46 | include: 47 | - target: esp32 48 | fqbn: espressif:esp32:esp32 49 | - target: esp32s2 50 | fqbn: espressif:esp32:esp32s2 51 | - target: esp32c3 52 | fqbn: espressif:esp32:esp32c3 53 | - target: esp32s3 54 | fqbn: espressif:esp32:esp32s3 55 | 56 | 57 | steps: 58 | # This step makes the contents of the repository available to the workflow 59 | - name: Checkout repository 60 | uses: actions/checkout@v3 61 | 62 | # Possible platforms input for compile-sketches 63 | # ${{ env.BOARD_MANAGER }} 64 | # ${{ env.GITHUB_URL }} 65 | 66 | - name: Compile sketch 67 | uses: P-R-O-C-H-Y/compile-sketches@main 68 | with: 69 | platforms: | 70 | ${{ env.GITHUB_URL }} 71 | target: ${{ matrix.target }} 72 | fqbn: ${{ matrix.fqbn }} 73 | use-json-file: true 74 | json-path: ${{ env.JSON_LIBRARY_LIST_FILE }} 75 | enable-deltas-report: true 76 | sketches-report-path: ${{ env.SKETCHES_REPORTS_PATH }} 77 | enable-warnings-report: true 78 | cli-compile-flags: | 79 | - --warnings="all" 80 | 81 | - name: Upload artifact 82 | uses: actions/upload-artifact@v3 83 | with: 84 | name: ${{ env.SKETCHES_REPORTS_ARTIFACT_NAME }} 85 | path: ${{ env.SKETCHES_REPORTS_PATH }} 86 | 87 | report-comment: 88 | needs: compile-sketch # Wait for the compile job to finish to get the data for the report 89 | if: github.event_name == 'pull_request' # Only run the job when the workflow is triggered by a pull request 90 | runs-on: ubuntu-latest 91 | steps: 92 | # This step is needed to get the size data produced by the compile jobs 93 | - name: Download sketches reports artifact 94 | uses: 
actions/download-artifact@v3 95 | with: 96 | name: ${{ env.SKETCHES_REPORTS_ARTIFACT_NAME }} 97 | path: ${{ env.SKETCHES_REPORTS_PATH }} 98 | 99 | - name: Report results 100 | uses: P-R-O-C-H-Y/report-size-deltas@main 101 | with: 102 | sketches-reports-source: ${{ env.SKETCHES_REPORTS_PATH }} 103 | 104 | report-to-file: 105 | needs: compile-sketch # Wait for the compile job to finish to get the data for the report 106 | if: github.event_name == 'push' # Only run the job when the workflow is triggered by a push 107 | runs-on: ubuntu-latest 108 | steps: 109 | # Check out repository 110 | - name: Checkout repository 111 | uses: actions/checkout@v3 112 | 113 | # This step is needed to get the size data produced by the compile jobs 114 | - name: Download sketches reports artifact 115 | uses: actions/download-artifact@v3 116 | with: 117 | name: ${{ env.SKETCHES_REPORTS_ARTIFACT_NAME }} 118 | path: ${{ env.SKETCHES_REPORTS_PATH }} 119 | 120 | - name: Report results 121 | uses: P-R-O-C-H-Y/report-size-deltas@main 122 | with: 123 | sketches-reports-source: ${{ env.SKETCHES_REPORTS_PATH }} 124 | destination-file: ${{ env.RESULT_LIBRARY_TEST_FILE }} 125 | 126 | - name: Append file with action URL 127 | uses: DamianReeves/write-file-action@master 128 | with: 129 | path: ${{ env.RESULT_LIBRARY_TEST_FILE }} 130 | contents: | 131 | / [GitHub Action Link](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}) 132 | write-mode: append 133 | 134 | - name: Push to github repo 135 | run: | 136 | git config user.name github-actions 137 | git config user.email github-actions@github.com 138 | git add ${{ env.RESULT_LIBRARY_TEST_FILE }} 139 | git commit -m "Generated Library Test Results" 140 | git push 141 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | .DS_Store 3 | 
-------------------------------------------------------------------------------- /LIBRARY_TEST.md: -------------------------------------------------------------------------------- 1 | ### External libraries build test 2 | 3 | Library|ESP32|ESP32C3|ESP32S2|ESP32S3 4 | -|:-:|:-:|:-:|:-: 5 | OpenAI-ESP32|5 :white_check_mark: |5 :white_check_mark: |5 :white_check_mark: |5 :white_check_mark: 6 | 7 | 8 | Generated on: Apr-01-2023 08:57:15 9 | / [GitHub Action Link](https://github.com/me-no-dev/OpenAI-ESP32/actions/runs/4582309067) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 me-no-dev 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## OpenAI Library for ESP32 Arduino 2 | 3 | [![Library Compilation Test](https://github.com/me-no-dev/OpenAI-ESP32/actions/workflows/lib.yml/badge.svg?branch=master&event=push)](https://github.com/me-no-dev/OpenAI-ESP32/actions/workflows/lib.yml?link=http://https://github.com/me-no-dev/OpenAI-ESP32/LIBRARY_TEST.md) 4 | 5 | This library currently supports most of the OpenAI API. Exception are `files`, `fine-tunes`. 6 | 7 | Examples for almost all endpoints can be found in the `examples` folder. 8 | 9 | Library is still in early stages and could sustain some small changes. Some are outlined [here](https://github.com/me-no-dev/OpenAI-ESP32/blob/master/src/OpenAI.cpp#L2-L7) 10 | -------------------------------------------------------------------------------- /examples/ChatCompletion/ChatCompletion.ino: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | const char* ssid = "your-SSID"; 5 | const char* password = "your-PASSWORD"; 6 | const char* api_key = "your-OPENAI_API_KEY"; 7 | 8 | OpenAI openai(api_key); 9 | OpenAI_ChatCompletion chat(openai); 10 | 11 | void setup(){ 12 | Serial.begin(115200); 13 | WiFi.begin(ssid, password); 14 | Serial.print("Connecting"); 15 | while (WiFi.status() != WL_CONNECTED) { 16 | delay(100); 17 | Serial.print("."); 18 | } 19 | Serial.println(); 20 | 21 | chat.setModel("gpt-3.5-turbo"); //Model to use for completion. Default is gpt-3.5-turbo 22 | chat.setSystem("Code geek"); //Description of the required assistant 23 | chat.setMaxTokens(1000); //The maximum number of tokens to generate in the completion. 24 | chat.setTemperature(0.2); //float between 0 and 2. Higher value gives more random results. 25 | chat.setStop("\r"); //Up to 4 sequences where the API will stop generating further tokens. 
26 | chat.setPresencePenalty(0); //float between -2.0 and 2.0. Positive values increase the model's likelihood to talk about new topics. 27 | chat.setFrequencyPenalty(0); //float between -2.0 and 2.0. Positive values decrease the model's likelihood to repeat the same line verbatim. 28 | chat.setUser("OpenAI-ESP32"); //A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 29 | 30 | Serial.println("You can now send chat message to OpenAI by typing in the Arduino IDE Serial Monitor."); 31 | Serial.println("Each line will be interpreted as one message and processed."); 32 | Serial.println("You can restart the conversation by typing \"clear\"\n"); 33 | } 34 | 35 | void loop() { 36 | String line = Serial.readStringUntil('\n'); 37 | if(line.length() == 0){ 38 | return; 39 | } 40 | if(line == "clear" || line == "clear\r"){ 41 | chat.clearConversation(); 42 | Serial.println("Conversation cleared!"); 43 | return; 44 | } 45 | Serial.println(); 46 | Serial.println(line); 47 | Serial.println("Processing..."); 48 | OpenAI_StringResponse result = chat.message(line); 49 | if(result.length() == 1){ 50 | Serial.printf("Received message. Tokens: %u\n", result.tokens()); 51 | String response = result.getAt(0); 52 | response.trim(); 53 | Serial.println(response); 54 | } else if(result.length() > 1){ 55 | Serial.printf("Received %u messages. Tokens: %u\n", result.length(), result.tokens()); 56 | for (unsigned int i = 0; i < result.length(); ++i){ 57 | String response = result.getAt(i); 58 | response.trim(); 59 | Serial.printf("Message[%u]:\n%s\n", i, response.c_str()); 60 | } 61 | } else if(result.error()){ 62 | Serial.print("Error! 
"); 63 | Serial.println(result.error()); 64 | } else { 65 | Serial.println("Unknown error!"); 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /examples/Completions/Completions.ino: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | const char* ssid = "your-SSID"; 5 | const char* password = "your-PASSWORD"; 6 | const char* api_key = "your-OPENAI_API_KEY"; 7 | 8 | OpenAI openai(api_key); 9 | OpenAI_Completion completion(openai); 10 | 11 | void setup(){ 12 | Serial.begin(115200); 13 | WiFi.begin(ssid, password); 14 | Serial.print("Connecting"); 15 | while (WiFi.status() != WL_CONNECTED) { 16 | delay(100); 17 | Serial.print("."); 18 | } 19 | Serial.println(); 20 | 21 | // completion.setModel("text-ada-001"); //Model to use for completion. Default is text-davinci-003 22 | // completion.setMaxTokens(1000); //The maximum number of tokens to generate in the completion. 23 | // completion.setTemperature(0.2); //float between 0 and 2. Higher value gives more random results. 24 | // completion.setN(2); //How many completions to generate for each prompt. 25 | // completion.setEcho(true); //Echo back the prompt in addition to the completion 26 | // completion.setStop("\r"); //Up to 4 sequences where the API will stop generating further tokens. 27 | // completion.setPresencePenalty(2.0); //float between -2.0 and 2.0. Positive values increase the model's likelihood to talk about new topics. 28 | // completion.setFrequencyPenalty(2.0); //float between -2.0 and 2.0. Positive values decrease the model's likelihood to repeat the same line verbatim. 29 | // completion.setBestOf(5); //Generates best_of completions server-side and returns the "best". "best_of" must be greater than "n" 30 | // completion.setUser("OpenAI-ESP32"); //A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
31 | 32 | Serial.println("You can now send prompt to OpenAI for completion by typing in the Arduino IDE Serial Monitor."); 33 | Serial.println("Each line will be interpreted as one prompt and processed.\n"); 34 | } 35 | 36 | void loop() { 37 | String line = Serial.readStringUntil('\n'); 38 | if(line.length() == 0){ 39 | return; 40 | } 41 | Serial.println(); 42 | Serial.println(line); 43 | Serial.println("Processing..."); 44 | OpenAI_StringResponse result = completion.prompt(line); 45 | if(result.length() == 1){ 46 | Serial.printf("Received completion. Tokens: %u\n", result.tokens()); 47 | String response = result.getAt(0); 48 | response.trim(); 49 | Serial.println(response); 50 | } else if(result.length() > 1){ 51 | Serial.printf("Received %u completions. Tokens: %u\n", result.length(), result.tokens()); 52 | for (unsigned int i = 0; i < result.length(); ++i){ 53 | String response = result.getAt(i); 54 | response.trim(); 55 | Serial.printf("Completion[%u]:\n%s\n", i, response.c_str()); 56 | } 57 | } else if(result.error()){ 58 | Serial.print("Error! "); 59 | Serial.println(result.error()); 60 | } else { 61 | Serial.println("Unknown error!"); 62 | } 63 | } -------------------------------------------------------------------------------- /examples/Edit/Edit.ino: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | const char* ssid = "your-SSID"; 5 | const char* password = "your-PASSWORD"; 6 | const char* api_key = "your-OPENAI_API_KEY"; 7 | 8 | OpenAI openai(api_key); 9 | OpenAI_Edit edit(openai); 10 | 11 | void setup(){ 12 | Serial.begin(115200); 13 | WiFi.begin(ssid, password); 14 | Serial.print("Connecting"); 15 | while (WiFi.status() != WL_CONNECTED) { 16 | delay(100); 17 | Serial.print("."); 18 | } 19 | Serial.println(); 20 | 21 | edit.setModel("code-davinci-edit-001"); //Model to use for completion. Default is text-davinci-edit-001 22 | edit.setTemperature(0); //float between 0 and 2. 
Higher value gives more random results. 23 | edit.setN(1); //How many edits to generate for the input and instruction. 24 | 25 | Serial.println("You can now send an instruction on how to edit an input to OpenAI by typing in the Arduino IDE Serial Monitor."); 26 | Serial.println("Each line will be interpreted as new instruction and processed against the last edit."); 27 | Serial.println("You can clear the old input by typing \"clear\"\n"); 28 | } 29 | 30 | // Will hold the last edit from OpenAI 31 | String input = ""; 32 | 33 | void loop() { 34 | String line = Serial.readStringUntil('\n'); 35 | if(line.length() == 0){ 36 | return; 37 | } 38 | if(line == "clear" || line == "clear\r"){ 39 | input = ""; 40 | Serial.println("Input cleared!"); 41 | return; 42 | } 43 | Serial.println(); 44 | Serial.println(line); 45 | Serial.println("Processing..."); 46 | OpenAI_StringResponse result = edit.process(line, input.length()?input.c_str():NULL); 47 | if(result.length() == 1){ 48 | Serial.printf("Received edit. Tokens: %u\n", result.tokens()); 49 | input = result.getAt(0); 50 | input.trim(); 51 | Serial.println(input); 52 | } else if(result.length() > 1){ 53 | Serial.printf("Received %u edits. Tokens: %u\n", result.length(), result.tokens()); 54 | for (unsigned int i = 0; i < result.length(); ++i){ 55 | String response = result.getAt(i); 56 | response.trim(); 57 | Serial.printf("Edit[%u]:\n%s\n", i, response.c_str()); 58 | } 59 | } else if(result.error()){ 60 | Serial.print("Error! 
"); 61 | Serial.println(result.error()); 62 | } else { 63 | Serial.println("Unknown error!"); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /examples/ImageGeneration/ImageGeneration.ino: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | const char* ssid = "your-SSID"; 5 | const char* password = "your-PASSWORD"; 6 | const char* api_key = "your-OPENAI_API_KEY"; 7 | 8 | OpenAI openai(api_key); 9 | OpenAI_ImageGeneration imageGeneration(openai); 10 | 11 | void setup(){ 12 | Serial.begin(115200); 13 | WiFi.begin(ssid, password); 14 | Serial.print("Connecting"); 15 | while (WiFi.status() != WL_CONNECTED) { 16 | delay(100); 17 | Serial.print("."); 18 | } 19 | Serial.println(); 20 | 21 | imageGeneration.setSize(OPENAI_IMAGE_SIZE_256x256); //The size of the generated images. 256, 512 or 1024 pixels square. 22 | imageGeneration.setResponseFormat(OPENAI_IMAGE_RESPONSE_FORMAT_URL); //The format in which the generated images are returned. URL or B64_JSON 23 | imageGeneration.setN(1); //The number of images to generate. Must be between 1 and 10. 24 | imageGeneration.setUser("OpenAI-ESP32"); //A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
25 | 26 | Serial.println("You can now let OpenAI to generate an image based on prompt by typing in the Arduino IDE Serial Monitor."); 27 | Serial.println("Each line will be interpreted as new prompt and processed."); 28 | } 29 | 30 | void loop() { 31 | String line = Serial.readStringUntil('\n'); 32 | if(line.length() == 0){ 33 | return; 34 | } 35 | Serial.println(); 36 | Serial.println(line); 37 | Serial.println("Processing..."); 38 | OpenAI_ImageResponse result = imageGeneration.prompt(line); 39 | if(result.length() == 1){ 40 | Serial.printf("Received image.\n"); 41 | Serial.println(result.getAt(0)); 42 | } else if(result.length() > 1){ 43 | Serial.printf("Received %u images.\n", result.length()); 44 | for (unsigned int i = 0; i < result.length(); ++i){ 45 | Serial.printf("Image[%u]:\n%s\n", i, result.getAt(i)); 46 | } 47 | } else if(result.error()){ 48 | Serial.print("Error! "); 49 | Serial.println(result.error()); 50 | } else { 51 | Serial.println("Unknown error!"); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /examples/Moderation/Moderation.ino: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | const char* ssid = "your-SSID"; 5 | const char* password = "your-PASSWORD"; 6 | const char* api_key = "your-OPENAI_API_KEY"; 7 | 8 | OpenAI openai(api_key); 9 | 10 | void setup(){ 11 | Serial.begin(115200); 12 | WiFi.begin(ssid, password); 13 | Serial.print("Connecting"); 14 | while (WiFi.status() != WL_CONNECTED) { 15 | delay(100); 16 | Serial.print("."); 17 | } 18 | Serial.println(); 19 | 20 | Serial.println("You can now send a message to OpenAI and check if it's offensive by typing in the Arduino IDE Serial Monitor."); 21 | Serial.println("Each line will be interpreted as a single message and processed."); 22 | } 23 | 24 | void loop() { 25 | String line = Serial.readStringUntil('\n'); 26 | if(line.length() == 0){ 27 | return; 28 | } 29 | 
Serial.println(line); 30 | Serial.println("Processing..."); 31 | OpenAI_ModerationResponse result = openai.moderation(line); 32 | if(result.length() == 1){ 33 | Serial.println(result.getAt(0)?"Flagged":"Not Flagged"); 34 | } else if(result.length() > 1){ 35 | Serial.printf("Received %u moderations.\n", result.length()); 36 | for (unsigned int i = 0; i < result.length(); ++i){ 37 | Serial.printf("Moderation[%u]:\n%s\n", i, result.getAt(i)?"Flagged":"Not Flagged"); 38 | } 39 | } else if(result.error()){ 40 | Serial.print("Error! "); 41 | Serial.println(result.error()); 42 | } else { 43 | Serial.println("Unknown error!"); 44 | } 45 | Serial.println(); 46 | } 47 | -------------------------------------------------------------------------------- /keywords.txt: -------------------------------------------------------------------------------- 1 | ####################################### 2 | # Syntax Coloring Map For OpenAI 3 | ####################################### 4 | 5 | ####################################### 6 | # Datatypes (KEYWORD1) 7 | ####################################### 8 | 9 | OpenAI KEYWORD1 10 | OpenAI_Completion KEYWORD1 11 | OpenAI_ChatCompletion KEYWORD1 12 | OpenAI_Edit KEYWORD1 13 | OpenAI_ImageGeneration KEYWORD1 14 | OpenAI_ImageVariation KEYWORD1 15 | OpenAI_ImageEdit KEYWORD1 16 | OpenAI_AudioTranscription KEYWORD1 17 | OpenAI_AudioTranslation KEYWORD1 18 | OpenAI_Image_Size KEYWORD1 19 | OpenAI_Image_Response_Format KEYWORD1 20 | OpenAI_Audio_Response_Format KEYWORD1 21 | OpenAI_Audio_Input_Format KEYWORD1 22 | OpenAI_StringResponse KEYWORD1 23 | OpenAI_ImageResponse KEYWORD1 24 | OpenAI_ModerationResponse KEYWORD1 25 | OpenAI_EmbeddingResponse KEYWORD1 26 | 27 | ####################################### 28 | # Methods and Functions (KEYWORD2) 29 | ####################################### 30 | 31 | tokens KEYWORD2 32 | length KEYWORD2 33 | getAt KEYWORD2 34 | embedding KEYWORD2 35 | moderation KEYWORD2 36 | completion KEYWORD2 37 | chat KEYWORD2 38 | 
edit KEYWORD2 39 | imageGeneration KEYWORD2 40 | imageVariation KEYWORD2 41 | imageEdit KEYWORD2 42 | audioTranscription KEYWORD2 43 | audioTranslation KEYWORD2 44 | get KEYWORD2 45 | del KEYWORD2 46 | post KEYWORD2 47 | upload KEYWORD2 48 | setModel KEYWORD2 49 | setMaxTokens KEYWORD2 50 | setTemperature KEYWORD2 51 | setTopP KEYWORD2 52 | setN KEYWORD2 53 | setEcho KEYWORD2 54 | setStop KEYWORD2 55 | setPresencePenalty KEYWORD2 56 | setFrequencyPenalty KEYWORD2 57 | setBestOf KEYWORD2 58 | setUser KEYWORD2 59 | prompt KEYWORD2 60 | setSystem KEYWORD2 61 | clearConversation KEYWORD2 62 | message KEYWORD2 63 | process KEYWORD2 64 | setSize KEYWORD2 65 | setResponseFormat KEYWORD2 66 | image KEYWORD2 67 | setPrompt KEYWORD2 68 | setLanguage KEYWORD2 69 | file KEYWORD2 70 | 71 | ####################################### 72 | # Constants (LITERAL1) 73 | ####################################### 74 | 75 | OPENAI_IMAGE_SIZE_1024x1024 LITERAL1 76 | OPENAI_IMAGE_SIZE_512x512 LITERAL1 77 | OPENAI_IMAGE_SIZE_256x256 LITERAL1 78 | OPENAI_IMAGE_RESPONSE_FORMAT_URL LITERAL1 79 | OPENAI_IMAGE_RESPONSE_FORMAT_B64_JSON LITERAL1 80 | OPENAI_AUDIO_RESPONSE_FORMAT_JSON LITERAL1 81 | OPENAI_AUDIO_RESPONSE_FORMAT_TEXT LITERAL1 82 | OPENAI_AUDIO_RESPONSE_FORMAT_SRT LITERAL1 83 | OPENAI_AUDIO_RESPONSE_FORMAT_VERBOSE_JSON LITERAL1 84 | OPENAI_AUDIO_RESPONSE_FORMAT_VTT LITERAL1 85 | OPENAI_AUDIO_INPUT_FORMAT_MP3 LITERAL1 86 | OPENAI_AUDIO_INPUT_FORMAT_MP4 LITERAL1 87 | OPENAI_AUDIO_INPUT_FORMAT_MPEG LITERAL1 88 | OPENAI_AUDIO_INPUT_FORMAT_MPGA LITERAL1 89 | OPENAI_AUDIO_INPUT_FORMAT_M4A LITERAL1 90 | OPENAI_AUDIO_INPUT_FORMAT_WAV LITERAL1 91 | OPENAI_AUDIO_INPUT_FORMAT_WEBM LITERAL1 92 | -------------------------------------------------------------------------------- /library.properties: -------------------------------------------------------------------------------- 1 | name=OpenAI-ESP32 2 | version=1.0.0 3 | author=me-no-dev 4 | maintainer=me-no-dev 5 | sentence=Library for OpenAI 6 | 
paragraph=Supports ESP32 Arduino platforms. 7 | category=Sensor 8 | url=https://github.com/me-no-dev/OpenAI-ESP32/ 9 | architectures=esp32 -------------------------------------------------------------------------------- /src/OpenAI.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | ToDo: 3 | - Support FS::File as input 4 | - Look into supporting "stream" responses? 5 | - Thread-Safe API? 6 | */ 7 | 8 | #include "OpenAI.h" 9 | #include "HTTPClient.h" 10 | 11 | // Macros for building the request 12 | #define reqAddString(var,val) \ 13 | if(cJSON_AddStringToObject(req, var, val) == NULL){ \ 14 | cJSON_Delete(req); \ 15 | log_e("cJSON_AddStringToObject failed!"); \ 16 | return result; \ 17 | } 18 | 19 | #define reqAddNumber(var,val) \ 20 | if(cJSON_AddNumberToObject(req, var, val) == NULL){ \ 21 | cJSON_Delete(req); \ 22 | log_e("cJSON_AddNumberToObject failed!"); \ 23 | return result; \ 24 | } 25 | 26 | #define reqAddBool(var,val) \ 27 | if(cJSON_AddBoolToObject(req, var, val) == NULL){ \ 28 | cJSON_Delete(req); \ 29 | log_e("cJSON_AddBoolToObject failed!"); \ 30 | return result; \ 31 | } 32 | 33 | #define reqAddItem(var,val) \ 34 | if(!cJSON_AddItemToObject(req, var, val)){ \ 35 | cJSON_Delete(req); \ 36 | cJSON_Delete(val); \ 37 | log_e("cJSON_AddItemToObject failed!"); \ 38 | return result; \ 39 | } 40 | 41 | static String getJsonError(cJSON * json){ 42 | if(json == NULL){ 43 | return String("cJSON_Parse failed!"); 44 | } 45 | if(!cJSON_IsObject(json)){ 46 | return String("Response is not an object! " + String(cJSON_Print(json))); 47 | } 48 | if(cJSON_HasObjectItem(json, "error")){ 49 | cJSON * error = cJSON_GetObjectItem(json, "error"); 50 | if(!cJSON_IsObject(error)){ 51 | return String("Error is not an object! " + String(cJSON_Print(error))); 52 | } 53 | if(!cJSON_HasObjectItem(error, "message")){ 54 | return String("Error does not contain message! 
" + String(cJSON_Print(error))); 55 | } 56 | cJSON * error_message = cJSON_GetObjectItem(error, "message"); 57 | return String(cJSON_GetStringValue(error_message)); 58 | } 59 | return String(); 60 | } 61 | 62 | // 63 | // OpenAI_EmbeddingResponse 64 | // 65 | 66 | OpenAI_EmbeddingResponse::OpenAI_EmbeddingResponse(const char * payload){ 67 | usage = 0; 68 | len = 0; 69 | data = NULL; 70 | error_str = NULL; 71 | cJSON * u, *tokens, *d; 72 | int dl; 73 | 74 | if(payload == NULL){ 75 | return; 76 | } 77 | 78 | // Parse payload 79 | cJSON * json = cJSON_Parse(payload); 80 | 81 | // Check for error 82 | String error = getJsonError(json); 83 | if(error.length()){ 84 | error_str = strdup(error.c_str()); 85 | if(json != NULL){ 86 | cJSON_Delete(json); 87 | } 88 | log_e("%s",error.c_str()); 89 | return; 90 | } 91 | 92 | // Get total_tokens 93 | if(!cJSON_HasObjectItem(json, "usage")){ 94 | log_e("Usage was not found"); 95 | goto end; 96 | } 97 | u = cJSON_GetObjectItem(json, "usage"); 98 | if(u == NULL || !cJSON_IsObject(u) || !cJSON_HasObjectItem(u, "total_tokens")){ 99 | log_e("Total tokens were not found"); 100 | goto end; 101 | } 102 | tokens = cJSON_GetObjectItem(u, "total_tokens"); 103 | if(tokens == NULL){ 104 | log_e("Total tokens could not be read"); 105 | goto end; 106 | } 107 | usage = cJSON_GetNumberValue(tokens); 108 | 109 | // Parse data 110 | if(!cJSON_HasObjectItem(json, "data")){ 111 | log_e("Data was not found"); 112 | goto end; 113 | } 114 | d = cJSON_GetObjectItem(json, "data"); 115 | if(d == NULL || !cJSON_IsArray(d)){ 116 | log_e("Data is not array"); 117 | goto end; 118 | } 119 | dl = cJSON_GetArraySize(d); 120 | if(dl <= 0){ 121 | log_e("Data is empty"); 122 | goto end; 123 | } 124 | data = (OpenAI_EmbeddingData*)malloc(dl * sizeof(OpenAI_EmbeddingData)); 125 | if(data == NULL){ 126 | log_e("Data could not be allocated"); 127 | goto end; 128 | } 129 | for (int di = 0; di < dl; di++){ 130 | cJSON * ditem = cJSON_GetArrayItem(d, di); 131 | if(ditem == 
NULL || !cJSON_IsObject(ditem) || !cJSON_HasObjectItem(ditem, "embedding")){
            log_e("Embedding was not found");
            goto end;
        }
        cJSON * numberArray = cJSON_GetObjectItem(ditem, "embedding");
        if(numberArray == NULL || !cJSON_IsArray(numberArray)){
            log_e("Embedding is not array");
            goto end;
        }
        int l = cJSON_GetArraySize(numberArray);
        if(l <= 0){
            log_e("Embedding is empty");
            goto end;
        }
        data[di].data = (double*)malloc(l * sizeof(double));
        if(data[di].data == NULL){
            log_e("Embedding could not be allocated");
            goto end;
        }
        // Count the slot as owned as soon as its buffer exists, so the
        // destructor frees it even if a later item fails and we jump to end.
        len++;
        data[di].len = l;
        for (int i = 0; i < l; i++){
            cJSON * item = cJSON_GetArrayItem(numberArray, i);
            if(item == NULL){
                log_e("Embedding item could not be read");
                goto end;
            }
            data[di].data[i] = cJSON_GetNumberValue(item);
        }
    }
end:
    cJSON_Delete(json);
}

// Frees every embedding vector, the vector table and the optional error string.
OpenAI_EmbeddingResponse::~OpenAI_EmbeddingResponse(){
    if(data){
        for (unsigned int i = 0; i < len; i++){
            free(data[i].data);
        }
        free(data);
    }
    if(error_str != NULL){
        free(error_str);
    }
}

//
// OpenAI_ModerationResponse
//

// Parses a /moderations response payload into one bool per "results" item
// (the item's "flagged" value). On an API error only error_str is set and
// len stays 0. A NULL payload yields an empty response object.
OpenAI_ModerationResponse::OpenAI_ModerationResponse(const char * payload){
    len = 0;
    data = NULL;
    error_str = NULL;
    cJSON *d;
    int dl;

    if(payload == NULL){
        return;
    }

    // Parse payload
    cJSON * json = cJSON_Parse(payload);

    // Check for error
    String error = getJsonError(json);
    if(error.length()){
        error_str = strdup(error.c_str());
        if(json != NULL){
            cJSON_Delete(json);
        }
        log_e("%s",error.c_str());
        return;
    }

    // Parse data
    if(!cJSON_HasObjectItem(json, "results")){
        log_e("Results was not found");
        goto end;
    }
    d = cJSON_GetObjectItem(json, "results");
    if(d == NULL || !cJSON_IsArray(d)){
        log_e("Results is not array");
        goto end;
    }
    dl = cJSON_GetArraySize(d);
    if(dl <= 0){
        log_e("Results is empty");
        goto end;
    }
    data = (bool*)malloc(dl * sizeof(bool));
    if(data == NULL){
        log_e("Data could not be allocated");
        goto end;
    }
    // NOTE(review): len is set before the loop fills data; a mid-loop failure
    // leaves trailing entries uninitialized (destructor is still safe).
    len = dl;
    for (int di = 0; di < dl; di++){
        cJSON * ditem = cJSON_GetArrayItem(d, di);
        if(ditem == NULL || !cJSON_IsObject(ditem) || !cJSON_HasObjectItem(ditem, "flagged")){
            log_e("Flagged was not found");
            goto end;
        }
        cJSON * flagged = cJSON_GetObjectItem(ditem, "flagged");
        if(flagged == NULL || !cJSON_IsBool(flagged)){
            log_e("Flagged is not bool");
            goto end;
        }
        data[di] = cJSON_IsTrue(flagged);
    }
end:
    cJSON_Delete(json);
}

OpenAI_ModerationResponse::~OpenAI_ModerationResponse(){
    if(data){
        free(data);
    }
    if(error_str != NULL){
        free(error_str);
    }
}

//
// OpenAI_ImageResponse
//

// Parses an images endpoint payload: for every item of "data" it keeps a
// strdup'ed copy of either the "url" or the "b64_json" string (url wins if
// both are present). The copies are owned by this object.
OpenAI_ImageResponse::OpenAI_ImageResponse(const char * payload){
    len = 0;
    data = NULL;
    error_str = NULL;
    cJSON *d;
    int dl;

    if(payload == NULL){
        return;
    }

    // Parse payload
    cJSON * json = cJSON_Parse(payload);

    // Check for error
    String error = getJsonError(json);
    if(error.length()){
        error_str = strdup(error.c_str());
        if(json != NULL){
            cJSON_Delete(json);
        }
        log_e("%s",error.c_str());
        return;
    }

    // Parse data
    if(!cJSON_HasObjectItem(json, "data")){
        log_e("Data was not found");
        goto end;
    }
    d = cJSON_GetObjectItem(json, "data");
    if(d == NULL || !cJSON_IsArray(d)){
        log_e("Data is not array");
        goto end;
    }
    dl = cJSON_GetArraySize(d);
    if(dl <= 0){
        log_e("Data is empty");
        goto end;
    }
    data = (char**)malloc(dl * sizeof(char*));
    if(data == NULL){
        log_e("Data could not be allocated");
        goto end;
    }

    for (int di = 0; di < dl; di++){
        cJSON * item = cJSON_GetArrayItem(d, di);
        if(item == NULL || !cJSON_IsObject(item) || (!cJSON_HasObjectItem(item, "url") && !cJSON_HasObjectItem(item, "b64_json"))){
            log_e("Image was not found");
            goto end;
        }
        if(cJSON_HasObjectItem(item, "url")){
            cJSON * url = cJSON_GetObjectItem(item, "url");
            if(url == NULL || !cJSON_IsString(url)){
                log_e("Image url could not be read");
                goto end;
            }
            data[di] = strdup(cJSON_GetStringValue(url));
            if(data[di] == NULL){
                log_e("Image url could not be copied");
                goto end;
            }
            // len counts successfully copied entries so the destructor only
            // frees valid pointers after a partial parse.
            len++;
        } else if(cJSON_HasObjectItem(item, "b64_json")){
            cJSON * b64_json = cJSON_GetObjectItem(item, "b64_json");
            if(b64_json == NULL || !cJSON_IsString(b64_json)){
                log_e("Image b64_json could not be read");
                goto end;
            }
            data[di] = strdup(cJSON_GetStringValue(b64_json));
            if(data[di] == NULL){
                log_e("Image b64_json could not be copied");
                goto end;
            }
            len++;
        }
    }
end:
    cJSON_Delete(json);
}

OpenAI_ImageResponse::~OpenAI_ImageResponse(){
    if(data){
        for (unsigned int i = 0; i < len; i++){
            free(data[i]);
        }
        free(data);
    }
    if(error_str != NULL){
        free(error_str);
    }
}

//
// OpenAI_StringResponse
//

// Parses a completions/chat/edits style payload: reads "usage.total_tokens"
// and keeps one strdup'ed string per "choices" item (from "text" or from
// "message.content").
OpenAI_StringResponse::OpenAI_StringResponse(const char * payload){
    usage = 0;
    len = 0;
    data = NULL;
    error_str = NULL;
    cJSON * u, *tokens, *d;
    int dl;

    if(payload == NULL){
        return;
    }

    // Parse payload
    cJSON * json = 
cJSON_Parse(payload); 369 | 370 | // Check for error 371 | String error = getJsonError(json); 372 | if(error.length()){ 373 | error_str = strdup(error.c_str()); 374 | if(json != NULL){ 375 | cJSON_Delete(json); 376 | } 377 | log_e("%s",error.c_str()); 378 | return; 379 | } 380 | 381 | // Get total_tokens 382 | if(!cJSON_HasObjectItem(json, "usage")){ 383 | log_e("Usage was not found"); 384 | goto end; 385 | } 386 | u = cJSON_GetObjectItem(json, "usage"); 387 | if(u == NULL || !cJSON_IsObject(u) || !cJSON_HasObjectItem(u, "total_tokens")){ 388 | log_e("Total tokens were not found"); 389 | goto end; 390 | } 391 | tokens = cJSON_GetObjectItem(u, "total_tokens"); 392 | if(tokens == NULL){ 393 | log_e("Total tokens could not be read"); 394 | goto end; 395 | } 396 | usage = cJSON_GetNumberValue(tokens); 397 | 398 | // Parse data 399 | if(!cJSON_HasObjectItem(json, "choices")){ 400 | log_e("Choices was not found"); 401 | goto end; 402 | } 403 | d = cJSON_GetObjectItem(json, "choices"); 404 | if(d == NULL || !cJSON_IsArray(d)){ 405 | log_e("Choices is not array"); 406 | goto end; 407 | } 408 | dl = cJSON_GetArraySize(d); 409 | if(dl <= 0){ 410 | log_e("Choices is empty"); 411 | goto end; 412 | } 413 | data = (char**)malloc(dl * sizeof(char*)); 414 | if(data == NULL){ 415 | log_e("Data could not be allocated"); 416 | goto end; 417 | } 418 | 419 | for (int di = 0; di < dl; di++){ 420 | cJSON * item = cJSON_GetArrayItem(d, di); 421 | if(item == NULL || !cJSON_IsObject(item) || (!cJSON_HasObjectItem(item, "text") && !cJSON_HasObjectItem(item, "message"))){ 422 | log_e("Message was not found"); 423 | goto end; 424 | } 425 | if(cJSON_HasObjectItem(item, "text")){ 426 | cJSON * text = cJSON_GetObjectItem(item, "text"); 427 | if(text == NULL || !cJSON_IsString(text)){ 428 | log_e("Text could not be read"); 429 | goto end; 430 | } 431 | data[di] = strdup(cJSON_GetStringValue(text)); 432 | if(data[di] == NULL){ 433 | log_e("Text could not be copied"); 434 | goto end; 435 | } 436 | 
len++; 437 | } else if(cJSON_HasObjectItem(item, "message")){ 438 | cJSON * message = cJSON_GetObjectItem(item, "message"); 439 | if(message == NULL || !cJSON_IsObject(message) || !cJSON_HasObjectItem(message, "content")){ 440 | log_e("Message is not object"); 441 | goto end; 442 | } 443 | cJSON * mesg = cJSON_GetObjectItem(message, "content"); 444 | if(mesg == NULL || !cJSON_IsString(mesg)){ 445 | log_e("Message could not be read"); 446 | goto end; 447 | } 448 | data[di] = strdup(cJSON_GetStringValue(mesg)); 449 | if(data[di] == NULL){ 450 | log_e("Message could not be copied"); 451 | goto end; 452 | } 453 | len = di+1; 454 | } 455 | } 456 | end: 457 | cJSON_Delete(json); 458 | } 459 | 460 | OpenAI_StringResponse::~OpenAI_StringResponse(){ 461 | if(data != NULL){ 462 | for (unsigned int i = 0; i < len; i++){ 463 | free(data[i]); 464 | } 465 | free(data); 466 | } 467 | if(error_str != NULL){ 468 | free(error_str); 469 | } 470 | } 471 | 472 | // 473 | // OpenAI 474 | // 475 | 476 | OpenAI::OpenAI(const char *openai_api_key) 477 | : api_key(openai_api_key) 478 | { 479 | 480 | } 481 | 482 | OpenAI::~OpenAI(){ 483 | 484 | } 485 | 486 | String OpenAI::upload(String endpoint, String boundary, uint8_t * data, size_t len) { 487 | log_d("\"%s\": boundary=%s, len=%u", endpoint.c_str(), boundary.c_str(), len); 488 | HTTPClient http; 489 | http.setTimeout(20000); 490 | http.begin("https://api.openai.com/v1/" + endpoint); 491 | http.addHeader("Content-Type", "multipart/form-data; boundary="+boundary); 492 | http.addHeader("Authorization", "Bearer " + api_key); 493 | int httpCode = http.sendRequest("POST", data, len); 494 | if (httpCode != HTTP_CODE_OK) { 495 | log_e("HTTP_ERROR: %d", httpCode); 496 | } 497 | String response = http.getString(); 498 | http.end(); 499 | log_d("%s", response.c_str()); 500 | return response; 501 | } 502 | 503 | String OpenAI::post(String endpoint, String jsonBody) { 504 | log_d("\"%s\": %s", endpoint.c_str(), jsonBody.c_str()); 505 | HTTPClient 
http; 506 | http.setTimeout(60000); 507 | http.begin("https://api.openai.com/v1/" + endpoint); 508 | http.addHeader("Content-Type", "application/json"); 509 | http.addHeader("Authorization", "Bearer " + api_key); 510 | int httpCode = http.POST(jsonBody); 511 | if (httpCode != HTTP_CODE_OK) { 512 | log_e("HTTP_ERROR: %d", httpCode); 513 | } 514 | String response = http.getString(); 515 | http.end(); 516 | log_d("%s", response.c_str()); 517 | return response; 518 | } 519 | 520 | String OpenAI::get(String endpoint) { 521 | log_d("\"%s\"", endpoint.c_str()); 522 | HTTPClient http; 523 | http.begin("https://api.openai.com/v1/" + endpoint); 524 | http.addHeader("Authorization", "Bearer " + api_key); 525 | int httpCode = http.GET(); 526 | if (httpCode != HTTP_CODE_OK) { 527 | log_e("HTTP_ERROR: %d", httpCode); 528 | } 529 | String response = http.getString(); 530 | http.end(); 531 | log_d("%s", response.c_str()); 532 | return response; 533 | } 534 | 535 | String OpenAI::del(String endpoint) { 536 | log_d("\"%s\"", endpoint.c_str()); 537 | HTTPClient http; 538 | http.begin("https://api.openai.com/v1/" + endpoint); 539 | http.addHeader("Authorization", "Bearer " + api_key); 540 | int httpCode = http.sendRequest("DELETE"); 541 | if (httpCode != HTTP_CODE_OK) { 542 | log_e("HTTP_ERROR: %d", httpCode); 543 | } 544 | String response = http.getString(); 545 | http.end(); 546 | log_d("%s", response.c_str()); 547 | return response; 548 | } 549 | 550 | OpenAI_Completion OpenAI::completion(){ 551 | return OpenAI_Completion(*this); 552 | } 553 | 554 | OpenAI_ChatCompletion OpenAI::chat(){ 555 | return OpenAI_ChatCompletion(*this); 556 | } 557 | 558 | OpenAI_Edit OpenAI::edit(){ 559 | return OpenAI_Edit(*this); 560 | } 561 | 562 | OpenAI_ImageGeneration OpenAI::imageGeneration(){ 563 | return OpenAI_ImageGeneration(*this); 564 | } 565 | 566 | OpenAI_ImageVariation OpenAI::imageVariation(){ 567 | return OpenAI_ImageVariation(*this); 568 | } 569 | 570 | OpenAI_ImageEdit 
OpenAI::imageEdit(){ 571 | return OpenAI_ImageEdit(*this); 572 | } 573 | 574 | OpenAI_AudioTranscription OpenAI::audioTranscription(){ 575 | return OpenAI_AudioTranscription(*this); 576 | } 577 | 578 | OpenAI_AudioTranslation OpenAI::audioTranslation(){ 579 | return OpenAI_AudioTranslation(*this); 580 | } 581 | 582 | // embeddings { //Creates an embedding vector representing the input text. 583 | // "model": "text-embedding-ada-002",//required 584 | // "input": "The food was delicious and the waiter...",//required string or array. Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length. 585 | // "user": null//string. A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 586 | // } 587 | 588 | OpenAI_EmbeddingResponse OpenAI::embedding(String input, const char * model, const char * user){ 589 | String endpoint = "embeddings"; 590 | 591 | OpenAI_EmbeddingResponse result = OpenAI_EmbeddingResponse(NULL); 592 | cJSON * req = cJSON_CreateObject(); 593 | if(req == NULL){ 594 | log_e("cJSON_CreateObject failed!"); 595 | return result; 596 | } 597 | reqAddString("model", (model == NULL)?"text-embedding-ada-002":model); 598 | if(input.startsWith("[")){ 599 | cJSON * in = cJSON_Parse(input.c_str()); 600 | if(in == NULL || !cJSON_IsArray(in)){ 601 | log_e("Input not JSON Array!"); 602 | cJSON_Delete(req); 603 | return result; 604 | } 605 | reqAddItem("input", in); 606 | } else { 607 | reqAddString("input", input.c_str()); 608 | } 609 | if(user != NULL){ 610 | reqAddString("user", user); 611 | } 612 | String jsonBody = String(cJSON_Print(req)); 613 | cJSON_Delete(req); 614 | String response = post(endpoint, jsonBody); 615 | 616 | if(!response.length()){ 617 | log_e("Empty response!"); 618 | return result; 619 | } 620 | return 
OpenAI_EmbeddingResponse(response.c_str()); 621 | } 622 | 623 | // moderations { //Classifies if text violates OpenAI's Content Policy 624 | // "input": "I want to kill them.",//required string or array 625 | // "model": "text-moderation-latest"//optional. Two content moderations models are available: text-moderation-stable and text-moderation-latest. 626 | // } 627 | 628 | OpenAI_ModerationResponse OpenAI::moderation(String input, const char * model){ 629 | String endpoint = "moderations"; 630 | 631 | OpenAI_ModerationResponse result = OpenAI_ModerationResponse(NULL); 632 | String res = ""; 633 | cJSON * req = cJSON_CreateObject(); 634 | if(req == NULL){ 635 | log_e("cJSON_CreateObject failed!"); 636 | return result; 637 | } 638 | if(input.startsWith("[")){ 639 | cJSON * in = cJSON_Parse(input.c_str()); 640 | if(in == NULL || !cJSON_IsArray(in)){ 641 | log_e("Input not JSON Array!"); 642 | cJSON_Delete(req); 643 | return result; 644 | } 645 | reqAddItem("input", in); 646 | } else { 647 | reqAddString("input", input.c_str()); 648 | } 649 | if(model != NULL){ 650 | reqAddString("model", model); 651 | } 652 | String jsonBody = String(cJSON_Print(req)); 653 | cJSON_Delete(req); 654 | res = post(endpoint, jsonBody); 655 | 656 | if(!res.length()){ 657 | log_e("Empty result!"); 658 | return result; 659 | } 660 | return OpenAI_ModerationResponse(res.c_str()); 661 | } 662 | 663 | // completions { //Creates a completion for the provided prompt and parameters 664 | // "model": "text-davinci-003",//required 665 | // "prompt": "<|endoftext|>",//string, array of strings, array of tokens, or array of token arrays. 666 | // "max_tokens": 16,//integer. The maximum number of tokens to generate in the completion. 667 | // "temperature": 1,//float between 0 and 2 668 | // "top_p": 1,//float between 0 and 1. recommended to alter this or temperature but not both. 669 | // "n": 1,//integer. How many completions to generate for each prompt. 670 | // "stream": false,//boolean. 
Whether to stream back partial progress. keep false 671 | // "logprobs": null,//integer. Include the log probabilities on the logprobs most likely tokens, as well the chosen tokens. 672 | // "echo": false,//boolean. Echo back the prompt in addition to the completion 673 | // "stop": null,//string or array. Up to 4 sequences where the API will stop generating further tokens. 674 | // "presence_penalty": 0,//float between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 675 | // "frequency_penalty": 0,//float between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 676 | // "best_of": 1,//integer. Generates best_of completions server-side and returns the "best". best_of must be greater than n 677 | // "logit_bias": null,//map. Modify the likelihood of specified tokens appearing in the completion. 678 | // "user": null//string. A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
679 | // } 680 | 681 | OpenAI_Completion::OpenAI_Completion(OpenAI &openai) 682 | : oai(openai) 683 | , model(NULL) 684 | , max_tokens(0) 685 | , temperature(1) 686 | , top_p(1) 687 | , n(1) 688 | , echo(false) 689 | , stop(NULL) 690 | , presence_penalty(0) 691 | , frequency_penalty(0) 692 | , best_of(1) 693 | , user(NULL) 694 | {} 695 | 696 | OpenAI_Completion::~OpenAI_Completion(){ 697 | if(model != NULL){ 698 | free((void*)model); 699 | } 700 | if(stop != NULL){ 701 | free((void*)stop); 702 | } 703 | if(user != NULL){ 704 | free((void*)user); 705 | } 706 | } 707 | 708 | OpenAI_Completion & OpenAI_Completion::setModel(const char * m){ 709 | if(model != NULL){ 710 | free((void*)model); 711 | } 712 | model = strdup(m); 713 | return *this; 714 | } 715 | 716 | OpenAI_Completion & OpenAI_Completion::setMaxTokens(unsigned int m){ 717 | if(m > 0){ 718 | max_tokens = m; 719 | } 720 | return *this; 721 | } 722 | 723 | OpenAI_Completion & OpenAI_Completion::setTemperature(float t){ 724 | if(t >= 0 && t <= 2.0){ 725 | temperature = t; 726 | } 727 | return *this; 728 | } 729 | 730 | OpenAI_Completion & OpenAI_Completion::setTopP(float t){ 731 | if(t >= 0 && t <= 1.0){ 732 | top_p = t; 733 | } 734 | return *this; 735 | } 736 | 737 | OpenAI_Completion & OpenAI_Completion::setN(unsigned int _n){ 738 | if(n > 0){ 739 | n = _n; 740 | } 741 | return *this; 742 | } 743 | 744 | OpenAI_Completion & OpenAI_Completion::setEcho(bool e){ 745 | echo = e; 746 | return *this; 747 | } 748 | 749 | OpenAI_Completion & OpenAI_Completion::setStop(const char * s){ 750 | if(stop != NULL){ 751 | free((void*)stop); 752 | } 753 | stop = strdup(s); 754 | return *this; 755 | } 756 | 757 | OpenAI_Completion & OpenAI_Completion::setPresencePenalty(float p){ 758 | if(p >= -2.0 && p <= 2.0){ 759 | presence_penalty = p; 760 | } 761 | return *this; 762 | } 763 | 764 | OpenAI_Completion & OpenAI_Completion::setFrequencyPenalty(float p){ 765 | if(p >= -2.0 && p <= 2.0){ 766 | frequency_penalty = p; 767 | } 768 
| return *this; 769 | } 770 | 771 | OpenAI_Completion & OpenAI_Completion::setBestOf(unsigned int b){ 772 | if(b >= n){ 773 | best_of = b; 774 | } 775 | return *this; 776 | } 777 | 778 | OpenAI_Completion & OpenAI_Completion::setUser(const char * u){ 779 | if(user != NULL){ 780 | free((void*)user); 781 | } 782 | user = strdup(u); 783 | return *this; 784 | } 785 | 786 | OpenAI_StringResponse OpenAI_Completion::prompt(String p){ 787 | String endpoint = "completions"; 788 | 789 | OpenAI_StringResponse result = OpenAI_StringResponse(NULL); 790 | cJSON * req = cJSON_CreateObject(); 791 | if(req == NULL){ 792 | log_e("cJSON_CreateObject failed!"); 793 | return result; 794 | } 795 | reqAddString("model", (model == NULL)?"text-davinci-003":model); 796 | if(p.startsWith("[")){ 797 | cJSON * in = cJSON_Parse(p.c_str()); 798 | if(in == NULL || !cJSON_IsArray(in)){ 799 | log_e("Input not JSON Array!"); 800 | cJSON_Delete(req); 801 | return result; 802 | } 803 | reqAddItem("prompt", in); 804 | } else { 805 | reqAddString("prompt", p.c_str()); 806 | } 807 | if(max_tokens){ 808 | reqAddNumber("max_tokens", max_tokens); 809 | } 810 | if(temperature != 1){ 811 | reqAddNumber("temperature", temperature); 812 | } 813 | if(top_p != 1){ 814 | reqAddNumber("top_p", top_p); 815 | } 816 | if(n != 1){ 817 | reqAddNumber("n", n); 818 | } 819 | if(echo){ 820 | reqAddBool("echo", true); 821 | } 822 | if(stop != NULL){ 823 | reqAddString("stop", stop); 824 | } 825 | if(presence_penalty != 0){ 826 | reqAddNumber("presence_penalty", presence_penalty); 827 | } 828 | if(frequency_penalty != 0){ 829 | reqAddNumber("frequency_penalty", frequency_penalty); 830 | } 831 | if(best_of != 1){ 832 | reqAddNumber("best_of", best_of); 833 | } 834 | if(user != NULL){ 835 | reqAddString("user", user); 836 | } 837 | String jsonBody = String(cJSON_Print(req)); 838 | cJSON_Delete(req); 839 | String res = oai.post(endpoint, jsonBody); 840 | 841 | if(!res.length()){ 842 | log_e("Empty result!"); 843 | return 
result; 844 | } 845 | return OpenAI_StringResponse(res.c_str()); 846 | } 847 | 848 | // chat/completions { //Given a chat conversation, the model will return a chat completion response. 849 | // "model": "gpt-3.5-turbo",//required 850 | // "messages": [//required array 851 | // {"role": "system", "content": "Description of the required assistant"}, 852 | // {"role": "user", "content": "First question from the user"}, 853 | // {"role": "assistant", "content": "Response from the assistant"}, 854 | // {"role": "user", "content": "Next question from the user to be answered"} 855 | // ], 856 | // "temperature": 1,//float between 0 and 2 857 | // "top_p": 1,//float between 0 and 1. recommended to alter this or temperature but not both. 858 | // "stream": false,//boolean. Whether to stream back partial progress. keep false 859 | // "stop": null,//string or array. Up to 4 sequences where the API will stop generating further tokens. 860 | // "max_tokens": 16,//integer. The maximum number of tokens to generate in the completion. 861 | // "presence_penalty": 0,//float between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 862 | // "frequency_penalty": 0,//float between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 863 | // "logit_bias": null,//map. Modify the likelihood of specified tokens appearing in the completion. 864 | // "user": null//string. A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
865 | // } 866 | 867 | OpenAI_ChatCompletion::OpenAI_ChatCompletion(OpenAI &openai) 868 | : oai(openai) 869 | , model(NULL) 870 | , description(NULL) 871 | , max_tokens(0) 872 | , temperature(1) 873 | , top_p(1) 874 | , stop(NULL) 875 | , presence_penalty(0) 876 | , frequency_penalty(0) 877 | , user(NULL) 878 | { 879 | messages = cJSON_CreateArray(); 880 | } 881 | 882 | OpenAI_ChatCompletion::~OpenAI_ChatCompletion(){ 883 | if(model != NULL){ 884 | free((void*)model); 885 | } 886 | if(description != NULL){ 887 | free((void*)description); 888 | } 889 | if(stop != NULL){ 890 | free((void*)stop); 891 | } 892 | if(user != NULL){ 893 | free((void*)user); 894 | } 895 | } 896 | 897 | OpenAI_ChatCompletion & OpenAI_ChatCompletion::setModel(const char * m){ 898 | if(model != NULL){ 899 | free((void*)model); 900 | } 901 | model = strdup(m); 902 | return *this; 903 | } 904 | 905 | OpenAI_ChatCompletion & OpenAI_ChatCompletion::setSystem(const char * s){ 906 | if(description != NULL){ 907 | free((void*)description); 908 | } 909 | description = strdup(s); 910 | return *this; 911 | } 912 | 913 | OpenAI_ChatCompletion & OpenAI_ChatCompletion::setMaxTokens(unsigned int m){ 914 | if(m > 0){ 915 | max_tokens = m; 916 | } 917 | return *this; 918 | } 919 | 920 | OpenAI_ChatCompletion & OpenAI_ChatCompletion::setTemperature(float t){ 921 | if(t >= 0 && t <= 2.0){ 922 | temperature = t; 923 | } 924 | return *this; 925 | } 926 | 927 | OpenAI_ChatCompletion & OpenAI_ChatCompletion::setTopP(float t){ 928 | if(t >= 0 && t <= 1.0){ 929 | top_p = t; 930 | } 931 | return *this; 932 | } 933 | 934 | OpenAI_ChatCompletion & OpenAI_ChatCompletion::setStop(const char * s){ 935 | if(stop != NULL){ 936 | free((void*)stop); 937 | } 938 | stop = strdup(s); 939 | return *this; 940 | } 941 | 942 | OpenAI_ChatCompletion & OpenAI_ChatCompletion::setPresencePenalty(float p){ 943 | if(p >= -2.0 && p <= 2.0){ 944 | presence_penalty = p; 945 | } 946 | return *this; 947 | } 948 | 949 | OpenAI_ChatCompletion & 
OpenAI_ChatCompletion::setFrequencyPenalty(float p){ 950 | if(p >= -2.0 && p <= 2.0){ 951 | frequency_penalty = p; 952 | } 953 | return *this; 954 | } 955 | 956 | OpenAI_ChatCompletion & OpenAI_ChatCompletion::setUser(const char * u){ 957 | if(user != NULL){ 958 | free((void*)user); 959 | } 960 | user = strdup(u); 961 | return *this; 962 | } 963 | 964 | OpenAI_ChatCompletion & OpenAI_ChatCompletion::clearConversation(){ 965 | if(messages != NULL){ 966 | cJSON_Delete(messages); 967 | messages = cJSON_CreateArray(); 968 | } 969 | return *this; 970 | } 971 | 972 | static cJSON * createChatMessage(cJSON * messages, const char * role, const char * content){ 973 | cJSON * message = cJSON_CreateObject(); 974 | if(message == NULL){ 975 | log_e("cJSON_CreateObject failed!"); 976 | return NULL; 977 | } 978 | if(cJSON_AddStringToObject(message, "role", role) == NULL){ 979 | cJSON_Delete(message); 980 | log_e("cJSON_AddStringToObject failed!"); 981 | return NULL; 982 | } 983 | if(cJSON_AddStringToObject(message, "content", content) == NULL){ 984 | cJSON_Delete(message); 985 | log_e("cJSON_AddStringToObject failed!"); 986 | return NULL; 987 | } 988 | if(!cJSON_AddItemToArray(messages, message)){ 989 | cJSON_Delete(message); 990 | log_e("cJSON_AddItemToArray failed!"); 991 | return NULL; 992 | } 993 | return message; 994 | } 995 | 996 | OpenAI_StringResponse OpenAI_ChatCompletion::message(String p, bool save){ 997 | String endpoint = "chat/completions"; 998 | 999 | OpenAI_StringResponse result = OpenAI_StringResponse(NULL); 1000 | cJSON * req = cJSON_CreateObject(); 1001 | if(req == NULL){ 1002 | log_e("cJSON_CreateObject failed!"); 1003 | return result; 1004 | } 1005 | reqAddString("model", (model == NULL)?"gpt-3.5-turbo":model); 1006 | 1007 | cJSON * _messages = cJSON_CreateArray(); 1008 | if(_messages == NULL){ 1009 | cJSON_Delete(req); 1010 | log_e("cJSON_CreateArray failed!"); 1011 | return result; 1012 | } 1013 | if(description != NULL){ 1014 | 
if(createChatMessage(_messages, "system", description) == NULL){ 1015 | cJSON_Delete(req); 1016 | cJSON_Delete(_messages); 1017 | log_e("createChatMessage failed!"); 1018 | return result; 1019 | } 1020 | } 1021 | if(messages != NULL && cJSON_IsArray(messages)){ 1022 | int mlen = cJSON_GetArraySize(messages); 1023 | for(int i = 0; i < mlen; ++i){ 1024 | cJSON * item = cJSON_GetArrayItem(messages, i); 1025 | if(item != NULL && cJSON_IsObject(item)){ 1026 | if(!cJSON_AddItemReferenceToArray(_messages, item)){ 1027 | cJSON_Delete(req); 1028 | cJSON_Delete(_messages); 1029 | log_e("cJSON_AddItemReferenceToArray failed!"); 1030 | return result; 1031 | } 1032 | } 1033 | } 1034 | } 1035 | if(createChatMessage(_messages, "user", p.c_str()) == NULL){ 1036 | cJSON_Delete(req); 1037 | cJSON_Delete(_messages); 1038 | log_e("createChatMessage failed!"); 1039 | return result; 1040 | } 1041 | 1042 | reqAddItem("messages", _messages); 1043 | if(max_tokens){ 1044 | reqAddNumber("max_tokens", max_tokens); 1045 | } 1046 | if(temperature != 1){ 1047 | reqAddNumber("temperature", temperature); 1048 | } 1049 | if(top_p != 1){ 1050 | reqAddNumber("top_p", top_p); 1051 | } 1052 | if(stop != NULL){ 1053 | reqAddString("stop", stop); 1054 | } 1055 | if(presence_penalty != 0){ 1056 | reqAddNumber("presence_penalty", presence_penalty); 1057 | } 1058 | if(frequency_penalty != 0){ 1059 | reqAddNumber("frequency_penalty", frequency_penalty); 1060 | } 1061 | if(user != NULL){ 1062 | reqAddString("user", user); 1063 | } 1064 | String jsonBody = String(cJSON_Print(req)); 1065 | cJSON_Delete(req); 1066 | 1067 | String res = oai.post(endpoint, jsonBody); 1068 | 1069 | if(!res.length()){ 1070 | log_e("Empty result!"); 1071 | return result; 1072 | } 1073 | if(save){ 1074 | //add the responses to the messages here 1075 | //double parsing is here as workaround 1076 | OpenAI_StringResponse r = OpenAI_StringResponse(res.c_str()); 1077 | if(r.length()){ 1078 | if(createChatMessage(messages, "user", 
p.c_str()) == NULL){ 1079 | log_e("createChatMessage failed!"); 1080 | } 1081 | if(createChatMessage(messages, "assistant", r.getAt(0)) == NULL){ 1082 | log_e("createChatMessage failed!"); 1083 | } 1084 | } 1085 | } 1086 | return OpenAI_StringResponse(res.c_str()); 1087 | } 1088 | 1089 | // edits { //Creates a new edit for the provided input, instruction, and parameters. 1090 | // "model": "text-davinci-edit-001",//required 1091 | // "input": "",//string. The input text to use as a starting point for the edit. 1092 | // "instruction": "Fix the spelling mistakes",//required string. The instruction that tells the model how to edit the prompt. 1093 | // "n": 1,//integer. How many edits to generate for the input and instruction. 1094 | // "temperature": 1,//float between 0 and 2 1095 | // "top_p": 1//float between 0 and 1. recommended to alter this or temperature but not both. 1096 | // } 1097 | 1098 | OpenAI_Edit::OpenAI_Edit(OpenAI &openai) 1099 | : oai(openai) 1100 | , model(NULL) 1101 | , temperature(1) 1102 | , top_p(1) 1103 | , n(1) 1104 | {} 1105 | 1106 | OpenAI_Edit::~OpenAI_Edit(){ 1107 | if(model != NULL){ 1108 | free((void*)model); 1109 | } 1110 | } 1111 | 1112 | OpenAI_Edit & OpenAI_Edit::setModel(const char * m){ 1113 | if(model != NULL){ 1114 | free((void*)model); 1115 | } 1116 | model = strdup(m); 1117 | return *this; 1118 | } 1119 | 1120 | OpenAI_Edit & OpenAI_Edit::setTemperature(float t){ 1121 | if(t >= 0 && t <= 2.0){ 1122 | temperature = t; 1123 | } 1124 | return *this; 1125 | } 1126 | 1127 | OpenAI_Edit & OpenAI_Edit::setTopP(float t){ 1128 | if(t >= 0 && t <= 1.0){ 1129 | top_p = t; 1130 | } 1131 | return *this; 1132 | } 1133 | 1134 | OpenAI_Edit & OpenAI_Edit::setN(unsigned int _n){ 1135 | if(n > 0){ 1136 | n = _n; 1137 | } 1138 | return *this; 1139 | } 1140 | 1141 | OpenAI_StringResponse OpenAI_Edit::process(String instruction, String input){ 1142 | String endpoint = "edits"; 1143 | 1144 | OpenAI_StringResponse result = 
OpenAI_StringResponse(NULL); 1145 | cJSON * req = cJSON_CreateObject(); 1146 | if(req == NULL){ 1147 | log_e("cJSON_CreateObject failed!"); 1148 | return result; 1149 | } 1150 | reqAddString("model", (model == NULL)?"text-davinci-edit-001":model); 1151 | reqAddString("instruction", instruction.c_str()); 1152 | if(input){ 1153 | reqAddString("input", input.c_str()); 1154 | } 1155 | if(temperature != 1){ 1156 | reqAddNumber("temperature", temperature); 1157 | } 1158 | if(top_p != 1){ 1159 | reqAddNumber("top_p", top_p); 1160 | } 1161 | if(n != 1){ 1162 | reqAddNumber("n", n); 1163 | } 1164 | String jsonBody = String(cJSON_Print(req)); 1165 | cJSON_Delete(req); 1166 | 1167 | String res = oai.post(endpoint, jsonBody); 1168 | 1169 | if(!res.length()){ 1170 | log_e("Empty result!"); 1171 | return result; 1172 | } 1173 | return OpenAI_StringResponse(res.c_str()); 1174 | } 1175 | 1176 | // 1177 | // Images 1178 | // 1179 | 1180 | static const char * image_sizes[] = {"1024x1024","512x512","256x256"}; 1181 | static const char * image_response_formats[] = {"url","b64_json"}; 1182 | 1183 | // images/generations { //Creates an image given a prompt. 1184 | // "prompt": "A cute baby sea otter",//required 1185 | // "n": 1,//integer. The number of images to generate. Must be between 1 and 10. 1186 | // "size": "1024x1024",//string. The size of the generated images. Must be one of "256x256", "512x512", or "1024x1024" 1187 | // "response_format": "url",//string. The format in which the generated images are returned. Must be one of "url" or "b64_json". 1188 | // "user": null//string. A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
1189 | // } 1190 | 1191 | OpenAI_ImageGeneration::OpenAI_ImageGeneration(OpenAI &openai) 1192 | : oai(openai) 1193 | , size(OPENAI_IMAGE_SIZE_1024x1024) 1194 | , response_format(OPENAI_IMAGE_RESPONSE_FORMAT_URL) 1195 | , n(1) 1196 | , user(NULL) 1197 | {} 1198 | 1199 | OpenAI_ImageGeneration::~OpenAI_ImageGeneration(){ 1200 | if(user != NULL){ 1201 | free((void*)user); 1202 | } 1203 | } 1204 | 1205 | OpenAI_ImageGeneration & OpenAI_ImageGeneration::setSize(OpenAI_Image_Size s){ 1206 | if(s >= OPENAI_IMAGE_SIZE_1024x1024 && s <= OPENAI_IMAGE_SIZE_256x256){ 1207 | size = s; 1208 | } 1209 | return *this; 1210 | } 1211 | 1212 | OpenAI_ImageGeneration & OpenAI_ImageGeneration::setResponseFormat(OpenAI_Image_Response_Format f){ 1213 | if(f >= OPENAI_IMAGE_RESPONSE_FORMAT_URL && f <= OPENAI_IMAGE_RESPONSE_FORMAT_B64_JSON){ 1214 | response_format = f; 1215 | } 1216 | return *this; 1217 | } 1218 | 1219 | OpenAI_ImageGeneration & OpenAI_ImageGeneration::setN(unsigned int _n){ 1220 | if(n > 0 && n <= 10){ 1221 | n = _n; 1222 | } 1223 | return *this; 1224 | } 1225 | 1226 | OpenAI_ImageGeneration & OpenAI_ImageGeneration::setUser(const char * u){ 1227 | if(user != NULL){ 1228 | free((void*)user); 1229 | } 1230 | user = strdup(u); 1231 | return *this; 1232 | } 1233 | 1234 | OpenAI_ImageResponse OpenAI_ImageGeneration::prompt(String p){ 1235 | String endpoint = "images/generations"; 1236 | 1237 | OpenAI_ImageResponse result = OpenAI_ImageResponse(NULL); 1238 | cJSON * req = cJSON_CreateObject(); 1239 | if(req == NULL){ 1240 | log_e("cJSON_CreateObject failed!"); 1241 | return result; 1242 | } 1243 | reqAddString("prompt", p.c_str()); 1244 | if(size != OPENAI_IMAGE_SIZE_1024x1024){ 1245 | reqAddString("size", image_sizes[size]); 1246 | } 1247 | if(response_format != OPENAI_IMAGE_RESPONSE_FORMAT_URL){ 1248 | reqAddString("response_format", image_response_formats[response_format]); 1249 | } 1250 | if(n != 1){ 1251 | reqAddNumber("n", n); 1252 | } 1253 | if(user != NULL){ 1254 | 
reqAddString("user", user); 1255 | } 1256 | String jsonBody = String(cJSON_Print(req)); 1257 | cJSON_Delete(req); 1258 | String res = oai.post(endpoint, jsonBody); 1259 | if(!res.length()){ 1260 | log_e("Empty result!"); 1261 | return result; 1262 | } 1263 | return OpenAI_ImageResponse(res.c_str()); 1264 | } 1265 | 1266 | // images/variations { //Creates a variation of a given image. 1267 | // "image": "",//required string. The image to edit. Must be a valid PNG file, less than 4MB, and square. 1268 | // "n": 1,//integer. The number of images to generate. Must be between 1 and 10. 1269 | // "size": "1024x1024",//string. The size of the generated images. Must be one of "256x256", "512x512", or "1024x1024" 1270 | // "response_format": "url",//string. The format in which the generated images are returned. Must be one of "url" or "b64_json". 1271 | // "user": null//string. A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 1272 | // } 1273 | 1274 | OpenAI_ImageVariation::OpenAI_ImageVariation(OpenAI &openai) 1275 | : oai(openai) 1276 | , size(OPENAI_IMAGE_SIZE_1024x1024) 1277 | , response_format(OPENAI_IMAGE_RESPONSE_FORMAT_URL) 1278 | , n(1) 1279 | , user(NULL) 1280 | {} 1281 | 1282 | OpenAI_ImageVariation::~OpenAI_ImageVariation(){ 1283 | if(user != NULL){ 1284 | free((void*)user); 1285 | } 1286 | } 1287 | 1288 | OpenAI_ImageVariation & OpenAI_ImageVariation::setSize(OpenAI_Image_Size s){ 1289 | if(s >= OPENAI_IMAGE_SIZE_1024x1024 && s <= OPENAI_IMAGE_SIZE_256x256){ 1290 | size = s; 1291 | } 1292 | return *this; 1293 | } 1294 | 1295 | OpenAI_ImageVariation & OpenAI_ImageVariation::setResponseFormat(OpenAI_Image_Response_Format f){ 1296 | if(f >= OPENAI_IMAGE_RESPONSE_FORMAT_URL && f <= OPENAI_IMAGE_RESPONSE_FORMAT_B64_JSON){ 1297 | response_format = f; 1298 | } 1299 | return *this; 1300 | } 1301 | 1302 | OpenAI_ImageVariation & OpenAI_ImageVariation::setN(unsigned int _n){ 1303 | if(n > 0 && n <= 10){ 1304 | n = _n; 
1305 | } 1306 | return *this; 1307 | } 1308 | 1309 | OpenAI_ImageVariation & OpenAI_ImageVariation::setUser(const char * u){ 1310 | if(user != NULL){ 1311 | free((void*)user); 1312 | } 1313 | user = strdup(u); 1314 | return *this; 1315 | } 1316 | 1317 | OpenAI_ImageResponse OpenAI_ImageVariation::image(uint8_t * img_data, size_t img_len){ 1318 | String endpoint = "images/variations"; 1319 | String boundary = "----WebKitFormBoundaryb9v538xFWfzLzRO3"; 1320 | String itemPrefix = "--" +boundary+ "\r\nContent-Disposition: form-data; name="; 1321 | uint8_t * data = NULL; 1322 | size_t len = 0; 1323 | OpenAI_ImageResponse result = OpenAI_ImageResponse(NULL); 1324 | 1325 | String reqBody = ""; 1326 | if(size != OPENAI_IMAGE_SIZE_1024x1024){ 1327 | reqBody += itemPrefix+"\"size\"\r\n\r\n"+String(image_sizes[size])+"\r\n"; 1328 | } 1329 | if(response_format != OPENAI_IMAGE_RESPONSE_FORMAT_URL){ 1330 | reqBody += itemPrefix+"\"response_format\"\r\n\r\n"+String(image_response_formats[response_format])+"\r\n"; 1331 | } 1332 | if(n != 1){ 1333 | reqBody += itemPrefix+"\"n\"\r\n\r\n"+String(n)+"\r\n"; 1334 | } 1335 | if(user != NULL){ 1336 | reqBody += itemPrefix+"\"user\"\r\n\r\n"+String(user)+"\r\n"; 1337 | } 1338 | reqBody += itemPrefix+"\"image\"; filename=\"image.png\"\r\nContent-Type: image/png\r\n\r\n"; 1339 | 1340 | String reqEndBody = "\r\n--" +boundary+ "--\r\n"; 1341 | 1342 | len = reqBody.length() + reqEndBody.length() + img_len; 1343 | 1344 | data = (uint8_t*)malloc(len + 1); 1345 | if(data == NULL){ 1346 | log_e("Failed to allocate request buffer! 
Len: %u", len); 1347 | return result; 1348 | } 1349 | uint8_t * d = data; 1350 | memcpy(d, reqBody.c_str(), reqBody.length()); 1351 | d += reqBody.length(); 1352 | memcpy(d, img_data, img_len); 1353 | d += img_len; 1354 | memcpy(d, reqEndBody.c_str(), reqEndBody.length()); 1355 | d += reqEndBody.length(); 1356 | *d = 0; 1357 | 1358 | String res = oai.upload(endpoint, boundary, data, len); 1359 | free(data); 1360 | if(!res.length()){ 1361 | log_e("Empty result!"); 1362 | return result; 1363 | } 1364 | return OpenAI_ImageResponse(res.c_str()); 1365 | } 1366 | 1367 | // images/edits { //Creates an edited or extended image given an original image and a prompt. 1368 | // "image": "",//required string. The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. 1369 | // "mask": "",//optional string. An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image. 1370 | // "prompt": "A cute baby sea otter",//required. A text description of the desired image(s). The maximum length is 1000 characters. 1371 | // "n": 1,//integer. The number of images to generate. Must be between 1 and 10. 1372 | // "size": "1024x1024",//string. The size of the generated images. Must be one of "256x256", "512x512", or "1024x1024" 1373 | // "response_format": "url",//string. The format in which the generated images are returned. Must be one of "url" or "b64_json". 1374 | // "user": null//string. A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
1375 | // } 1376 | 1377 | OpenAI_ImageEdit::OpenAI_ImageEdit(OpenAI &openai) 1378 | : oai(openai) 1379 | , prompt(NULL) 1380 | , size(OPENAI_IMAGE_SIZE_1024x1024) 1381 | , response_format(OPENAI_IMAGE_RESPONSE_FORMAT_URL) 1382 | , n(1) 1383 | , user(NULL) 1384 | {} 1385 | 1386 | OpenAI_ImageEdit::~OpenAI_ImageEdit(){ 1387 | if(prompt != NULL){ 1388 | free((void*)prompt); 1389 | } 1390 | if(user != NULL){ 1391 | free((void*)user); 1392 | } 1393 | } 1394 | 1395 | OpenAI_ImageEdit & OpenAI_ImageEdit::setPrompt(const char * p){ 1396 | if(prompt != NULL){ 1397 | free((void*)prompt); 1398 | prompt = NULL; 1399 | } 1400 | if(p != NULL){ 1401 | prompt = strdup(p); 1402 | } 1403 | return *this; 1404 | } 1405 | 1406 | OpenAI_ImageEdit & OpenAI_ImageEdit::setSize(OpenAI_Image_Size s){ 1407 | if(s >= OPENAI_IMAGE_SIZE_1024x1024 && s <= OPENAI_IMAGE_SIZE_256x256){ 1408 | size = s; 1409 | } 1410 | return *this; 1411 | } 1412 | 1413 | OpenAI_ImageEdit & OpenAI_ImageEdit::setResponseFormat(OpenAI_Image_Response_Format f){ 1414 | if(f >= OPENAI_IMAGE_RESPONSE_FORMAT_URL && f <= OPENAI_IMAGE_RESPONSE_FORMAT_B64_JSON){ 1415 | response_format = f; 1416 | } 1417 | return *this; 1418 | } 1419 | 1420 | OpenAI_ImageEdit & OpenAI_ImageEdit::setN(unsigned int _n){ 1421 | if(n > 0 && n <= 10){ 1422 | n = _n; 1423 | } 1424 | return *this; 1425 | } 1426 | 1427 | OpenAI_ImageEdit & OpenAI_ImageEdit::setUser(const char * u){ 1428 | if(user != NULL){ 1429 | free((void*)user); 1430 | } 1431 | user = strdup(u); 1432 | return *this; 1433 | } 1434 | 1435 | OpenAI_ImageResponse OpenAI_ImageEdit::image(uint8_t * img_data, size_t img_len, uint8_t * mask_data, size_t mask_len){ 1436 | String endpoint = "images/edits"; 1437 | String boundary = "----WebKitFormBoundaryb9v538xFWfzLzRO3"; 1438 | String itemPrefix = "--" +boundary+ "\r\nContent-Disposition: form-data; name="; 1439 | uint8_t * data = NULL; 1440 | size_t len = 0; 1441 | OpenAI_ImageResponse result = OpenAI_ImageResponse(NULL); 1442 | 1443 | 
String reqBody = ""; 1444 | if(prompt != NULL){ 1445 | reqBody += itemPrefix+"\"prompt\"\r\n\r\n"+String(prompt)+"\r\n"; 1446 | } 1447 | if(size != OPENAI_IMAGE_SIZE_1024x1024){ 1448 | reqBody += itemPrefix+"\"size\"\r\n\r\n"+String(image_sizes[size])+"\r\n"; 1449 | } 1450 | if(response_format != OPENAI_IMAGE_RESPONSE_FORMAT_URL){ 1451 | reqBody += itemPrefix+"\"response_format\"\r\n\r\n"+String(image_response_formats[response_format])+"\r\n"; 1452 | } 1453 | if(n != 1){ 1454 | reqBody += itemPrefix+"\"n\"\r\n\r\n"+String(n)+"\r\n"; 1455 | } 1456 | if(user != NULL){ 1457 | reqBody += itemPrefix+"\"user\"\r\n\r\n"+String(user)+"\r\n"; 1458 | } 1459 | reqBody += itemPrefix+"\"image\"; filename=\"image.png\"\r\nContent-Type: image/png\r\n\r\n"; 1460 | 1461 | String reqEndBody = "\r\n--" +boundary+ "--\r\n"; 1462 | 1463 | len = reqBody.length() + reqEndBody.length() + img_len; 1464 | 1465 | String maskBody = ""; 1466 | if(mask_data != NULL && mask_len > 0){ 1467 | maskBody += "\r\n"+itemPrefix+"\"mask\"; filename=\"mask.png\"\r\nContent-Type: image/png\r\n\r\n"; 1468 | len += maskBody.length() + mask_len; 1469 | } 1470 | 1471 | data = (uint8_t*)malloc(len + 1); 1472 | if(data == NULL){ 1473 | log_e("Failed to allocate request buffer! 
Len: %u", len); 1474 | return result; 1475 | } 1476 | uint8_t * d = data; 1477 | memcpy(d, reqBody.c_str(), reqBody.length()); 1478 | d += reqBody.length(); 1479 | memcpy(d, img_data, img_len); 1480 | d += img_len; 1481 | if(mask_data != NULL && mask_len > 0){ 1482 | memcpy(d, maskBody.c_str(), maskBody.length()); 1483 | d += maskBody.length(); 1484 | memcpy(d, mask_data, mask_len); 1485 | d += mask_len; 1486 | } 1487 | memcpy(d, reqEndBody.c_str(), reqEndBody.length()); 1488 | d += reqEndBody.length(); 1489 | *d = 0; 1490 | 1491 | String res = oai.upload(endpoint, boundary, data, len); 1492 | free(data); 1493 | if(!res.length()){ 1494 | log_e("Empty result!"); 1495 | return result; 1496 | } 1497 | return OpenAI_ImageResponse(res.c_str()); 1498 | } 1499 | 1500 | // audio/transcriptions { //Transcribes audio into the input language. 1501 | // "file": "audio.mp3",//required. The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. 1502 | // "model": "whisper-1",//required. ID of the model to use. Only whisper-1 is currently available. 1503 | // "prompt": "A cute baby sea otter",//An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language. 1504 | // "response_format": "json",//string. The format of the transcript output, in one of these options: "json", "text", "srt", "verbose_json", or "vtt". 1505 | // "temperature": 1,//float between 0 and 2 1506 | // "language": null//string. The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency. 
1507 | // } 1508 | 1509 | static const char * audio_input_formats[] = { 1510 | "mp3", 1511 | "mp4", 1512 | "mpeg", 1513 | "mpga", 1514 | "m4a", 1515 | "wav", 1516 | "webm" 1517 | }; 1518 | static const char * audio_input_mime[] = { 1519 | "audio/mpeg", 1520 | "audio/mp4", 1521 | "audio/mpeg", 1522 | "audio/mpeg", 1523 | "audio/x-m4a", 1524 | "audio/x-wav", 1525 | "audio/webm" 1526 | }; 1527 | static const char * audio_response_formats[] = {"json", "text", "srt", "verbose_json", "vtt"}; 1528 | 1529 | const char * prompt; 1530 | OpenAI_Audio_Response_Format response_format; 1531 | float temperature; 1532 | const char * language; 1533 | 1534 | OpenAI_AudioTranscription::OpenAI_AudioTranscription(OpenAI &openai) 1535 | : oai(openai) 1536 | , prompt(NULL) 1537 | , response_format(OPENAI_AUDIO_RESPONSE_FORMAT_JSON) 1538 | , temperature(0) 1539 | , language(NULL) 1540 | {} 1541 | 1542 | OpenAI_AudioTranscription::~OpenAI_AudioTranscription(){ 1543 | if(prompt != NULL){ 1544 | free((void*)prompt); 1545 | } 1546 | if(language != NULL){ 1547 | free((void*)language); 1548 | } 1549 | } 1550 | 1551 | OpenAI_AudioTranscription & OpenAI_AudioTranscription::setPrompt(const char * p){ 1552 | if(prompt != NULL){ 1553 | free((void*)prompt); 1554 | prompt = NULL; 1555 | } 1556 | if(p != NULL){ 1557 | prompt = strdup(p); 1558 | } 1559 | return *this; 1560 | } 1561 | 1562 | OpenAI_AudioTranscription & OpenAI_AudioTranscription::setResponseFormat(OpenAI_Audio_Response_Format f){ 1563 | if(f >= OPENAI_AUDIO_RESPONSE_FORMAT_JSON && f <= OPENAI_AUDIO_RESPONSE_FORMAT_VTT){ 1564 | response_format = f; 1565 | } 1566 | return *this; 1567 | } 1568 | 1569 | OpenAI_AudioTranscription & OpenAI_AudioTranscription::setTemperature(float t){ 1570 | if(t >= 0 && t <= 2.0){ 1571 | temperature = t; 1572 | } 1573 | return *this; 1574 | } 1575 | 1576 | OpenAI_AudioTranscription & OpenAI_AudioTranscription::setLanguage(const char * l){ 1577 | if(language != NULL){ 1578 | free((void*)language); 1579 | 
language = NULL; 1580 | } 1581 | if(l != NULL){ 1582 | language = strdup(l); 1583 | } 1584 | return *this; 1585 | } 1586 | 1587 | String OpenAI_AudioTranscription::file(uint8_t * audio_data, size_t audio_len, OpenAI_Audio_Input_Format f){ 1588 | String endpoint = "audio/transcriptions"; 1589 | String boundary = "----WebKitFormBoundary9HKFexBRLrf9dcpY"; 1590 | String itemPrefix = "--" +boundary+ "\r\nContent-Disposition: form-data; name="; 1591 | uint8_t * data = NULL; 1592 | size_t len = 0; 1593 | 1594 | String reqBody = itemPrefix+"\"model\"\r\n\r\nwhisper-1\r\n"; 1595 | if(prompt != NULL){ 1596 | reqBody += itemPrefix+"\"prompt\"\r\n\r\n"+String(prompt)+"\r\n"; 1597 | } 1598 | if(response_format != OPENAI_AUDIO_RESPONSE_FORMAT_JSON){ 1599 | reqBody += itemPrefix+"\"response_format\"\r\n\r\n"+String(audio_response_formats[response_format])+"\r\n"; 1600 | } 1601 | if(temperature != 0){ 1602 | reqBody += itemPrefix+"\"temperature\"\r\n\r\n"+String(temperature)+"\r\n"; 1603 | } 1604 | if(language != NULL){ 1605 | reqBody += itemPrefix+"\"language\"\r\n\r\n"+String(language)+"\r\n"; 1606 | } 1607 | reqBody += itemPrefix+"\"file\"; filename=\"audio."+String(audio_input_formats[f])+"\"\r\nContent-Type: "+String(audio_input_mime[f])+"\r\n\r\n"; 1608 | 1609 | String reqEndBody = "\r\n--" +boundary+ "--\r\n"; 1610 | 1611 | len = reqBody.length() + reqEndBody.length() + audio_len; 1612 | data = (uint8_t*)malloc(len + 1); 1613 | if(data == NULL){ 1614 | log_e("Failed to allocate request buffer! 
Len: %u", len); 1615 | return String(); 1616 | } 1617 | uint8_t * d = data; 1618 | memcpy(d, reqBody.c_str(), reqBody.length()); 1619 | d += reqBody.length(); 1620 | memcpy(d, audio_data, audio_len); 1621 | d += audio_len; 1622 | memcpy(d, reqEndBody.c_str(), reqEndBody.length()); 1623 | d += reqEndBody.length(); 1624 | *d = 0; 1625 | 1626 | String result = oai.upload(endpoint, boundary, data, len); 1627 | free(data); 1628 | if(!result.length()){ 1629 | log_e("Empty result!"); 1630 | return result; 1631 | } 1632 | cJSON * json = cJSON_Parse(result.c_str()); 1633 | String error = getJsonError(json); 1634 | result = ""; 1635 | if(error.length()){ 1636 | log_e("%s",error.c_str()); 1637 | } else { 1638 | if(cJSON_HasObjectItem(json, "text")){ 1639 | cJSON * text = cJSON_GetObjectItem(json, "text"); 1640 | result = String(cJSON_GetStringValue(text)); 1641 | } 1642 | } 1643 | cJSON_Delete(json); 1644 | return result; 1645 | } 1646 | 1647 | // audio/translations { //Translates audio into into English. 1648 | // "file": "german.m4a",//required. The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. 1649 | // "model": "whisper-1",//required. ID of the model to use. Only whisper-1 is currently available. 1650 | // "prompt": "A cute baby sea otter",//An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language. 1651 | // "response_format": "json",//string. The format of the transcript output, in one of these options: "json", "text", "srt", "verbose_json", or "vtt". 
// "temperature": 1//float between 0 and 2
// }

OpenAI_AudioTranslation::OpenAI_AudioTranslation(OpenAI &openai)
    : oai(openai)
    , prompt(NULL)
    , response_format(OPENAI_AUDIO_RESPONSE_FORMAT_JSON)
    , temperature(0)
{}

OpenAI_AudioTranslation::~OpenAI_AudioTranslation(){
    if(prompt != NULL){
        free((void*)prompt);
    }
}

// Optional text to guide the model's style or continue a previous audio
// segment; should be in English. Pass NULL to clear it.
OpenAI_AudioTranslation & OpenAI_AudioTranslation::setPrompt(const char * p){
    if(prompt != NULL){
        free((void*)prompt);
        prompt = NULL;
    }
    if(p != NULL){
        prompt = strdup(p);
    }
    return *this;
}

// The format of the transcript output. Out-of-range values are ignored.
OpenAI_AudioTranslation & OpenAI_AudioTranslation::setResponseFormat(OpenAI_Audio_Response_Format f){
    if(f >= OPENAI_AUDIO_RESPONSE_FORMAT_JSON && f <= OPENAI_AUDIO_RESPONSE_FORMAT_VTT){
        response_format = f;
    }
    return *this;
}

// Sampling temperature, between 0 and 2. Out-of-range values are ignored.
OpenAI_AudioTranslation & OpenAI_AudioTranslation::setTemperature(float t){
    if(t >= 0 && t <= 2.0){
        temperature = t;
    }
    return *this;
}

// Translates the given audio buffer to English via a multipart/form-data
// upload to "audio/translations". f selects the filename extension and MIME
// type from the audio_input_* tables. Returns the translated text on success;
// an empty String on allocation failure, empty response, or API error.
String OpenAI_AudioTranslation::file(uint8_t * audio_data, size_t audio_len, OpenAI_Audio_Input_Format f){
    String endpoint = "audio/translations";
    String boundary = "----WebKitFormBoundary9HKFexBRLrf9dcpY";
    String itemPrefix = "--" +boundary+ "\r\nContent-Disposition: form-data; name=";
    uint8_t * data = NULL;
    size_t len = 0;

    // "model" is required; optional fields are only sent when they differ from defaults
    String reqBody = itemPrefix+"\"model\"\r\n\r\nwhisper-1\r\n";
    if(prompt != NULL){
        reqBody += itemPrefix+"\"prompt\"\r\n\r\n"+String(prompt)+"\r\n";
    }
    if(response_format != OPENAI_AUDIO_RESPONSE_FORMAT_JSON){
        reqBody += itemPrefix+"\"response_format\"\r\n\r\n"+String(audio_response_formats[response_format])+"\r\n";
    }
    if(temperature != 0){
        reqBody += itemPrefix+"\"temperature\"\r\n\r\n"+String(temperature)+"\r\n";
    }
    reqBody += itemPrefix+"\"file\"; filename=\"audio."+String(audio_input_formats[f])+"\"\r\nContent-Type: "+String(audio_input_mime[f])+"\r\n\r\n";

    String reqEndBody = "\r\n--" +boundary+ "--\r\n";

    len = reqBody.length() + reqEndBody.length() + audio_len;
    // +1 for the trailing NUL terminator written below
    data = (uint8_t*)malloc(len + 1);
    if(data == NULL){
        log_e("Failed to allocate request buffer! Len: %u", len);
        return String();
    }
    // Assemble the body: text fields + raw audio bytes + closing boundary
    uint8_t * d = data;
    memcpy(d, reqBody.c_str(), reqBody.length());
    d += reqBody.length();
    memcpy(d, audio_data, audio_len);
    d += audio_len;
    memcpy(d, reqEndBody.c_str(), reqEndBody.length());
    d += reqEndBody.length();
    *d = 0;

    String result = oai.upload(endpoint, boundary, data, len);
    free(data);
    if(!result.length()){
        log_e("Empty result!");
        return result;
    }
    // Pull the "text" field out of the JSON reply (or log the API error)
    cJSON * json = cJSON_Parse(result.c_str());
    String error = getJsonError(json);
    result = "";
    if(error.length()){
        log_e("%s",error.c_str());
    } else {
        if(cJSON_HasObjectItem(json, "text")){
            cJSON * text = cJSON_GetObjectItem(json, "text");
            result = String(cJSON_GetStringValue(text));
        }
    }
    cJSON_Delete(json);
    return result;
}


// files { //Upload a file that contains document(s) to be used across various endpoints/features.
// "file": "mydata.jsonl",//required. Name of the JSON Lines file to be uploaded. If the purpose is set to "fine-tune", each line is a JSON record with "prompt" and "completion" fields representing your training examples.
// "purpose": "fine-tune"//required. The intended purpose of the uploaded documents. Use "fine-tune" for Fine-tuning. This allows us to validate the format of the uploaded file.
// }

// GET files //Returns a list of files that belong to the user's organization.
// DELETE files/{file_id} //Delete a file.
// GET files/{file_id} //Returns information about a specific file.
// GET files/{file_id}/content //Returns the contents of the specified file

--------------------------------------------------------------------------------
/src/OpenAI.h:
--------------------------------------------------------------------------------
#pragma once
#include "Arduino.h"
#include "cJSON.h"

// Forward declarations for the per-endpoint helper classes defined below.
class OpenAI_Completion;
class OpenAI_ChatCompletion;
class OpenAI_Edit;
class OpenAI_ImageGeneration;
class OpenAI_ImageVariation;
class OpenAI_ImageEdit;
class OpenAI_AudioTranscription;
class OpenAI_AudioTranslation;

// Supported output sizes for the image endpoints.
typedef enum {
    OPENAI_IMAGE_SIZE_1024x1024,
    OPENAI_IMAGE_SIZE_512x512,
    OPENAI_IMAGE_SIZE_256x256
} OpenAI_Image_Size;

// How generated images are returned: a URL or base64-encoded JSON.
typedef enum {
    OPENAI_IMAGE_RESPONSE_FORMAT_URL,
    OPENAI_IMAGE_RESPONSE_FORMAT_B64_JSON
} OpenAI_Image_Response_Format;

// Output formats for audio transcription/translation.
typedef enum {
    OPENAI_AUDIO_RESPONSE_FORMAT_JSON,
    OPENAI_AUDIO_RESPONSE_FORMAT_TEXT,
    OPENAI_AUDIO_RESPONSE_FORMAT_SRT,
    OPENAI_AUDIO_RESPONSE_FORMAT_VERBOSE_JSON,
    OPENAI_AUDIO_RESPONSE_FORMAT_VTT
} OpenAI_Audio_Response_Format;

// Accepted input container formats for the audio endpoints.
typedef enum {
    OPENAI_AUDIO_INPUT_FORMAT_MP3,
    OPENAI_AUDIO_INPUT_FORMAT_MP4,
    OPENAI_AUDIO_INPUT_FORMAT_MPEG,
    OPENAI_AUDIO_INPUT_FORMAT_MPGA,
    OPENAI_AUDIO_INPUT_FORMAT_M4A,
    OPENAI_AUDIO_INPUT_FORMAT_WAV,
    OPENAI_AUDIO_INPUT_FORMAT_WEBM
} OpenAI_Audio_Input_Format;

// A single embedding vector: `len` doubles pointed to by `data`.
typedef struct {
    unsigned int len;
    double * data;
} OpenAI_EmbeddingData;

// Parsed response of the embeddings endpoint.
class OpenAI_EmbeddingResponse {
  private:
    unsigned int usage;         // tokens consumed by the request
    unsigned int len;           // number of embedding vectors
    OpenAI_EmbeddingData * data;
    char * error_str;           // NULL-able API error message

  public:
    OpenAI_EmbeddingResponse(const char * payload);
    ~OpenAI_EmbeddingResponse();

    unsigned int tokens(){
      return usage;
    }
    unsigned int length(){
      return len;
    }
    // Returns the embedding at `index`, or NULL if out of range.
    OpenAI_EmbeddingData * getAt(unsigned int index){
      if(index < len){
        return &data[index];
      }
      return NULL;
    }
    const char * error(){
      return error_str;
    }
};

// Parsed response of the moderations endpoint (one flag per input).
class OpenAI_ModerationResponse {
  private:
    unsigned int len;
    bool * data;
    char * error_str;

  public:
    OpenAI_ModerationResponse(const char * payload);
    ~OpenAI_ModerationResponse();

    unsigned int length(){
      return len;
    }
    // Returns the flagged state at `index`, or false if out of range.
    bool getAt(unsigned int index){
      if(index < len){
        return data[index];
      }
      return false;
    }
    const char * error(){
      return error_str;
    }
};

// Parsed response of the image endpoints (one URL or b64 string per image).
class OpenAI_ImageResponse {
  private:
    unsigned int len;
    char ** data;
    char * error_str;

  public:
    OpenAI_ImageResponse(const char * payload);
    ~OpenAI_ImageResponse();

    unsigned int length(){
      return len;
    }
    // Returns the image string at `index`, or "" if out of range.
    const char * getAt(unsigned int index){
      if(index < len){
        return data[index];
      }
      return "";
    }
    const char * error(){
      return error_str;
    }
};

// Parsed response of the text endpoints (one string per choice).
class OpenAI_StringResponse {
  private:
    unsigned int usage;         // tokens consumed by the request
    unsigned int len;
    char ** data;
    char * error_str;

  public:
    OpenAI_StringResponse(const char * payload);
    ~OpenAI_StringResponse();

    unsigned int tokens(){
      return usage;
    }
    unsigned int length(){
      return len;
    }
    // Returns the choice at `index`, or "" if out of range.
    const char * getAt(unsigned int index){
      if(index < len){
        return data[index];
      }
      return "";
    }
    const char * error(){
      return error_str;
    }
};

// Entry point: holds the API key and performs the HTTP transport.
// The per-endpoint builder objects are created through the factory methods.
class OpenAI {
  private:
    String api_key;

  protected:

  public:
    OpenAI(const char *openai_api_key);
    ~OpenAI();

    OpenAI_EmbeddingResponse embedding(String input, const char * model=NULL, const char * user=NULL); //Creates an embedding vector representing the input text.
    OpenAI_ModerationResponse moderation(String input, const char * model=NULL); //Classifies if text violates OpenAI's Content Policy

    OpenAI_Completion completion();
    OpenAI_ChatCompletion chat();
    OpenAI_Edit edit();
    OpenAI_ImageGeneration imageGeneration();
    OpenAI_ImageVariation imageVariation();
    OpenAI_ImageEdit imageEdit();
    OpenAI_AudioTranscription audioTranscription();
    OpenAI_AudioTranslation audioTranslation();

    // Low-level HTTP helpers used by the builder classes.
    String get(String endpoint);
    String del(String endpoint);
    String post(String endpoint, String jsonBody);
    String upload(String endpoint, String boundary, uint8_t * data, size_t len);
};

// Builder for the completions endpoint. Setters return *this for chaining.
class OpenAI_Completion {
  private:
    OpenAI & oai;
    const char * model;
    unsigned int max_tokens;
    float temperature;
    float top_p;
    unsigned int n;
    bool echo;
    const char * stop;
    float presence_penalty;
    float frequency_penalty;
    unsigned int best_of;
    const char * user;

  protected:

  public:
    OpenAI_Completion(OpenAI &openai);
    ~OpenAI_Completion();

    OpenAI_Completion & setModel(const char * m);
    OpenAI_Completion & setMaxTokens(unsigned int m); //The maximum number of tokens to generate in the completion.
    OpenAI_Completion & setTemperature(float t); //float between 0 and 2. Higher value gives more random results.
    OpenAI_Completion & setTopP(float t); //float between 0 and 1. recommended to alter this or temperature but not both.
    OpenAI_Completion & setN(unsigned int n); //How many completions to generate for each prompt.
    OpenAI_Completion & setEcho(bool e); //Echo back the prompt in addition to the completion
    OpenAI_Completion & setStop(const char * s); //Up to 4 sequences where the API will stop generating further tokens.
    OpenAI_Completion & setPresencePenalty(float p); //float between -2.0 and 2.0. Positive values increase the model's likelihood to talk about new topics.
    OpenAI_Completion & setFrequencyPenalty(float p); //float between -2.0 and 2.0. Positive values decrease the model's likelihood to repeat the same line verbatim.
    OpenAI_Completion & setBestOf(unsigned int b); //Generates best_of completions server-side and returns the "best". "best_of" must be greater than "n"
    OpenAI_Completion & setUser(const char * u); //A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.

    OpenAI_StringResponse prompt(String p); //Send the prompt for completion
};

// Builder for the chat completions endpoint; accumulates the conversation.
class OpenAI_ChatCompletion {
  private:
    OpenAI & oai;
    cJSON * messages;           // accumulated conversation history
    const char * model;
    const char * description;   // system message content
    unsigned int max_tokens;
    float temperature;
    float top_p;
    const char * stop;
    float presence_penalty;
    float frequency_penalty;
    const char * user;

  protected:

  public:
    OpenAI_ChatCompletion(OpenAI &openai);
    ~OpenAI_ChatCompletion();

    OpenAI_ChatCompletion & setModel(const char * m);
    OpenAI_ChatCompletion & setSystem(const char * s); //Description of the required assistant
    OpenAI_ChatCompletion & setMaxTokens(unsigned int m); //The maximum number of tokens to generate in the completion.
    OpenAI_ChatCompletion & setTemperature(float t); //float between 0 and 2. Higher value gives more random results.
    OpenAI_ChatCompletion & setTopP(float t); //float between 0 and 1. recommended to alter this or temperature but not both.
    OpenAI_ChatCompletion & setStop(const char * s); //Up to 4 sequences where the API will stop generating further tokens.
    OpenAI_ChatCompletion & setPresencePenalty(float p); //float between -2.0 and 2.0. Positive values increase the model's likelihood to talk about new topics.
    OpenAI_ChatCompletion & setFrequencyPenalty(float p); //float between -2.0 and 2.0. Positive values decrease the model's likelihood to repeat the same line verbatim.
    OpenAI_ChatCompletion & setUser(const char * u); //A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
    OpenAI_ChatCompletion & clearConversation(); //clears the accumulated conversation

    OpenAI_StringResponse message(String m, bool save=true);//Send the message for completion. Save it with the first response if selected
};

// Builder for the edits endpoint.
class OpenAI_Edit {
  private:
    OpenAI & oai;
    const char * model;
    float temperature;
    float top_p;
    unsigned int n;

  protected:

  public:
    OpenAI_Edit(OpenAI &openai);
    ~OpenAI_Edit();

    OpenAI_Edit & setModel(const char * m);
    OpenAI_Edit & setTemperature(float t); //float between 0 and 2. Higher value gives more random results.
    OpenAI_Edit & setTopP(float t); //float between 0 and 1. recommended to alter this or temperature but not both.
    OpenAI_Edit & setN(unsigned int n); //How many edits to generate for the input and instruction.

    OpenAI_StringResponse process(String instruction, String input=String()); //Creates a new edit for the provided input, instruction, and parameters.
};

// Builder for the images/generations endpoint.
class OpenAI_ImageGeneration {
  private:
    OpenAI & oai;
    OpenAI_Image_Size size;
    OpenAI_Image_Response_Format response_format;
    unsigned int n;
    const char * user;

  protected:

  public:
    OpenAI_ImageGeneration(OpenAI &openai);
    ~OpenAI_ImageGeneration();

    OpenAI_ImageGeneration & setSize(OpenAI_Image_Size s); //The size of the generated images.
    OpenAI_ImageGeneration & setResponseFormat(OpenAI_Image_Response_Format f); //The format in which the generated images are returned.
    OpenAI_ImageGeneration & setN(unsigned int n); //The number of images to generate. Must be between 1 and 10.
    OpenAI_ImageGeneration & setUser(const char * u); //A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.

    OpenAI_ImageResponse prompt(String p); //Creates image/images from given a prompt.
};

// Builder for the images/variations endpoint.
class OpenAI_ImageVariation {
  private:
    OpenAI & oai;
    OpenAI_Image_Size size;
    OpenAI_Image_Response_Format response_format;
    unsigned int n;
    const char * user;

  protected:

  public:
    OpenAI_ImageVariation(OpenAI &openai);
    ~OpenAI_ImageVariation();

    OpenAI_ImageVariation & setSize(OpenAI_Image_Size s); //The size of the generated images.
    OpenAI_ImageVariation & setResponseFormat(OpenAI_Image_Response_Format f); //The format in which the generated images are returned.
    OpenAI_ImageVariation & setN(unsigned int n); //The number of images to generate. Must be between 1 and 10.
    OpenAI_ImageVariation & setUser(const char * u); //A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.

    OpenAI_ImageResponse image(uint8_t * data, size_t len); //Creates a variation of the given PNG image.
};

// Builder for the images/edits endpoint.
class OpenAI_ImageEdit {
  private:
    OpenAI & oai;
    const char * prompt;
    OpenAI_Image_Size size;
    OpenAI_Image_Response_Format response_format;
    unsigned int n;
    const char * user;

  protected:

  public:
    OpenAI_ImageEdit(OpenAI &openai);
    ~OpenAI_ImageEdit();

    OpenAI_ImageEdit & setPrompt(const char * p); //A text description of the desired image(s). The maximum length is 1000 characters.
    OpenAI_ImageEdit & setSize(OpenAI_Image_Size s); //The size of the generated images.
    OpenAI_ImageEdit & setResponseFormat(OpenAI_Image_Response_Format f); //The format in which the generated images are returned.
    OpenAI_ImageEdit & setN(unsigned int n); //The number of images to generate. Must be between 1 and 10.
    OpenAI_ImageEdit & setUser(const char * u); //A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.

    OpenAI_ImageResponse image(uint8_t * data, size_t len, uint8_t * mask_data=NULL, size_t mask_len=0); //Creates an edited or extended image given an original image and a prompt.
};

// Builder for the audio/transcriptions endpoint.
class OpenAI_AudioTranscription {
  private:
    OpenAI & oai;
    const char * prompt;
    OpenAI_Audio_Response_Format response_format;
    float temperature;
    const char * language;

  protected:

  public:
    OpenAI_AudioTranscription(OpenAI &openai);
    ~OpenAI_AudioTranscription();

    OpenAI_AudioTranscription & setPrompt(const char * p); //An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
    OpenAI_AudioTranscription & setResponseFormat(OpenAI_Audio_Response_Format f); //The format of the transcript output
    OpenAI_AudioTranscription & setTemperature(float t); //float between 0 and 2
    OpenAI_AudioTranscription & setLanguage(const char * l); //The language in ISO-639-1 format of the input audio. NULL for Auto

    String file(uint8_t * data, size_t len, OpenAI_Audio_Input_Format f); //Transcribe an audio file
};

// Builder for the audio/translations endpoint.
class OpenAI_AudioTranslation {
  private:
    OpenAI & oai;
    const char * prompt;
    OpenAI_Audio_Response_Format response_format;
    float temperature;

  protected:

  public:
    OpenAI_AudioTranslation(OpenAI &openai);
    ~OpenAI_AudioTranslation();

    OpenAI_AudioTranslation & setPrompt(const char * p); //An optional text to guide the model's style or continue a previous audio segment. The prompt should be in English.
    OpenAI_AudioTranslation & setResponseFormat(OpenAI_Audio_Response_Format f); //The format of the transcript output
    OpenAI_AudioTranslation & setTemperature(float t); //float between 0 and 2
    
    String file(uint8_t * data, size_t len, OpenAI_Audio_Input_Format f); //Translate an audio file to English
};

--------------------------------------------------------------------------------