├── LICENSE ├── README.md ├── index[Deprecated].js └── index.js /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 YAV.AI 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenAI Function Calling example in Node.js 2 | 3 | This repository contains a basic Node.js example demonstrating how to call functions using the OpenAI API. The example showcases a conversation with the OpenAI GPT-3.5 Turbo model, including making function calls and processing their responses. 4 | 5 | ## What's New? 
6 | 7 | - Uses `tools` and `tool_choice` 8 | - Supports parallel function calling 9 | 10 | ## Getting Started 11 | 12 | **Dependencies**: This project has no external dependencies, making it lightweight and easy to set up. You can run this example with just Node.js. However, if you have a preferred library or tool for making HTTP requests, feel free to integrate it as needed to enhance the functionality or suit your specific requirements. 13 | 14 | To get started with this example: 15 | 16 | 1. Clone this repository to your local machine: 17 | 18 | ```shell 19 | git clone <repository-url> 20 | ``` 21 | 22 | 2. Obtain an API key from OpenAI 23 | 24 | 3. Replace `Bearer sk-xxxxxxxxx` in the code with your API key. 25 | 26 | ### Parallel Function calling (Uses tools and tool_choice): 27 | 28 | ```shell 29 | node index.js 30 | ``` 31 | 32 | ### Single Function call (Deprecated) (Uses function_call and functions): 33 | 34 | ```shell 35 | node index[Deprecated].js 36 | ``` 37 | 38 | 4. Customise the functions and conversation as needed. In this example, a simple weather-related function is included, but you can add more functions to suit your use case. 39 | 40 | ## Usage 41 | 42 | This repository serves as a starting point for understanding how to integrate function calls into your conversational AI system using the OpenAI GPT-3.5 Turbo model. You can extend the example by adding more functions, handling different types of requests, and tailoring it to your specific application. 43 | 44 | ## License 45 | 46 | This project is licensed under the MIT License - see the LICENSE file for details. 
47 | -------------------------------------------------------------------------------- /index[Deprecated].js: -------------------------------------------------------------------------------- 1 | const https = require("https"); 2 | 3 | // Example dummy function hard coded to return the same weather 4 | // In production, this could be your backend API or an external API 5 | function get_current_weather(location, unit = "celsius") { 6 | const weather_info = { 7 | location: location, 8 | temperature: "23", 9 | unit: unit, 10 | forecast: ["sunny", "windy"], 11 | }; 12 | return JSON.stringify(weather_info); 13 | } 14 | 15 | async function runConversation() { 16 | // Step 1: send the conversation and available functions to GPT 17 | const messages = [{ role: "user", content: "weather in melbourne" }]; 18 | const functions = [ 19 | { 20 | name: "get_current_weather", 21 | description: "Get the current weather in a given location", 22 | parameters: { 23 | type: "object", 24 | properties: { 25 | location: { 26 | type: "string", 27 | description: "The city and state, e.g. 
San Francisco, CA", 28 | }, 29 | unit: { type: "string", enum: ["celsius", "fahrenheit"] }, 30 | }, 31 | required: ["location"], 32 | }, 33 | }, 34 | ]; 35 | 36 | const requestData = JSON.stringify({ 37 | model: "gpt-3.5-turbo", 38 | messages: messages, 39 | functions: functions, 40 | function_call: "auto", // auto is default, but we'll be explicit 41 | }); 42 | 43 | const options = { 44 | hostname: "api.openai.com", 45 | path: "/v1/chat/completions", 46 | method: "POST", 47 | headers: { 48 | "Content-Type": "application/json", 49 | Authorization: "Bearer sk-xxxxxxxxx", // Replace with your OpenAI API key 50 | }, 51 | }; 52 | 53 | const response = await new Promise((resolve, reject) => { 54 | const req = https.request(options, (res) => { 55 | let data = ""; 56 | 57 | res.on("data", (chunk) => { 58 | data += chunk; 59 | }); 60 | 61 | res.on("end", () => { 62 | resolve(JSON.parse(data)); 63 | }); 64 | }); 65 | 66 | req.on("error", (error) => { 67 | reject(error); 68 | }); 69 | 70 | req.write(requestData); 71 | req.end(); 72 | }); 73 | 74 | const responseMessage = response.choices[0].message; 75 | 76 | // Step 2: check if GPT wanted to call a function 77 | if (responseMessage.function_call) { 78 | // Step 3: call the function 79 | const availableFunctions = { 80 | get_current_weather: get_current_weather, 81 | }; 82 | const functionName = responseMessage.function_call.name; 83 | const functionToCall = availableFunctions[functionName]; 84 | const functionArgs = JSON.parse(responseMessage.function_call.arguments); 85 | const functionResponse = functionToCall( 86 | functionArgs.location, 87 | functionArgs.unit 88 | ); 89 | 90 | // Step 4: send the info on the function call and function response to GPT 91 | messages.push(responseMessage); // extend conversation with assistant's reply 92 | messages.push({ 93 | role: "function", 94 | name: functionName, 95 | content: functionResponse, 96 | }); // extend conversation with function response 97 | 98 | const secondRequestData = 
JSON.stringify({ 99 | model: "gpt-3.5-turbo", 100 | messages: messages, 101 | }); 102 | 103 | const secondResponse = await new Promise((resolve, reject) => { 104 | const req = https.request(options, (res) => { 105 | let data = ""; 106 | 107 | res.on("data", (chunk) => { 108 | data += chunk; 109 | }); 110 | 111 | res.on("end", () => { 112 | resolve(JSON.parse(data)); 113 | }); 114 | }); 115 | 116 | req.on("error", (error) => { 117 | reject(error); 118 | }); 119 | 120 | req.write(secondRequestData); 121 | req.end(); 122 | }); 123 | return secondResponse; 124 | } 125 | } 126 | 127 | runConversation() 128 | .then((response) => { 129 | console.log(response.choices[0].message.content); 130 | }) 131 | .catch((error) => { 132 | console.error(error); 133 | }); 134 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | const https = require("https"); 2 | 3 | function getCurrentWeather(location, unit = "fahrenheit") { 4 | let weather_info = { 5 | location: location, 6 | temperature: "unknown", 7 | unit: unit, 8 | }; 9 | 10 | if (location.toLowerCase().includes("tokyo")) { 11 | weather_info = { location: "Tokyo", temperature: "10", unit: "celsius" }; 12 | } else if (location.toLowerCase().includes("san francisco")) { 13 | weather_info = { 14 | location: "San Francisco", 15 | temperature: "72", 16 | unit: "fahrenheit", 17 | }; 18 | } else if (location.toLowerCase().includes("paris")) { 19 | weather_info = { location: "Paris", temperature: "22", unit: "fahrenheit" }; 20 | } 21 | 22 | return JSON.stringify(weather_info); 23 | } 24 | async function runConversation() { 25 | const messages = [ 26 | { 27 | role: "user", 28 | content: "What's the weather like in San Francisco, Tokyo, and Paris?", 29 | }, 30 | ]; 31 | const tools = [ 32 | { 33 | type: "function", 34 | function: { 35 | name: "get_current_weather", 36 | description: "Get the current weather in a 
given location", 37 | parameters: { 38 | type: "object", 39 | properties: { 40 | location: { 41 | type: "string", 42 | description: "The city and state, e.g. San Francisco, CA", 43 | }, 44 | unit: { type: "string", enum: ["celsius", "fahrenheit"] }, 45 | }, 46 | required: ["location"], 47 | }, 48 | }, 49 | }, 50 | ]; 51 | 52 | const requestData = JSON.stringify({ 53 | model: "gpt-3.5-turbo", 54 | messages: messages, 55 | tools: tools, 56 | tool_choice: "auto", 57 | }); 58 | 59 | const options = { 60 | hostname: "api.openai.com", 61 | path: "/v1/chat/completions", 62 | method: "POST", 63 | headers: { 64 | "Content-Type": "application/json", 65 | Authorization: "Bearer sk-xxxxxxxxx", // Replace with your OpenAI API key 66 | }, 67 | }; 68 | 69 | const response = await new Promise((resolve, reject) => { 70 | const req = https.request(options, (res) => { 71 | let data = ""; 72 | 73 | res.on("data", (chunk) => { 74 | data += chunk; 75 | }); 76 | 77 | res.on("end", () => { 78 | resolve(JSON.parse(data)); 79 | }); 80 | }); 81 | 82 | req.on("error", (error) => { 83 | reject(error); 84 | }); 85 | 86 | req.write(requestData); 87 | req.end(); 88 | }); 89 | 90 | const responseMessage = response.choices[0].message; 91 | 92 | if (responseMessage.tool_calls) { 93 | const toolCalls = responseMessage.tool_calls; 94 | const availableFunctions = { 95 | get_current_weather: getCurrentWeather, 96 | }; 97 | messages.push(responseMessage); 98 | 99 | const functionResponses = await Promise.all( 100 | toolCalls.map(async (toolCall) => { 101 | const functionName = toolCall.function.name; 102 | const functionArgs = JSON.parse(toolCall.function.arguments); 103 | const functionToCall = availableFunctions[functionName]; 104 | const functionResponse = functionToCall( 105 | functionArgs.location, 106 | functionArgs.unit 107 | ); 108 | return { 109 | tool_call_id: toolCall.id, 110 | role: "tool", 111 | name: functionName, 112 | content: functionResponse, 113 | }; 114 | }) 115 | ); 116 | 117 | 
messages.push(...functionResponses); 118 | 119 | const secondRequestData = JSON.stringify({ 120 | model: "gpt-3.5-turbo", 121 | messages: messages, 122 | }); 123 | 124 | const secondResponse = await new Promise((resolve, reject) => { 125 | const req = https.request(options, (res) => { 126 | let data = ""; 127 | 128 | res.on("data", (chunk) => { 129 | data += chunk; 130 | }); 131 | 132 | res.on("end", () => { 133 | resolve(JSON.parse(data)); 134 | }); 135 | }); 136 | 137 | req.on("error", (error) => { 138 | reject(error); 139 | }); 140 | 141 | req.write(secondRequestData); 142 | req.end(); 143 | }); 144 | 145 | return secondResponse; 146 | } 147 | } 148 | 149 | runConversation() 150 | .then((response) => { 151 | const messageContent = response.choices[0].message.content; 152 | console.log(messageContent); 153 | }) 154 | .catch((error) => { 155 | console.error(error); 156 | }); 157 | --------------------------------------------------------------------------------