├── .coda-pack.json
├── .gitignore
├── .prettierrc
├── LICENSE.md
├── README.md
├── pack.ts
├── package-lock.json
├── package.json
├── renovate.json
└── tsconfig.json

/.coda-pack.json:
--------------------------------------------------------------------------------
1 | {
2 |   "packId": 16414
3 | }
4 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .coda.json
2 | .coda-credentials.json
3 | node_modules/
4 | test-urls
5 | patches/
6 | 
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 |   "trailingComma": "all",
3 |   "printWidth": 120,
4 |   "singleQuote": true,
5 |   "bracketSpacing": false,
6 |   "arrowParens": "avoid"
7 | }
8 | 
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2022 Coda
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # OpenAI Pack
2 | 
3 | See the full pack details on the gallery page: https://coda.io/packs/openai-16414
4 | 
--------------------------------------------------------------------------------
/pack.ts:
--------------------------------------------------------------------------------
1 | // PLEASE READ BEFORE CONTRIBUTING
2 | // We want to keep Pack code here so that it's available via the Source Code tab in the listing page.
3 | // However, we want to keep it in sync with the https://github.com/coda/openai-pack repository.
4 | // Please copy the changes you make here back to the repository, and verify that the diffs contain
5 | // only your changes; otherwise, raise it in #story-openai-pack.
6 | 
7 | import * as coda from '@codahq/packs-sdk';
8 | 
9 | export const pack = coda.newPack();
10 | 
11 | const DEFAULT_MODEL = 'gpt-3.5-turbo-instruct';
12 | 
13 | pack.setUserAuthentication({
14 |   type: coda.AuthenticationType.HeaderBearerToken,
15 |   instructionsUrl: 'https://platform.openai.com/account/api-keys',
16 | });
17 | 
18 | pack.addNetworkDomain('openai.com');
19 | 
20 | interface CompletionsRequest {
21 |   model: string;
22 |   prompt: string;
23 |   max_tokens?: number;
24 |   temperature?: number;
25 |   stop?: string[];
26 | }
27 | 
28 | interface ChatCompletionMessage {
29 |   role: 'system' | 'user';
30 |   content: string;
31 | }
32 | 
33 | interface ChatCompletionRequest {
34 |   model: string;
35 |   messages: ChatCompletionMessage[];
36 |   max_tokens?: number;
37 |   temperature?: number;
38 |   stop?: string[];
39 | }
40 | 
41 | function isChatCompletionModel(model: string): boolean {
42 |   // Also works with snapshot models like `gpt-3.5-turbo-0301` & `gpt-4-0314`.
43 |   return model.includes('gpt-3.5-turbo') || model.includes('gpt-4');
44 | }
45 | 
46 | async function getChatCompletion(context: coda.ExecutionContext, request: ChatCompletionRequest): Promise<string> {
47 |   const resp = await context.fetcher.fetch({
48 |     url: 'https://api.openai.com/v1/chat/completions',
49 |     method: 'POST',
50 |     body: JSON.stringify(request),
51 |     headers: {'Content-Type': 'application/json'},
52 |   });
53 |   return resp.body.choices[0].message.content.trim();
54 | }
55 | 
56 | async function getCompletion(context: coda.ExecutionContext, request: CompletionsRequest): Promise<string> {
57 |   try {
58 |     // Call the Chat Completions API if the model is a chat completion model.
59 |     if (isChatCompletionModel(request.model)) {
60 |       return await getChatCompletion(context, {
61 |         model: request.model,
62 |         max_tokens: request.max_tokens,
63 |         temperature: request.temperature,
64 |         messages: [{role: 'user', content: request.prompt}],
65 |       });
66 |     }
67 | 
68 |     const resp = await context.fetcher.fetch({
69 |       url: 'https://api.openai.com/v1/completions',
70 |       method: 'POST',
71 |       body: JSON.stringify(request),
72 |       headers: {'Content-Type': 'application/json'},
73 |     });
74 |     return resp.body.choices[0].text.trim();
75 |   } catch (err: any) {
76 |     if (err.statusCode === 429 && err.type === 'insufficient_quota') {
77 |       throw new coda.UserVisibleError(
78 |         "You've exceeded your current OpenAI API quota. Please check your plan and billing details. For help, see https://help.openai.com/en/articles/6891831-error-code-429-you-exceeded-your-current-quota-please-check-your-plan-and-billing-details",
79 |       );
80 |     }
81 | 
82 |     throw err;
83 |   }
84 | }
85 | 
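// Routing sketch: `getCompletion` is the single entry point for the text formulas below.
// Chat-capable models are sent to /v1/chat/completions (with the prompt wrapped as a user
// message); everything else goes to /v1/completions. Illustrative calls, assuming the
// `context` passed into a formula's execute function:
//   await getCompletion(context, {model: 'gpt-4', prompt: 'Say hi'});                  // -> chat endpoint, choices[0].message.content
//   await getCompletion(context, {model: 'gpt-3.5-turbo-instruct', prompt: 'Say hi'}); // -> completions endpoint, choices[0].text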
86 | const promptParam = coda.makeParameter({
87 |   type: coda.ParameterType.String,
88 |   name: 'prompt',
89 |   description: 'The prompt to complete.',
90 | });
91 | 
92 | const modelParameter = coda.makeParameter({
93 |   type: coda.ParameterType.String,
94 |   name: 'model',
95 |   description:
96 |     "The model used to process your request. If you don't specify a model, it defaults to gpt-3.5-turbo-instruct, which is the fastest and lowest cost. For higher quality generation, consider gpt-4. For more information, see https://platform.openai.com/docs/models/overview.",
97 |   optional: true,
98 |   autocomplete: async () => {
99 |     return [
100 |       'gpt-3.5-turbo',
101 |       'gpt-3.5-turbo-instruct',
102 |       'gpt-3.5-turbo-16k',
103 |       'gpt-4',
104 |       'gpt-4-32k',
105 |     ];
106 |   },
107 | });
108 | 
109 | const numTokensParam = coda.makeParameter({
110 |   type: coda.ParameterType.Number,
111 |   name: 'numTokens',
112 |   description:
113 |     'The maximum number of tokens for the completion to output. Defaults to 512. Maximum of 2048 for most models and 4000 for davinci.',
114 |   optional: true,
115 | });
116 | 
117 | const temperatureParam = coda.makeParameter({
118 |   type: coda.ParameterType.Number,
119 |   name: 'temperature',
120 |   description:
121 |     'The temperature controlling how creative the model is with the completion. Must be between 0.0 and 1.0. Defaults to 1.0.',
122 |   optional: true,
123 | });
124 | 
125 | const systemPromptParam = coda.makeParameter({
126 |   type: coda.ParameterType.String,
127 |   name: 'systemPrompt',
128 |   description: "Optional. Helps define the behavior of the assistant. e.g. 'You are a helpful assistant.'",
129 |   optional: true,
130 | });
131 | 
132 | const stopParam = coda.makeParameter({
133 |   type: coda.ParameterType.StringArray,
134 |   name: 'stop',
135 |   description: 'Optional. Up to 4 sequences where the API will stop generating further tokens.',
136 |   optional: true,
137 | });
138 | 
139 | const commonPromptParams = {
140 |   parameters: [promptParam, modelParameter, numTokensParam, temperatureParam, stopParam],
141 |   resultType: coda.ValueType.String,
142 |   onError: handleError,
143 |   execute: async function ([prompt, model = DEFAULT_MODEL, max_tokens = 512, temperature, stop], context) {
144 |     if (prompt.length === 0) {
145 |       return '';
146 |     }
147 | 
148 |     const request = {
149 |       model,
150 |       prompt,
151 |       max_tokens,
152 |       temperature,
153 |       stop,
154 |     };
155 | 
156 |     const result = await getCompletion(context, request);
157 |     return result;
158 |   },
159 | };
160 | 
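// The prompt formulas registered below spread `commonPromptParams` into their definitions,
// so they share parameters, result type, error handling, and the execute body above; the
// `as any` casts presumably exist because the SDK's addFormula typings don't infer well
// through an object spread. In a doc they read like ordinary formulas (illustrative values):
//   =Prompt("Write a tagline for a coffee shop")
//   =Prompt("Write a tagline for a coffee shop", "gpt-4", 256, 0.7)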
161 | pack.addFormula({
162 |   name: 'ChatCompletion',
163 |   description:
164 |     'Takes a prompt as input and returns a model-generated message as output. Optionally, you can provide a system message to control the behavior of the chatbot.',
165 |   parameters: [promptParam, systemPromptParam, modelParameter, numTokensParam, temperatureParam, stopParam],
166 |   resultType: coda.ValueType.String,
167 |   onError: handleError,
168 |   execute: async function (
169 |     [userPrompt, systemPrompt, model = 'gpt-3.5-turbo', maxTokens = 512, temperature, stop],
170 |     context,
171 |   ) {
172 |     coda.assertCondition(isChatCompletionModel(model), 'Must use a chat completion model (e.g. `gpt-3.5-turbo` or `gpt-4`) for this formula.');
173 | 
174 |     if (userPrompt.length === 0) {
175 |       return '';
176 |     }
177 | 
178 |     const messages: ChatCompletionMessage[] = [];
179 | 
180 |     if (systemPrompt && systemPrompt.length > 0) {
181 |       messages.push({role: 'system', content: systemPrompt});
182 |     }
183 | 
184 |     messages.push({role: 'user', content: userPrompt});
185 | 
186 |     const request = {
187 |       model,
188 |       messages,
189 |       max_tokens: maxTokens,
190 |       temperature,
191 |       stop,
192 |     };
193 | 
194 |     const result = await getChatCompletion(context, request);
195 | 
196 |     return result;
197 |   },
198 | });
199 | 
200 | pack.addFormula({
201 |   name: 'GPT3Prompt',
202 |   description: 'Complete text from a prompt',
203 |   ...commonPromptParams,
204 |   isExperimental: true,
205 | } as any);
206 | 
207 | pack.addFormula({
208 |   name: 'Prompt',
209 |   description: 'Complete text from a prompt',
210 |   ...commonPromptParams,
211 | } as any);
212 | 
213 | pack.addFormula({
214 |   name: 'AnswerPrompt',
215 |   description:
216 |     'Complete text from a prompt, outputting the result from the action. This should only be used in a table, in combination with writing the result to a result column; otherwise, it has no effect.',
217 |   ...commonPromptParams,
218 |   isAction: true,
219 | } as any);
220 | 
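// ChatCompletion usage sketch (illustrative calls; the system message is optional):
//   =ChatCompletion("Suggest three names for a budgeting doc")
//   =ChatCompletion("Suggest three names for a budgeting doc", "You are a terse assistant", "gpt-4")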
221 | pack.addFormula({
222 |   name: 'GPT3PromptExamples',
223 |   description: 'Complete text from a prompt and a set of examples',
224 |   parameters: [
225 |     coda.makeParameter({
226 |       type: coda.ParameterType.String,
227 |       name: 'prompt',
228 |       description: 'The prompt to complete.',
229 |     }),
230 |     coda.makeParameter({
231 |       type: coda.ParameterType.StringArray,
232 |       name: 'trainingPrompts',
233 |       description: 'Example prompts. Should be the same length as `trainingResponses`.',
234 |     }),
235 |     coda.makeParameter({
236 |       type: coda.ParameterType.StringArray,
237 |       name: 'trainingResponses',
238 |       description: 'Example responses corresponding to `trainingPrompts`. Should be the same length.',
239 |     }),
240 |     modelParameter,
241 |     numTokensParam,
242 |     temperatureParam,
243 |     stopParam,
244 |   ],
245 |   resultType: coda.ValueType.String,
246 |   onError: handleError,
247 |   execute: async function (
248 |     [prompt, trainingPrompts, trainingResponses, model = DEFAULT_MODEL, max_tokens = 512, temperature, stop],
249 |     context,
250 |   ) {
251 |     coda.assertCondition(
252 |       trainingPrompts.length === trainingResponses.length,
253 |       'Must have same number of example prompts as example responses',
254 |     );
255 |     if (prompt.length === 0) {
256 |       return '';
257 |     }
258 |     coda.assertCondition(trainingResponses.length > 0, 'Please provide some training responses');
259 | 
260 |     const exampleData = trainingPrompts.map((promptEx, i) => `${promptEx}\n${trainingResponses[i]}`).join('```');
261 | 
262 |     const request = {
263 |       model,
264 |       prompt: exampleData + '```' + prompt + '\n',
265 |       max_tokens,
266 |       temperature,
267 |       stop,
268 |     };
269 | 
270 |     const result = await getCompletion(context, request);
271 | 
272 |     return result;
273 |   },
274 | });
275 | 
276 | pack.addFormula({
277 |   name: 'QuestionAnswer',
278 |   description: 'Answer a question. Simply provide a natural-language question that you might ask Google or Wikipedia.',
279 |   parameters: [promptParam, modelParameter, numTokensParam, temperatureParam, stopParam],
280 |   resultType: coda.ValueType.String,
281 |   onError: handleError,
282 |   execute: async function ([prompt, model = DEFAULT_MODEL, max_tokens = 128, temperature, stop], context) {
283 |     if (prompt.length === 0) {
284 |       return '';
285 |     }
286 | 
287 |     const newPrompt = `I am a highly intelligent question answering bot. If you ask me a question that is rooted in truth, I will give you the answer. If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with "Unknown".
288 | 
289 | Q: What is human life expectancy in the United States?
290 | A: Human life expectancy in the United States is 78 years.
291 | 
292 | Q: Who was president of the United States in 1955?
293 | A: Dwight D. Eisenhower was president of the United States in 1955.
294 | 
295 | Q: Which party did he belong to?
296 | A: He belonged to the Republican Party.
297 | 
298 | Q: What is the square root of banana?
299 | A: Unknown
300 | 
301 | Q: How does a telescope work?
302 | A: Telescopes use lenses or mirrors to focus light and make objects appear closer.
303 | 
304 | Q: Where were the 1992 Olympics held?
305 | A: The 1992 Olympics were held in Barcelona, Spain.
306 | 
307 | Q: How many squigs are in a bonk?
308 | A: Unknown
309 | 
310 | Q: ${prompt}
311 | A: `;
312 | 
313 |     const request = {
314 |       model,
315 |       prompt: newPrompt,
316 |       max_tokens,
317 |       temperature,
318 |       stop,
319 |     };
320 | 
321 |     const result = await getCompletion(context, request);
322 | 
323 |     return result;
324 |   },
325 | });
326 | 
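// QuestionAnswer prepends the fixed few-shot Q/A preamble above to the user's question; the
// examples steer the model toward short factual answers, and toward "Unknown" for
// unanswerable questions. Hypothetical calls with hypothetical outputs:
//   =QuestionAnswer("Where were the 1998 Winter Olympics held?")   // e.g. "The 1998 Winter Olympics were held in Nagano, Japan."
//   =QuestionAnswer("How loud is purple?")                         // e.g. "Unknown"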
327 | pack.addFormula({
328 |   name: 'Summarize',
329 |   description: 'Summarize a large chunk of text',
330 |   parameters: [promptParam, modelParameter, numTokensParam, temperatureParam, stopParam],
331 |   resultType: coda.ValueType.String,
332 |   onError: handleError,
333 |   execute: async function ([prompt, model = DEFAULT_MODEL, max_tokens = 64, temperature, stop], context) {
334 |     if (prompt.length === 0) {
335 |       return '';
336 |     }
337 | 
338 |     const newPrompt = `${prompt}\ntldr;\n`;
339 | 
340 |     const request = {
341 |       model,
342 |       prompt: newPrompt,
343 |       max_tokens,
344 |       temperature,
345 |       stop,
346 |     };
347 | 
348 |     const result = await getCompletion(context, request);
349 | 
350 |     return result;
351 |   },
352 | });
353 | 
354 | pack.addFormula({
355 |   name: 'Keywords',
356 |   description: 'Extract keywords from a large chunk of text',
357 |   parameters: [promptParam, modelParameter, numTokensParam, temperatureParam, stopParam],
358 |   resultType: coda.ValueType.String,
359 |   onError: handleError,
360 |   execute: async function ([prompt, model = DEFAULT_MODEL, max_tokens = 64, temperature, stop], context) {
361 |     if (prompt.length === 0) {
362 |       return '';
363 |     }
364 | 
365 |     const newPrompt = `Extract keywords from this text:
366 | ${prompt}`;
367 | 
368 |     const request = {
369 |       model,
370 |       prompt: newPrompt,
371 |       max_tokens,
372 |       temperature,
373 |       stop,
374 |     };
375 | 
376 |     const result = await getCompletion(context, request);
377 | 
378 |     return result;
379 |   },
380 | });
381 | 
382 | pack.addFormula({
383 |   name: 'MoodToColor',
384 |   description: 'Generate a color for a mood',
385 |   parameters: [promptParam, modelParameter, numTokensParam, temperatureParam, stopParam],
386 |   resultType: coda.ValueType.String,
387 |   onError: handleError,
388 |   execute: async function ([prompt, model = DEFAULT_MODEL, max_tokens = 6, temperature, stop], context) {
389 |     if (prompt.length === 0) {
390 |       return '';
391 |     }
392 | 
393 |     const newPrompt = `The css code for a color like ${prompt}:
394 | background-color: #`;
395 | 
396 |     const request = {
397 |       model,
398 |       prompt: newPrompt,
399 |       max_tokens,
400 |       temperature,
401 |       stop,
402 |     };
403 | 
404 |     const result = await getCompletion(context, request);
405 | 
406 |     return result;
407 |   },
408 | });
409 | 
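// MoodToColor's prompt ends with `background-color: #`, so the completion itself is just hex
// digits (note that max_tokens defaults to 6 there). Hypothetical call with a hypothetical
// output:
//   =MoodToColor("calm")   // might return something like "A7C7E7"; prepend "#" to use it in CSS.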
410 | pack.addFormula({
411 |   name: 'SentimentClassifier',
412 |   description: 'Categorizes sentiment of text into positive, neutral, or negative',
413 |   parameters: [promptParam, modelParameter, numTokensParam, temperatureParam, stopParam],
414 |   resultType: coda.ValueType.String,
415 |   onError: handleError,
416 |   execute: async function ([prompt, model = DEFAULT_MODEL, max_tokens = 20, temperature, stop], context) {
417 |     if (prompt.length === 0) {
418 |       return '';
419 |     }
420 | 
421 |     const newPrompt = `Decide whether the text's sentiment is positive, neutral, or negative.
422 | Text: ${prompt}
423 | Sentiment: `;
424 | 
425 |     const request = {
426 |       model,
427 |       prompt: newPrompt,
428 |       max_tokens,
429 |       temperature,
430 |       stop,
431 |     };
432 | 
433 |     const result = await getCompletion(context, request);
434 | 
435 |     return result;
436 |   },
437 | });
438 | 
439 | const styleParameter = coda.makeParameter({
440 |   type: coda.ParameterType.String,
441 |   name: 'style',
442 |   description:
443 |     "The style to use for your image. If you provide this, you don't need to specify the style in the prompt.",
444 |   optional: true,
445 |   autocomplete: async () => {
446 |     return Object.keys(StyleNameToPrompt);
447 |   },
448 | });
449 | 
450 | const StyleNameToPrompt = {
451 |   'Cave wall': 'drawn on a cave wall',
452 |   Basquiat: 'in the style of Basquiat',
453 |   'Digital art': 'as digital art',
454 |   Photorealistic: 'in a photorealistic style',
455 |   'Andy Warhol': 'in the style of Andy Warhol',
456 |   'Pencil drawing': 'as a pencil drawing',
457 |   '1990s Saturday morning cartoon': 'as a 1990s Saturday morning cartoon',
458 |   Steampunk: 'in a steampunk style',
459 |   Solarpunk: 'in a solarpunk style',
460 |   'Studio Ghibli': 'in the style of Studio Ghibli',
461 |   'Movie poster': 'as a movie poster',
462 |   'Book cover': 'as a book cover',
463 |   'Album cover': 'as an album cover',
464 |   '3D Icon': 'as a 3D icon',
465 |   'Ukiyo-e': 'in the style of Ukiyo-e',
466 | };
467 | 
468 | pack.addFormula({
469 |   name: 'CreateDalleImage',
470 |   description: 'Create image from prompt',
471 |   cacheTtlSecs: 60 * 60,
472 |   parameters: [
473 |     coda.makeParameter({
474 |       type: coda.ParameterType.String,
475 |       name: 'prompt',
476 |       description: 'The prompt describing the image to generate.',
477 |     }),
478 |     coda.makeParameter({
479 |       type: coda.ParameterType.String,
480 |       name: 'size',
481 |       description: 'The size of the generated image.',
482 |       optional: true,
483 |       autocomplete: async () => {
484 |         return ['256x256', '512x512', '1024x1024'];
485 |       },
486 |     }),
487 |     styleParameter,
488 |     coda.makeParameter({
489 |       type: coda.ParameterType.Boolean,
490 |       name: 'temporaryUrl',
491 |       description: 'Return a temporary URL that expires after an hour. Useful for adding the image to an Image column, because the default data URIs are too long.',
492 |       optional: true,
493 |     }),
494 |   ],
495 |   resultType: coda.ValueType.String,
496 |   codaType: coda.ValueHintType.ImageReference,
497 |   onError: handleError,
498 |   execute: async function ([prompt, size = '512x512', style, temporaryUrl], context) {
499 |     if (prompt.length === 0) {
500 |       return '';
501 |     }
502 | 
503 |     const request = {
504 |       size,
505 |       prompt: style ? prompt + ' ' + (StyleNameToPrompt[style] ?? style) : prompt,
506 |       response_format: temporaryUrl ? 'url' : 'b64_json',
507 |     };
508 | 
509 |     const resp = await context.fetcher.fetch({
510 |       url: 'https://api.openai.com/v1/images/generations',
511 |       method: 'POST',
512 |       body: JSON.stringify(request),
513 |       headers: {'Content-Type': 'application/json'},
514 |     });
515 |     if (temporaryUrl) {
516 |       return resp.body.data[0].url;
517 |     } else {
518 |       return `data:image/png;base64,${resp.body.data[0].b64_json}`;
519 |     }
520 |   },
521 | });
522 | 
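// CreateDalleImage usage sketch (illustrative calls): unknown style names fall back to being
// appended to the prompt verbatim via the `?? style` fallback above.
//   =CreateDalleImage("A lighthouse at dawn")                                 // data: URI, 512x512
//   =CreateDalleImage("A lighthouse at dawn", "1024x1024", "Ukiyo-e", true)   // temporary hosted URL, expires after ~1 hour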
523 | function handleError(error: Error) {
524 |   if (coda.StatusCodeError.isStatusCodeError(error)) {
525 |     // Cast the error as a StatusCodeError, for better intellisense.
526 |     let statusError = error as coda.StatusCodeError;
527 |     let message = statusError.body?.error?.message;
528 | 
529 |     // If the API returned a 400 error with a message, show it to the user.
530 |     if (statusError.statusCode === 400 && message) {
531 |       throw new coda.UserVisibleError(message);
532 |     }
533 |   }
534 |   // The request failed for some other reason. Re-throw the error so that it
535 |   // bubbles up.
536 |   throw error;
537 | }
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 |   "dependencies": {
3 |     "@codahq/packs-sdk": "^1.2.0"
4 |   },
5 |   "devDependencies": {
6 |     "@types/chai": "^4.3.4",
7 |     "@types/chai-as-promised": "^7.1.5",
8 |     "@types/mocha": "^10.0.0",
9 |     "@types/node": "^18.11.9",
10 |     "@types/sinon": "^10.0.13",
11 |     "@typescript-eslint/eslint-plugin": "^5.38.1",
12 |     "@typescript-eslint/experimental-utils": "^5.38.1",
13 |     "@typescript-eslint/parser": "^5.38.1",
14 |     "chai": "^4.3.7",
15 |     "chai-as-promised": "^7.1.1",
16 |     "eslint": "^8.24.0",
17 |     "eslint-plugin-ban": "^1.6.0",
18 |     "eslint-plugin-filenames": "^1.3.2",
19 |     "eslint-plugin-local": "^1.0.0",
20 |     "eslint-plugin-prefer-let": "^3.0.1",
21 |     "json-schema": "^0.4.0",
22 |     "mocha": "^10.1.0",
23 |     "sinon": "^14.0.2",
24 |     "ts-node": "^10.9.1",
25 |     "typescript": "^4.8.4"
26 |   },
27 |   "scripts": {
28 |     "postinstall": "npx patch-package"
29 |   }
30 | }
31 | 
--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 |   "$schema": "https://docs.renovatebot.com/renovate-schema.json",
3 |   "extends": [
4 |     "config:base"
5 |   ]
6 | }
7 | 
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 |   "compilerOptions": {
3 |     "lib": ["es2020"]
4 |   }
5 | }
6 | 
--------------------------------------------------------------------------------