├── .github └── workflows │ └── test.yml ├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── biome.json ├── build.ts ├── bun.lock ├── docs ├── chat-streaming.md └── chat-structured-outputs.md ├── examples ├── anthropic-chat.ts ├── anthropic-models.ts ├── deepseek-chat.ts ├── deepseek-models.ts ├── fal-image-stream.ts ├── fal-image.ts ├── fireworks-chat.ts ├── google-chat-stream.ts ├── google-chat.ts ├── groq-chat.ts ├── ollama-chat.ts ├── ollama-embedding.ts ├── ollama-models.ts ├── openai-chat-json-schema-stream.ts ├── openai-chat-json-schema.ts ├── openai-chat-load.ts ├── openai-chat-stream.ts ├── openai-chat-tool-stream.ts ├── openai-chat-tool.ts ├── openai-chat-vision-stream.ts ├── openai-chat-vision.ts ├── openai-chat.ts ├── openai-embedding.ts ├── openai-image.ts ├── openai-models.ts ├── perplexity-chat.ts ├── together-chat.ts └── voyage-embedding.ts ├── package.json ├── src ├── index.ts ├── jobs │ ├── builder.ts │ ├── chat │ │ ├── builder.ts │ │ ├── index.ts │ │ ├── schema.ts │ │ ├── tool.ts │ │ └── utils.ts │ ├── embedding │ │ ├── builder.ts │ │ ├── index.ts │ │ └── schema.ts │ ├── image │ │ ├── builder.ts │ │ ├── index.ts │ │ └── schema.ts │ ├── load.ts │ ├── models │ │ ├── builder.ts │ │ ├── index.ts │ │ └── schema.ts │ ├── schema.ts │ └── stream.ts └── providers │ ├── anthropic │ ├── chat.ts │ ├── index.ts │ ├── models.ts │ └── schema.ts │ ├── deepseek │ └── index.ts │ ├── fal │ ├── image.ts │ ├── index.ts │ └── schema.ts │ ├── fireworks │ └── index.ts │ ├── google │ ├── chat.ts │ ├── index.ts │ └── schema.ts │ ├── luma │ └── index.ts │ ├── ollama │ ├── chat.ts │ ├── embedding.ts │ ├── index.ts │ ├── models.ts │ └── schema.ts │ ├── openai │ ├── chat.ts │ ├── embedding.ts │ ├── image.ts │ ├── index.ts │ ├── models.ts │ └── schema.ts │ ├── together │ └── index.ts │ └── voyage │ └── index.ts ├── test ├── __snapshots__ │ ├── chat.test.ts.snap │ ├── embedding.test.ts.snap │ ├── image.test.ts.snap │ ├── models.snap │ └── models.test.ts.snap ├── 
chat.test.ts ├── embedding.test.ts ├── image.test.ts ├── models.test.ts └── utils.ts └── tsconfig.json /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | pull_request: 9 | branches: 10 | - main 11 | 12 | jobs: 13 | test: 14 | runs-on: ${{ matrix.os }} 15 | 16 | strategy: 17 | matrix: 18 | os: [ubuntu-latest, macos-latest] 19 | fail-fast: false 20 | 21 | steps: 22 | - id: checkout 23 | name: Checkout 24 | uses: actions/checkout@v3 25 | - id: setup-bun 26 | name: Setup Bun 27 | uses: oven-sh/setup-bun@v1 28 | with: 29 | bun-version: latest 30 | - id: install-deps 31 | name: Install dependencies 32 | run: | 33 | bun install 34 | # - id: tsc 35 | # run: bunx tsc 36 | - id: build 37 | name: Run build 38 | run: | 39 | bun run build 40 | - id: test 41 | name: Run test 42 | run: | 43 | bun test 44 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | .pnpm-debug.log* 9 | .vscode 10 | # Diagnostic reports (https://nodejs.org/api/report.html) 11 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 12 | 13 | # Runtime data 14 | pids 15 | *.pid 16 | *.seed 17 | *.pid.lock 18 | 19 | # Directory for instrumented libs generated by jscoverage/JSCover 20 | lib-cov 21 | 22 | # Coverage directory used by tools like istanbul 23 | coverage 24 | *.lcov 25 | 26 | # nyc test coverage 27 | .nyc_output 28 | 29 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 30 | .grunt 31 | 32 | # Bower dependency directory (https://bower.io/) 33 | bower_components 34 | 35 | # node-waf configuration 36 | .lock-wscript 37 | 38 | # Compiled binary addons (https://nodejs.org/api/addons.html) 39 | build/Release 40 | 41 | # 
Dependency directories 42 | node_modules/ 43 | jspm_packages/ 44 | 45 | # Snowpack dependency directory (https://snowpack.dev/) 46 | web_modules/ 47 | 48 | # TypeScript cache 49 | *.tsbuildinfo 50 | 51 | # Optional npm cache directory 52 | .npm 53 | 54 | # Optional eslint cache 55 | .eslintcache 56 | 57 | # Optional stylelint cache 58 | .stylelintcache 59 | 60 | # Microbundle cache 61 | .rpt2_cache/ 62 | .rts2_cache_cjs/ 63 | .rts2_cache_es/ 64 | .rts2_cache_umd/ 65 | 66 | # Optional REPL history 67 | .node_repl_history 68 | 69 | # Output of 'npm pack' 70 | *.tgz 71 | 72 | # Yarn Integrity file 73 | .yarn-integrity 74 | 75 | # dotenv environment variable files 76 | .env 77 | .env.development.local 78 | .env.test.local 79 | .env.production.local 80 | .env.local 81 | 82 | # parcel-bundler cache (https://parceljs.org/) 83 | .cache 84 | .parcel-cache 85 | 86 | # Next.js build output 87 | .next 88 | out 89 | 90 | # Nuxt.js build / generate output 91 | .nuxt 92 | dist 93 | 94 | # Gatsby files 95 | .cache/ 96 | # Comment in the public line in if your project uses Gatsby and not Next.js 97 | # https://nextjs.org/blog/next-9-1#public-directory-support 98 | # public 99 | 100 | # vuepress build output 101 | .vuepress/dist 102 | 103 | # vuepress v2.x temp and cache directory 104 | .temp 105 | .cache 106 | 107 | # Docusaurus cache and generated files 108 | .docusaurus 109 | 110 | # Serverless directories 111 | .serverless/ 112 | 113 | # FuseBox cache 114 | .fusebox/ 115 | 116 | # DynamoDB Local files 117 | .dynamodb/ 118 | 119 | # TernJS port file 120 | .tern-port 121 | 122 | # Stores VSCode versions used for testing VSCode extensions 123 | .vscode-test 124 | 125 | # yarn v2 126 | .yarn/cache 127 | .yarn/unplugged 128 | .yarn/build-state.yml 129 | .yarn/install-state.gz 130 | .pnp.* 131 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 
2 | 3 | ## Unreleased 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # fluent-ai 2 | 3 | [![NPM Version](https://img.shields.io/npm/v/fluent-ai)](http://npmjs.com/fluent-ai) 4 | [![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/modalityml/fluent-ai/test.yml)](https://github.com/modalityml/fluent-ai/actions/workflows/test.yml) 5 | 6 | > [!WARNING] 7 | > This project is in beta. The API is subject to changes and may break. 8 | 9 | fluent-ai is a lightweight, type-safe AI toolkit that seamlessly integrates multiple AI providers. It features structured outputs, streaming capabilities, and job serialization support. 10 | 11 | ## Installation 12 | 13 | [Zod](https://zod.dev/) is a popular type of validation library for TypeScript and JavaScript that allows developers to define and validate data schemas in a concise and type-safe manner. fluent-ai is built upon zod. 
14 | 15 | ```sh 16 | npm install fluent-ai zod@next 17 | ``` 18 | 19 | ## AI Service provider support 20 | 21 | fluent-ai includes support for multiple AI providers and modalities. 22 | 23 | | provider | chat completion | embedding | image generation | list models | 24 | | --------- | ------------------ | ------------------ | ------------------ | ------------------ | 25 | | anthropic | :white_check_mark: | | | :white_check_mark: | 26 | | fal | | | :white_check_mark: | | 27 | | google | :white_check_mark: | | | | 28 | | ollama | :white_check_mark: | :white_check_mark: | | :white_check_mark: | 29 | | openai | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | 30 | | voyage | | :white_check_mark: | | | 31 | 32 | By default, API keys for providers are read from environment variables (`process.env`) following the format `<PROVIDER>_API_KEY` (e.g., `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`). 33 | 34 | You can also initialize a provider with manual API key settings: 35 | 36 | ```ts 37 | import { openai } from "fluent-ai"; 38 | 39 | openai({ apiKey: "" }); 40 | ``` 41 | 42 | For more examples with different AI providers, check out the [examples](/examples/) directory. 43 | 44 | Don't see your AI provider? Feel free to [open an issue](https://github.com/modalityml/fluent-ai/issues) or [start a discussion](https://github.com/modalityml/fluent-ai/discussions) to request support. [Join our Discord community](https://discord.gg/HzGZWbY8Fx) 45 | 46 | ## Job API 47 | 48 | Each request to AI providers is wrapped in a `Job`, which can also be serialized and deserialized. A [fluent](https://en.wikipedia.org/wiki/Fluent_interface) API with method chaining helps create jobs easily. 
49 | 50 | ### Method chaining 51 | 52 | ```ts 53 | import { openai, user } from "fluent-ai"; 54 | 55 | const job = openai() 56 | .chat("gpt-4o-mini") 57 | .messages([user("Hi")]) 58 | .temperature(0.5) 59 | .maxTokens(1024); 60 | ``` 61 | 62 | ### Declaration 63 | 64 | Alternatively, fluent-ai supports declarative job creation using JSON objects, with full TypeScript autocompletion support. 65 | 66 | ```ts 67 | import { load } from "fluent-ai"; 68 | 69 | const job = load({ 70 | provider: "openai", 71 | type: "chat", 72 | input: { 73 | model: "gpt-4o-mini", 74 | messages: [{ role: "user", content: "hi" }], 75 | temperature: 0.5, 76 | }, 77 | }); 78 | ``` 79 | 80 | fluent-ai provides built-in TypeScript type definitions and schema validation for jobs: 81 | 82 | ```ts 83 | import { type Job } from "fluent-ai"; // TypeScript type 84 | import { JobSchema } from "fluent-ai"; // Zod schema 85 | import { jobJSONSchema } from "fluent-ai"; // JSON Schema 86 | ``` 87 | 88 | ### Job serialization and deserialization 89 | 90 | To serialize a job to a JSON object, use the `dump` method: 91 | 92 | ```ts 93 | const payload = job.dump(); 94 | ``` 95 | 96 | This allows you to save the job's state for later use, such as storing it in a queue or database. 97 | To recreate and execute a job from the JSON object, use the `load` function: 98 | 99 | ```ts 100 | import { load } from "fluent-ai"; 101 | 102 | const job = load(payload); 103 | await job.run(); 104 | ``` 105 | 106 | ## Chat completions 107 | 108 | Chat completion, such as ChatGPT, is the most common AI service. It generates responses in a conversational format based on given inputs, also known as prompts. 
109 | 110 | ### Text generation 111 | 112 | ```ts 113 | import { openai, system, user, text } from "fluent-ai"; 114 | 115 | const job = openai() 116 | .chat("gpt-4o-mini") 117 | .messages([system("You are a helpful assistant"), user("Hi")]); 118 | 119 | const result = await job.run(); 120 | console.log(text(result)); 121 | ``` 122 | 123 | ### Function calling (tool calling) 124 | 125 | Function calling (or tool calling) is an advanced functionality in chat completions that enhances their ability to interact with external systems and perform specific tasks. 126 | 127 | Here's how to create a tool: 128 | 129 | ```ts 130 | import { z } from "zod"; 131 | import { tool } from "fluent-ai"; 132 | 133 | const weatherTool = tool("get_current_weather") 134 | .description("Get the current weather in a given location") 135 | .parameters( 136 | z.object({ 137 | location: z.string(), 138 | unit: z.enum(["celsius", "fahrenheit"]).optional(), 139 | }) 140 | ); 141 | ``` 142 | 143 | To use the tool, add it to a chat job with a function-calling-enabled model, such as `gpt-4o-mini` from openai. 144 | 145 | ```ts 146 | const job = openai().chat("gpt-4o-mini").tool(weatherTool); 147 | 148 | await job.messages([user("What is the weather in San Francisco?")]).run(); 149 | ``` 150 | 151 | ### Streaming support 152 | 153 | Rather than waiting for the complete response, streaming enables the model to return portions of the response as they're generated. fluent-ai provides built-in streaming support for text, objects, and tools in chat models. 154 | 155 | ```ts 156 | const job = openai() 157 | .chat("gpt-4o-mini") 158 | .messages([system("You are a helpful assistant"), user("Hi")]) 159 | .stream(); 160 | 161 | for await (const event of await job.run()) { 162 | console.log(text(event)); 163 | } 164 | ``` 165 | 166 | fluent-ai supports streaming text, object and tool calls on demand. For more details, see the [streaming docs](/docs/chat-streaming.md). 
167 | 168 | ## Embedding 169 | 170 | ```ts 171 | import { openai } from "fluent-ai"; 172 | 173 | const job = openai().embedding("text-embedding-3-small").value("hello"); 174 | const result = await job.run(); 175 | ``` 176 | 177 | ## Image generation 178 | 179 | ```ts 180 | import { openai } from "fluent-ai"; 181 | 182 | const job = openai().image("dalle-2").prompt("a cat").n(1).size("512x512"); 183 | const result = await job.run(); 184 | ``` 185 | 186 | ## List models 187 | 188 | fluent-ai provides an easy way to retrieve all available models from supported providers (openai, anthropic, ollama). 189 | 190 | ```ts 191 | import { openai } from "fluent-ai"; 192 | 193 | const models = await openai().models().run(); 194 | ``` 195 | 196 | ## Support 197 | 198 | Feel free to [open an issue](https://github.com/modalityml/fluent-ai/issues) or [start a discussion](https://github.com/modalityml/fluent-ai/discussions) if you have any questions. If you would like to request support for a new AI provider, please create an issue with details about the provider's API. [Join our Discord community](https://discord.gg/HzGZWbY8Fx) for help and updates. 199 | 200 | ## License 201 | 202 | fluent-ai is licensed under Apache 2.0 as found in the LICENSE file. 
203 | -------------------------------------------------------------------------------- /biome.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://biomejs.dev/schemas/1.9.4/schema.json", 3 | "vcs": { 4 | "enabled": false, 5 | "clientKind": "git", 6 | "useIgnoreFile": false 7 | }, 8 | "files": { 9 | "ignoreUnknown": false, 10 | "ignore": [ 11 | "**/node_modules/**", 12 | "**/dist/**", 13 | "**/build/**" 14 | ] 15 | }, 16 | "formatter": { 17 | "enabled": true, 18 | "indentStyle": "space", 19 | "indentWidth": 2 20 | }, 21 | "organizeImports": { 22 | "enabled": true 23 | }, 24 | "linter": { 25 | "enabled": true, 26 | "rules": { 27 | "recommended": false, 28 | "suspicious": { 29 | "noExplicitAny": "off" 30 | }, 31 | "style": { 32 | "noNonNullAssertion": "off", 33 | "useImportType": "off" 34 | }, 35 | "correctness": { 36 | "useExhaustiveDependencies": "warn" 37 | } 38 | } 39 | }, 40 | "javascript": { 41 | "formatter": { 42 | "indentStyle": "space", 43 | "indentWidth": 2, 44 | "quoteStyle": "double", 45 | "arrowParentheses": "always", 46 | "bracketSameLine": false, 47 | "bracketSpacing": true, 48 | "jsxQuoteStyle": "double", 49 | "quoteProperties": "asNeeded", 50 | "semicolons": "always", 51 | "trailingCommas": "all" 52 | } 53 | }, 54 | "json": { 55 | "formatter": { 56 | "trailingCommas": "none" 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /build.ts: -------------------------------------------------------------------------------- 1 | import type { BuildConfig } from "bun"; 2 | import dts from "bun-plugin-dts"; 3 | 4 | const defaultBuildConfig: BuildConfig = { 5 | entrypoints: ["./src/index.ts"], 6 | outdir: "./dist", 7 | external: ["zod"], 8 | }; 9 | 10 | await Promise.all([ 11 | Bun.build({ 12 | ...defaultBuildConfig, 13 | plugins: [dts()], 14 | format: "esm", 15 | naming: "[dir]/[name].js", 16 | }), 17 | Bun.build({ 18 | ...defaultBuildConfig, 19 
| format: "cjs", 20 | naming: "[dir]/[name].cjs", 21 | }), 22 | ]); 23 | -------------------------------------------------------------------------------- /bun.lock: -------------------------------------------------------------------------------- 1 | { 2 | "lockfileVersion": 1, 3 | "workspaces": { 4 | "": { 5 | "name": "fluent-ai", 6 | "dependencies": { 7 | "eventsource-parser": "^3.0.0", 8 | "partial-json": "^0.1.7", 9 | }, 10 | "devDependencies": { 11 | "@types/bun": "latest", 12 | "bun-plugin-dts": "^0.3.0", 13 | }, 14 | "peerDependencies": { 15 | "typescript": "^5.0.0", 16 | "zod": "^4.0.0-beta.20250418T202744", 17 | }, 18 | }, 19 | }, 20 | "packages": { 21 | "@types/bun": ["@types/bun@1.2.3", "", { "dependencies": { "bun-types": "1.2.3" } }, "sha512-054h79ipETRfjtsCW9qJK8Ipof67Pw9bodFWmkfkaUaRiIQ1dIV2VTlheshlBx3mpKr0KeK8VqnMMCtgN9rQtw=="], 22 | 23 | "@types/node": ["@types/node@22.13.5", "", { "dependencies": { "undici-types": "~6.20.0" } }, "sha512-+lTU0PxZXn0Dr1NBtC7Y8cR21AJr87dLLU953CWA6pMxxv/UDc7jYAY90upcrie1nRcD6XNG5HOYEDtgW5TxAg=="], 24 | 25 | "@types/ws": ["@types/ws@8.5.14", "", { "dependencies": { "@types/node": "*" } }, "sha512-bd/YFLW+URhBzMXurx7lWByOu+xzU9+kb3RboOteXYDfW+tr+JZa99OyNmPINEGB/ahzKrEuc8rcv4gnpJmxTw=="], 26 | 27 | "@zod/core": ["@zod/core@0.4.6", "", {}, "sha512-0IeEldTobOdkoJPyINVQgOHlgCWbpozSsDjdcd2+VcxKvf8T+SYLDVk7NKt0XJx3C0ImNXAXh3V3yIw46NJyQA=="], 28 | 29 | "ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], 30 | 31 | "ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], 32 | 33 | "bun-plugin-dts": ["bun-plugin-dts@0.3.0", "", { "dependencies": { "common-path-prefix": "^3.0.0", "dts-bundle-generator": "^9.5.1", "get-tsconfig": "^4.8.1" } }, 
"sha512-QpiAOKfPcdOToxySOqRY8FwL+brTvyXEHWzrSCRKt4Pv7Z4pnUrhK9tFtM7Ndm7ED09B/0cGXnHJKqmekr/ERw=="], 34 | 35 | "bun-types": ["bun-types@1.2.3", "", { "dependencies": { "@types/node": "*", "@types/ws": "~8.5.10" } }, "sha512-P7AeyTseLKAvgaZqQrvp3RqFM3yN9PlcLuSTe7SoJOfZkER73mLdT2vEQi8U64S1YvM/ldcNiQjn0Sn7H9lGgg=="], 36 | 37 | "cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="], 38 | 39 | "color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], 40 | 41 | "color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], 42 | 43 | "common-path-prefix": ["common-path-prefix@3.0.0", "", {}, "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w=="], 44 | 45 | "dts-bundle-generator": ["dts-bundle-generator@9.5.1", "", { "dependencies": { "typescript": ">=5.0.2", "yargs": "^17.6.0" }, "bin": { "dts-bundle-generator": "dist/bin/dts-bundle-generator.js" } }, "sha512-DxpJOb2FNnEyOzMkG11sxO2dmxPjthoVWxfKqWYJ/bI/rT1rvTMktF5EKjAYrRZu6Z6t3NhOUZ0sZ5ZXevOfbA=="], 46 | 47 | "emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], 48 | 49 | "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], 50 | 51 | "eventsource-parser": ["eventsource-parser@3.0.0", "", {}, "sha512-T1C0XCUimhxVQzW4zFipdx0SficT651NnkR0ZSH3yQwh+mFMdLfgjABVi4YtMTtaL4s168593DaoaRLMqryavA=="], 52 | 53 | "get-caller-file": ["get-caller-file@2.0.5", "", {}, 
"sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="], 54 | 55 | "get-tsconfig": ["get-tsconfig@4.10.0", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-kGzZ3LWWQcGIAmg6iWvXn0ei6WDtV26wzHRMwDSzmAbcXrTEXxHy6IehI6/4eT6VRKyMP1eF1VqwrVUmE/LR7A=="], 56 | 57 | "is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], 58 | 59 | "partial-json": ["partial-json@0.1.7", "", {}, "sha512-Njv/59hHaokb/hRUjce3Hdv12wd60MtM9Z5Olmn+nehe0QDAsRtRbJPvJ0Z91TusF0SuZRIvnM+S4l6EIP8leA=="], 60 | 61 | "require-directory": ["require-directory@2.1.1", "", {}, "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q=="], 62 | 63 | "resolve-pkg-maps": ["resolve-pkg-maps@1.0.0", "", {}, "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw=="], 64 | 65 | "string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], 66 | 67 | "strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], 68 | 69 | "typescript": ["typescript@5.7.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw=="], 70 | 71 | "undici-types": ["undici-types@6.20.0", "", {}, "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg=="], 72 | 73 | "wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, 
"sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="], 74 | 75 | "y18n": ["y18n@5.0.8", "", {}, "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA=="], 76 | 77 | "yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="], 78 | 79 | "yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="], 80 | 81 | "zod": ["zod@4.0.0-beta.20250412T085909", "", { "dependencies": { "@zod/core": "0.4.6" } }, "sha512-W4OjoCnF5XTuljw4X6Hh3Inp45Ug7mmCWzWfFPv+RZcSIAGz7OPqBKOEDtLjfnJAvoxvY3vO0SkxH1zr0Md/Ag=="], 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /docs/chat-streaming.md: -------------------------------------------------------------------------------- 1 | # Streaming in chat completions 2 | 3 | ## Stream options 4 | 5 | ```ts 6 | export interface StreamOptions { 7 | includeUsage?: boolean; 8 | } 9 | ``` 10 | 11 | ## Text streaming 12 | 13 | ```ts 14 | const { textStream } = await openai() 15 | .chat("gpt-4o-mini") 16 | .messages([user("hi")]) 17 | .stream() 18 | .run(); 19 | 20 | for await (const text of textStream) { 21 | process.stdout.write(text); 22 | } 23 | ``` 24 | 25 | ## Streaming with tools 26 | 27 | ```ts 28 | const { toolCallStream } = await openai() 29 | .chat("gpt-4o-mini") 30 | .tool(weatherTool) 31 | .messages([user("What's the weather like in Boston, Beijing, Tokyo today?")]) 32 | .stream() 33 | .run(); 34 | 35 | for await (const toolCalls of toolCallStream) { 36 | console.log(toolCalls); 37 | } 38 | ``` 39 | 40 | ## Structured output streaming 41 | 42 | ```ts 43 | const { 
objectStream } = await openai() 44 | .chat("gpt-4o-mini") 45 | .messages([user("generate a person with name and age in json format")]) 46 | .responseSchema(personSchema) 47 | .objectStream() 48 | .run(); 49 | 50 | for await (const object of objectStream) { 51 | console.log(object); 52 | } 53 | ``` 54 | 55 | ## Chunk Stream 56 | 57 | The original chunk object from providers 58 | 59 | ```ts 60 | const { stream } = await openai() 61 | .chat("gpt-4o-mini") 62 | .messages([user("hi")]); 63 | 64 | for await (const chunk of stream) { 65 | console.log(chunk); 66 | } 67 | ``` 68 | -------------------------------------------------------------------------------- /docs/chat-structured-outputs.md: -------------------------------------------------------------------------------- 1 | # Guide to Structured Outputs in Chat Completions 2 | 3 | Structured outputs are crucial when working with Language Models (LLMs) in real-world applications. Rather than generating plain text responses, structured outputs enable you to receive data in predefined formats that are easier to process and integrate into applications. 4 | 5 | As demand for prompt engineering continues to grow, one of the most important skills in this field is instructing a language model to generate structured data. While traditional text-based outputs (like those used in chatbots) are useful, structured outputs—especially those based on predefined schemas—are essential for real-world applications such as information extraction, dynamic user interface generation, and more. 6 | 7 | This guide explores different approaches for generating structured outputs from chat completions across various language model providers. 8 | 9 | ## 1. 
JSON Schema Response Format 10 | 11 | ```ts 12 | import { openai } from "fluent-ai"; 13 | 14 | const personSchema = { 15 | name: "person", 16 | strict: true, 17 | schema: { 18 | type: "object", 19 | properties: { 20 | name: { 21 | type: "string", 22 | }, 23 | age: { 24 | type: "number", 25 | }, 26 | }, 27 | required: ["name", "age"], 28 | additionalProperties: false, 29 | }, 30 | }; 31 | 32 | openai() 33 | .chat("gpt-4o-mini") 34 | .messages([ 35 | { 36 | role: "user", 37 | content: "generate a person with name and age in json format", 38 | }, 39 | ]) 40 | .responseFormat(personSchema); 41 | ``` 42 | 43 | ## response_format: json_object 44 | 45 | ```ts 46 | import { openai } from "fluent-ai"; 47 | 48 | openai() 49 | .chat("gpt-4o-mini") 50 | .messages([ 51 | { 52 | role: "user", 53 | content: "generate a person with name and age in json format", 54 | }, 55 | ]) 56 | .jsonObject(); 57 | ``` 58 | 59 | ## tool argument 60 | 61 | ```ts 62 | import { openai, tool } from "fluent-ai"; 63 | 64 | const schema = tool("person") 65 | .description("person with name and age") 66 | .parameters({ 67 | type: "object", 68 | properties: { 69 | name: { 70 | type: "string", 71 | description: "person name", 72 | }, 73 | age: { 74 | type: "number", 75 | description: "person age", 76 | }, 77 | }, 78 | required: ["name", "age"], 79 | }); 80 | openai().chat("gpt-4o-mini").tool(schema); 81 | ``` 82 | 83 | ## jsonSchema() in fluent-ai 84 | 85 | jsonSchema is a fluent-ai API for structure output 86 | 87 | ```ts 88 | const personSchema = z.object({ 89 | name: z.string(), 90 | age: z.number(), 91 | }); 92 | 93 | const { object } = await openai() 94 | .chat("gpt-4o-mini") 95 | .schema(personSchema) 96 | .run(); 97 | ``` 98 | -------------------------------------------------------------------------------- /examples/anthropic-chat.ts: -------------------------------------------------------------------------------- 1 | import { anthropic, text, user } from "../src"; 2 | 3 | const job = anthropic() 4 
| .chat("claude-3-5-sonnet-20241022") 5 | .maxTokens(1024) 6 | .messages([user("Hello, world")]); 7 | const result = await job.run(); 8 | console.log(text(result)); 9 | -------------------------------------------------------------------------------- /examples/anthropic-models.ts: -------------------------------------------------------------------------------- 1 | import { anthropic } from "../src"; 2 | 3 | const job = anthropic().models(); 4 | const result = await job.run(); 5 | console.log(result); 6 | -------------------------------------------------------------------------------- /examples/deepseek-chat.ts: -------------------------------------------------------------------------------- 1 | import { deepseek, system, user } from "../src"; 2 | 3 | const job = deepseek() 4 | .chat("deepseek-chat") 5 | .messages([system("you are a helpful assistant"), user("who are you")]); 6 | const result = await job.run(); 7 | 8 | console.log(result); 9 | -------------------------------------------------------------------------------- /examples/deepseek-models.ts: -------------------------------------------------------------------------------- 1 | import { deepseek } from "../src"; 2 | 3 | const job = deepseek().models(); 4 | const result = await job.run(); 5 | 6 | console.log(result); 7 | -------------------------------------------------------------------------------- /examples/fal-image-stream.ts: -------------------------------------------------------------------------------- 1 | import { fal } from "../src"; 2 | 3 | const job = fal().image("fal-ai/flux/dev").prompt("A cat on a horse"); 4 | 5 | const stream = await job.run(); 6 | for await (const event of stream) { 7 | console.log(event); 8 | } 9 | console.log(result); 10 | -------------------------------------------------------------------------------- /examples/fal-image.ts: -------------------------------------------------------------------------------- 1 | import { fal } from "../src"; 2 | 3 | const job = 
fal().image("fal-ai/flux/dev").prompt("A cat on a horse"); 4 | const stream = await job.run(); 5 | 6 | for await (const event of stream) { 7 | console.log(event); 8 | } 9 | -------------------------------------------------------------------------------- /examples/fireworks-chat.ts: -------------------------------------------------------------------------------- 1 | import { fireworks, system, user } from "../src"; 2 | 3 | const job = await fireworks() 4 | .chat("accounts/fireworks/models/llama-v3p1-70b-instruct") 5 | .messages([system("you are a helpful assistant"), user("hi")]); 6 | const result = await job.run(); 7 | 8 | console.log(result); 9 | -------------------------------------------------------------------------------- /examples/google-chat-stream.ts: -------------------------------------------------------------------------------- 1 | import { google, system, user, text } from "../src"; 2 | 3 | const job = google() 4 | .chat("gemini-1.5-flash") 5 | .messages([system("you are a helpful assistant"), user("hi")]) 6 | .stream(); 7 | for await (const chunk of await job.run()) { 8 | console.log(text(chunk)); 9 | } 10 | -------------------------------------------------------------------------------- /examples/google-chat.ts: -------------------------------------------------------------------------------- 1 | import { google, system, user, text } from "../src"; 2 | 3 | const job = google() 4 | .chat("gemini-1.5-flash") 5 | .messages([system("you are a helpful assistant"), user("hi")]); 6 | const result = await job.run(); 7 | console.log(text(result)); 8 | -------------------------------------------------------------------------------- /examples/groq-chat.ts: -------------------------------------------------------------------------------- 1 | import { openai, system, text, user } from "../src"; 2 | 3 | const job = openai({ 4 | apiKey: process.env.GROQ_API_KEY, 5 | baseURL: "https://api.groq.com/openai/v1", 6 | }) 7 | 
.chat("meta-llama/llama-4-scout-17b-16e-instruct") 8 | .messages([ 9 | system("you are a helpful assistant"), 10 | user("write a 100 word story"), 11 | ]) 12 | .stream(); 13 | for await (const chunk of await job.run()) { 14 | process.stdout.write(text(chunk)); 15 | } 16 | -------------------------------------------------------------------------------- /examples/ollama-chat.ts: -------------------------------------------------------------------------------- 1 | import { ollama, system, user } from "../src"; 2 | 3 | const job = ollama() 4 | .chat("llama3.2") 5 | .messages([system("you are a helpful assistant"), user("hi")]); 6 | const result = await job.run(); 7 | console.log(result); 8 | -------------------------------------------------------------------------------- /examples/ollama-embedding.ts: -------------------------------------------------------------------------------- 1 | import { ollama } from "../src"; 2 | 3 | const job = ollama().embedding("nomic-embed-text").value("hello"); 4 | const result = await job.run(); 5 | console.log(result); 6 | -------------------------------------------------------------------------------- /examples/ollama-models.ts: -------------------------------------------------------------------------------- 1 | import { ollama } from "../src"; 2 | 3 | const job = ollama().models(); 4 | const result = await job.run(); 5 | console.log(result); 6 | -------------------------------------------------------------------------------- /examples/openai-chat-json-schema-stream.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { openai, partialParse, text, user } from "../src"; 3 | 4 | const personSchema = z.object({ 5 | name: z.string(), 6 | age: z.number(), 7 | }); 8 | 9 | const job = openai() 10 | .chat("gpt-4o-mini") 11 | .messages([user("generate a person with name and age in json format")]) 12 | .jsonSchema(personSchema, "person") 13 | .stream(); 14 | 15 | let content = 
""; 16 | for await (const event of await job.run()) { 17 | content += text(event); 18 | console.log(partialParse(content)); 19 | } 20 | const person = personSchema.parse(JSON.parse(content)); 21 | console.log(person); 22 | -------------------------------------------------------------------------------- /examples/openai-chat-json-schema.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { openai, user, object } from "../src"; 3 | 4 | const personSchema = z.object({ 5 | name: z.string(), 6 | age: z.number(), 7 | }); 8 | const job = openai() 9 | .chat("gpt-4o-mini") 10 | .messages([user("generate a person with name and age in json format")]) 11 | .jsonSchema(personSchema, "person"); 12 | 13 | const result = await job.run(); 14 | const person = personSchema.parse(object(result)); 15 | console.log(person); 16 | -------------------------------------------------------------------------------- /examples/openai-chat-load.ts: -------------------------------------------------------------------------------- 1 | import { load } from "../src"; 2 | 3 | const job = load({ 4 | provider: "openai", 5 | type: "chat", 6 | input: { 7 | model: "gpt-4o-mini", 8 | messages: [{ role: "user", content: "hi" }], 9 | }, 10 | }); 11 | const result = await job.run(); 12 | console.log(result); 13 | -------------------------------------------------------------------------------- /examples/openai-chat-stream.ts: -------------------------------------------------------------------------------- 1 | import { text, openai } from "../src"; 2 | 3 | const job = openai() 4 | .chat("gpt-4o-mini") 5 | .messages([{ role: "user", content: "generate a 50 words text" }]) 6 | .stream(); 7 | const stream = await job.run(); 8 | for await (const chunk of stream) { 9 | process.stdout.write(text(chunk)); 10 | } 11 | -------------------------------------------------------------------------------- /examples/openai-chat-tool-stream.ts: 
-------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { openai, tool } from "../src"; 3 | 4 | const weatherTool = tool("get_current_weather") 5 | .description("Get the current weather in a given location") 6 | .parameters( 7 | z.object({ 8 | location: z.string(), 9 | unit: z.enum(["celsius", "fahrenheit"]).optional(), 10 | }) 11 | ); 12 | const job = openai() 13 | .chat("gpt-4o-mini") 14 | .tool(weatherTool) 15 | .messages([ 16 | { 17 | role: "user", 18 | content: "What's the weather like in Boston, Beijing, Tokyo today?", 19 | }, 20 | ]) 21 | .stream(); 22 | const stream = await job.run(); 23 | 24 | for await (const event of stream) { 25 | console.log(event); 26 | } 27 | -------------------------------------------------------------------------------- /examples/openai-chat-tool.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { openai, tool } from "../src"; 3 | 4 | const weatherTool = tool("get_current_weather") 5 | .description("Get the current weather in a given location") 6 | .parameters( 7 | z.object({ 8 | location: z.string(), 9 | unit: z.enum(["celsius", "fahrenheit"]).optional(), 10 | }) 11 | ); 12 | const job = openai() 13 | .chat("gpt-4o-mini") 14 | .tool(weatherTool) 15 | .messages([ 16 | { 17 | role: "user", 18 | content: "What's the weather like in Boston, Beijing, Tokyo today?", 19 | }, 20 | ]); 21 | const result = await job.run(); 22 | console.log(JSON.stringify(result, null, 2)); 23 | console.log(job.dump()); 24 | -------------------------------------------------------------------------------- /examples/openai-chat-vision-stream.ts: -------------------------------------------------------------------------------- 1 | import { openai, text } from "../src"; 2 | 3 | const job = openai() 4 | .chat("gpt-4o-mini") 5 | .messages([ 6 | { 7 | role: "user", 8 | content: [ 9 | { type: "text", text: "What is in this 
image?" }, 10 | { 11 | type: "image_url", 12 | image_url: { 13 | url: "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", 14 | }, 15 | }, 16 | ], 17 | }, 18 | ]) 19 | .stream(); 20 | const stream = await job.run(); 21 | for await (const chunk of stream) { 22 | process.stdout.write(text(chunk)); 23 | } 24 | -------------------------------------------------------------------------------- /examples/openai-chat-vision.ts: -------------------------------------------------------------------------------- 1 | import { openai, text } from "../src"; 2 | 3 | const job = openai() 4 | .chat("gpt-4o-mini") 5 | .messages([ 6 | { 7 | role: "user", 8 | content: [ 9 | { type: "text", text: "What is in this image?" }, 10 | { 11 | type: "image_url", 12 | image_url: { 13 | url: "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", 14 | }, 15 | }, 16 | ], 17 | }, 18 | ]); 19 | const result = await job.run(); 20 | console.log(text(result)); 21 | -------------------------------------------------------------------------------- /examples/openai-chat.ts: -------------------------------------------------------------------------------- 1 | import { openai, system, text, user } from "../src"; 2 | 3 | const job = openai({}) 4 | .chat("gpt-4o-mini") 5 | .messages([system("you are a helpful assistant"), user("hi")]); 6 | const result = await job.run(); 7 | console.log(text(result)); 8 | -------------------------------------------------------------------------------- /examples/openai-embedding.ts: -------------------------------------------------------------------------------- 1 | import { openai } from "../src"; 2 | 3 | const job = openai().embedding("text-embedding-3-small").value("hello"); 4 | const result = await job.run(); 5 | console.log(result.embedding); 6 | 
-------------------------------------------------------------------------------- /examples/openai-image.ts: -------------------------------------------------------------------------------- 1 | import { openai } from "../src"; 2 | 3 | const job = openai() 4 | .image("dalle-2") 5 | .prompt("a cat") 6 | .size({ width: 512, height: 512 }); 7 | const result = await job.run(); 8 | 9 | console.log(result); 10 | -------------------------------------------------------------------------------- /examples/openai-models.ts: -------------------------------------------------------------------------------- 1 | import { openai } from "../src"; 2 | 3 | const job = openai().models(); 4 | const result = await job.run(); 5 | 6 | console.log(result); 7 | -------------------------------------------------------------------------------- /examples/perplexity-chat.ts: -------------------------------------------------------------------------------- 1 | import { openai, system, user } from "../src"; 2 | 3 | const job = openai({ 4 | apiKey: process.env.PERPLEXITY_API_KEY, 5 | baseURL: "https://api.perplexity.ai", 6 | }) 7 | .chat("llama-3.1-sonar-small-128k-online") 8 | .messages([ 9 | system("Be precise and concise."), 10 | user("How many stars are there in our galaxy?"), 11 | ]); 12 | const result = await job.run(); 13 | 14 | console.log(result); 15 | -------------------------------------------------------------------------------- /examples/together-chat.ts: -------------------------------------------------------------------------------- 1 | import { system, together, user } from "../src"; 2 | 3 | const job = together() 4 | .chat("meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo") 5 | .messages([system("you are a helpful assistant"), user("who are you")]); 6 | 7 | const result = await job.run(); 8 | console.log(result.text); 9 | -------------------------------------------------------------------------------- /examples/voyage-embedding.ts: 
-------------------------------------------------------------------------------- 1 | import { voyage } from "../src"; 2 | 3 | const job = voyage().embedding("voyage-3-lite").value("hello"); 4 | const result = await job.run(); 5 | console.log(result); 6 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "fluent-ai", 3 | "description": "A lightweight AI toolkit for multiple platforms", 4 | "version": "0.3.0", 5 | "type": "module", 6 | "main": "./dist/index.cjs", 7 | "module": "./dist/index.js", 8 | "types": "./dist/index.d.ts", 9 | "exports": { 10 | "types": "./dist/index.d.ts", 11 | "import": "./dist/index.js", 12 | "require": "./dist/index.cjs" 13 | }, 14 | "scripts": { 15 | "build": "bun run build.ts", 16 | "prepublishOnly": "bun run build" 17 | }, 18 | "files": [ 19 | "dist" 20 | ], 21 | "dependencies": { 22 | "eventsource-parser": "^3.0.0", 23 | "partial-json": "^0.1.7" 24 | }, 25 | "keywords": [ 26 | "ai", 27 | "openai", 28 | "llm", 29 | "bun", 30 | "ollama", 31 | "anthropic", 32 | "zod" 33 | ], 34 | "devDependencies": { 35 | "@types/bun": "latest", 36 | "bun-plugin-dts": "^0.3.0" 37 | }, 38 | "peerDependencies": { 39 | "typescript": "^5.0.0", 40 | "zod": "^4.0.0-beta.20250418T202744" 41 | }, 42 | "license": "Apache-2.0", 43 | "repository": { 44 | "type": "git", 45 | "url": "git+https://github.com/modalityml/fluent-ai.git" 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | export * from "./jobs/load"; 2 | export * from "./jobs/schema"; 3 | export * from "./jobs/chat"; 4 | export * from "./jobs/image"; 5 | export * from "./jobs/embedding"; 6 | export * from "./jobs/models"; 7 | 8 | export * from "./providers/anthropic"; 9 | export * from "./providers/deepseek"; 10 | export 
* from "./providers/fal"; 11 | export * from "./providers/fireworks"; 12 | export * from "./providers/google"; 13 | export * from "./providers/luma"; 14 | export * from "./providers/ollama"; 15 | export * from "./providers/openai"; 16 | export * from "./providers/together"; 17 | export * from "./providers/voyage"; 18 | -------------------------------------------------------------------------------- /src/jobs/builder.ts: -------------------------------------------------------------------------------- 1 | import { version } from "../../package.json"; 2 | import type { Job } from "./load"; 3 | import type { 4 | JobCost, 5 | JobOptions, 6 | JobPerformance, 7 | JobProvider, 8 | JobType, 9 | } from "./schema"; 10 | 11 | export class HTTPError extends Error { 12 | status: number; 13 | json?: any; 14 | 15 | constructor(message: string, status: number, json?: any) { 16 | super(message); 17 | this.status = status; 18 | this.json = json; 19 | } 20 | } 21 | 22 | export class JobBuilder { 23 | provider!: JobProvider; 24 | options!: JobOptions; 25 | type!: JobType; 26 | input?: Input; 27 | output?: Output; 28 | cost?: JobCost; 29 | performance?: JobPerformance; // TODO: track job performance 30 | 31 | makeRequest?: () => Request; 32 | handleResponse?: (response: Response) => any; 33 | 34 | async run(): Promise { 35 | const request = this.makeRequest!(); 36 | const response = await fetch(request); 37 | if (!response.ok) { 38 | let json; 39 | try { 40 | json = await response.json(); 41 | } catch (e) {} 42 | 43 | throw new HTTPError( 44 | `Fetch error: ${response.statusText}`, 45 | response.status, 46 | json 47 | ); 48 | } 49 | return await this.handleResponse!(response); 50 | } 51 | 52 | dump() { 53 | return { 54 | version: version, 55 | provider: this.provider, 56 | options: this.options, 57 | type: this.type, 58 | input: this.input!, 59 | output: this.output as any, 60 | cost: this.cost, 61 | performance: this.performance, 62 | } as Job; 63 | } 64 | } 65 | 
-------------------------------------------------------------------------------- /src/jobs/chat/builder.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { JobBuilder } from "~/jobs/builder"; 3 | import type { 4 | ChatInput, 5 | ChatOutput, 6 | ChatStreamOptions, 7 | Message, 8 | ResponseFormat, 9 | } from "./schema"; 10 | import type { ChatTool } from "./tool"; 11 | 12 | export class ChatJobBuilder extends JobBuilder { 13 | input: ChatInput; 14 | 15 | constructor(model: string) { 16 | super(); 17 | this.type = "chat"; 18 | this.input = { 19 | model: model, 20 | messages: [], 21 | }; 22 | } 23 | 24 | system(system: string) { 25 | this.input.system = system; 26 | return this; 27 | } 28 | 29 | messages(messages: Message[]) { 30 | this.input.messages = messages; 31 | return this; 32 | } 33 | 34 | temperature(temperature: number) { 35 | this.input.temperature = temperature; 36 | return this; 37 | } 38 | 39 | maxTokens(maxTokens: number) { 40 | this.input.maxTokens = maxTokens; 41 | return this; 42 | } 43 | 44 | topP(topP: number) { 45 | this.input.topP = topP; 46 | return this; 47 | } 48 | 49 | topK(topK: number) { 50 | this.input.topK = topK; 51 | return this; 52 | } 53 | 54 | tools(tools: ChatTool[]) { 55 | this.input.tools = tools.map((tool) => tool.params); 56 | return this; 57 | } 58 | 59 | tool(tool: ChatTool) { 60 | if (!this.input.tools) { 61 | this.input.tools = []; 62 | } 63 | this.input.tools.push(tool.params); 64 | return this; 65 | } 66 | 67 | toolChoice(toolChoice: string) { 68 | this.input.toolChoice = toolChoice; 69 | return this; 70 | } 71 | 72 | responseFormat(responseFormat: ResponseFormat) { 73 | this.input.responseFormat = responseFormat; 74 | return this; 75 | } 76 | 77 | jsonSchema(schema: z.ZodType, name: string, description?: string) { 78 | this.input.jsonSchema = { 79 | name, 80 | description, 81 | schema, 82 | }; 83 | 84 | return this; 85 | } 86 | 87 | 
stream(streamOptions?: ChatStreamOptions) { 88 | this.input.stream = true; 89 | if (streamOptions) { 90 | this.input.streamOptions = streamOptions; 91 | } 92 | return this; 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/jobs/chat/index.ts: -------------------------------------------------------------------------------- 1 | export * from "./builder"; 2 | export * from "./schema"; 3 | export * from "./tool"; 4 | export * from "./utils"; 5 | -------------------------------------------------------------------------------- /src/jobs/chat/schema.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { BaseJobSchema } from "~/jobs/schema"; 3 | 4 | export const MessageSchema = z.object({ 5 | role: z.enum(["system", "user", "assistant"]), 6 | content: z.union([ 7 | z.string(), 8 | z.array( 9 | z.union([ 10 | z.object({ type: z.literal("text"), text: z.string() }), 11 | z.object({ 12 | type: z.literal("image_url"), 13 | image_url: z.object({ 14 | url: z.string(), 15 | }), 16 | }), 17 | ]) 18 | ), 19 | ]), 20 | }); 21 | 22 | export type Message = z.infer; 23 | 24 | export const ChatStreamOptionsSchema = z.object({ 25 | includeUsage: z.boolean().optional(), 26 | }); 27 | 28 | export type ChatStreamOptions = z.infer; 29 | 30 | export const ResponseFormatSchema = z.object({ 31 | type: z.enum(["json_object", "json_schema"]), 32 | json_schema: z.any().optional(), 33 | }); 34 | 35 | export type ResponseFormat = z.infer; 36 | 37 | export const ChatToolSchema = z.object({ 38 | name: z.string(), 39 | description: z.string().optional(), 40 | parameters: z.any().optional(), 41 | }); 42 | 43 | export const JsonSchemaDefSchema = z.object({ 44 | name: z.string(), 45 | description: z.string().optional(), 46 | schema: z.any(), 47 | }); 48 | 49 | export const ChunkSchema = z.object({}); 50 | 51 | export const ChatResultSchema = z.object({ 52 | message: z.object({ 53 
| role: z.literal("assistant"), 54 | content: z.string().nullable(), 55 | }), 56 | usage: z 57 | .object({ 58 | prompt_tokens: z.number(), 59 | completion_tokens: z.number(), 60 | total_tokens: z.number(), 61 | }) 62 | .optional(), 63 | tool_calls: z 64 | .array( 65 | z.object({ 66 | name: z.string(), 67 | arguments: z.record(z.string(), z.any()), 68 | }) 69 | ) 70 | .optional(), 71 | }); 72 | 73 | export const ChatInputSchema = z.object({ 74 | model: z.string(), 75 | temperature: z.number().optional(), 76 | stream: z.boolean().optional(), 77 | streamOptions: ChatStreamOptionsSchema.optional(), 78 | maxTokens: z.number().optional(), 79 | messages: z.array(MessageSchema), 80 | tools: z.array(ChatToolSchema).optional(), 81 | toolChoice: z.string().optional(), 82 | responseFormat: ResponseFormatSchema.optional(), 83 | topP: z.number().optional(), 84 | topK: z.number().optional(), 85 | system: z.string().optional(), 86 | jsonSchema: JsonSchemaDefSchema.optional(), 87 | }); 88 | 89 | // TODO: Add a schema for the output 90 | export const ChatOutputSchema = z.any(); 91 | 92 | export const ChatJobSchema = BaseJobSchema.extend({ 93 | type: z.literal("chat"), 94 | input: ChatInputSchema, 95 | output: ChatOutputSchema.optional(), 96 | }); 97 | 98 | export type ChatInput = z.infer; 99 | 100 | export type ChatOutput = z.infer; 101 | -------------------------------------------------------------------------------- /src/jobs/chat/tool.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import type { ChatToolSchema } from "./schema"; 3 | 4 | export class ChatTool { 5 | public params: z.infer; 6 | 7 | constructor(name: string) { 8 | this.params = { name }; 9 | } 10 | 11 | description(description: string) { 12 | this.params.description = description; 13 | return this; 14 | } 15 | 16 | parameters(parameters: z.ZodType) { 17 | this.params.parameters = parameters; 18 | return this; 19 | } 20 | } 21 | 
-------------------------------------------------------------------------------- /src/jobs/chat/utils.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { parse } from "partial-json"; 3 | import type { ChatToolSchema, Message } from "./schema"; 4 | import { ChatTool } from "./tool"; 5 | 6 | export function convertTools(tools: z.infer[]) { 7 | return tools.map((tool) => ({ 8 | type: "function", 9 | function: { 10 | name: tool.name, 11 | description: tool.description, 12 | parameters: z.toJSONSchema(tool.parameters), 13 | }, 14 | })); 15 | } 16 | 17 | export function user(content: string) { 18 | return { role: "user", content: content } as Message; 19 | } 20 | 21 | export function assistant(content: string) { 22 | return { role: "assistant", content: content } as Message; 23 | } 24 | 25 | export function system(content: string) { 26 | return { role: "system", content: content } as Message; 27 | } 28 | 29 | export function audio() { 30 | throw new Error("Not implemented"); 31 | } 32 | 33 | export function image() { 34 | throw new Error("Not implemented"); 35 | } 36 | 37 | export function tool(name: string) { 38 | return new ChatTool(name); 39 | } 40 | 41 | export function text(result: any) { 42 | if (result.raw) { 43 | if (result.raw.candidates) { 44 | return result.raw.candidates[0].content.parts[0].text; 45 | } 46 | 47 | if (result.raw.choices[0].message) { 48 | return result.raw.choices[0].message.content; 49 | } 50 | 51 | if (result.raw.choices[0].delta.content) { 52 | return result.raw.choices[0].delta.content; 53 | } 54 | } 55 | return ""; 56 | } 57 | 58 | export function object(result: any) { 59 | return JSON.parse(result.raw.choices[0].message.content); 60 | } 61 | 62 | export function partialParse(content: string) { 63 | if (content) { 64 | return parse(content); 65 | } 66 | return {}; 67 | } 68 | -------------------------------------------------------------------------------- 
/src/jobs/embedding/builder.ts: -------------------------------------------------------------------------------- 1 | import { JobBuilder } from "~/jobs/builder"; 2 | import type { EmbeddingInput, EmbeddingOutput } from "./schema"; 3 | 4 | export class EmbeddingJobBuilder extends JobBuilder< 5 | EmbeddingInput, 6 | EmbeddingOutput 7 | > { 8 | input: EmbeddingInput; 9 | 10 | constructor(model: string) { 11 | super(); 12 | this.type = "embedding"; 13 | this.input = { 14 | model: model, 15 | }; 16 | } 17 | 18 | value(value: string) { 19 | this.input.value = value; 20 | return this; 21 | } 22 | 23 | dimensions(dimensions: number) { 24 | this.input.dimensions = dimensions; 25 | return this; 26 | } 27 | 28 | encodingFormat(encodingFormat: string) { 29 | this.input.encodingFormat = encodingFormat; 30 | return this; 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/jobs/embedding/index.ts: -------------------------------------------------------------------------------- 1 | export * from "./builder"; 2 | export * from "./schema"; 3 | -------------------------------------------------------------------------------- /src/jobs/embedding/schema.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { BaseJobSchema } from "~/jobs/schema"; 3 | 4 | const EmbeddingInputSchema = z.object({ 5 | model: z.string(), 6 | value: z.string().optional(), 7 | dimensions: z.number().optional(), 8 | encodingFormat: z.string().optional(), 9 | }); 10 | 11 | const EmbeddingOutputSchema = z.object({ 12 | raw: z.any(), 13 | embedding: z.array(z.number()), 14 | }); 15 | 16 | export const EmbeddingJobSchema = BaseJobSchema.extend({ 17 | type: z.literal("embedding"), 18 | input: EmbeddingInputSchema, 19 | output: EmbeddingOutputSchema.optional(), 20 | }); 21 | 22 | export type EmbeddingInput = z.infer; 23 | 24 | export type EmbeddingOutput = z.infer; 25 | 
--------------------------------------------------------------------------------
/src/jobs/image/builder.ts:
--------------------------------------------------------------------------------
import { JobBuilder } from "~/jobs/builder";
import type { ImageInput, ImageOutput, ImageSize } from "./schema";

/** Fluent builder for image-generation jobs (OpenAI- and fal-style options). */
export class ImageJobBuilder extends JobBuilder<ImageInput, ImageOutput> {
  input: ImageInput;

  constructor(model: string) {
    super();
    this.type = "image";
    this.input = {
      model: model,
    };
  }

  prompt(prompt: string) {
    this.input.prompt = prompt;
    return this;
  }

  /** Number of images to generate. */
  n(numImages: number) {
    this.input.n = numImages;
    return this;
  }

  quality(quality: string) {
    this.input.quality = quality;
    return this;
  }

  responseFormat(responseFormat: string) {
    this.input.responseFormat = responseFormat;
    return this;
  }

  size(imageSize: ImageSize) {
    this.input.size = imageSize;
    return this;
  }

  style(style: string) {
    this.input.style = style;
    return this;
  }

  user(user: string) {
    this.input.user = user;
    return this;
  }

  // The following options are used by diffusion-style providers (e.g. fal).
  numInferenceSteps(numInferenceSteps: number) {
    this.input.numInferenceSteps = numInferenceSteps;
    return this;
  }

  seed(seed: number) {
    this.input.seed = seed;
    return this;
  }

  guidanceScale(guidanceScale: number) {
    this.input.guidanceScale = guidanceScale;
    return this;
  }

  syncMode(syncMode: boolean) {
    this.input.syncMode = syncMode;
    return this;
  }

  enableSafetyChecker(enableSafetyChecker: boolean) {
    this.input.enableSafetyChecker = enableSafetyChecker;
    return this;
  }

  stream() {
    this.input.stream = true;
    return this;
  }
}
--------------------------------------------------------------------------------
/src/jobs/image/index.ts:
--------------------------------------------------------------------------------
export * from "./builder";
export * from "./schema";
--------------------------------------------------------------------------------
/src/jobs/image/schema.ts:
--------------------------------------------------------------------------------
import { z } from "zod";
import { BaseJobSchema } from "~/jobs/schema";

export const ImageSizeSchema = z.union([
  z.literal("square_hd"),
  z.literal("square"),
  z.literal("portrait_4_3"),
  z.literal("portrait_16_9"),
  z.literal("landscape_4_3"),
  z.literal("landscape_16_9"),
  z.object({
    width: z.number(),
    height: z.number(),
  }),
]);

export type ImageSize = z.infer<typeof ImageSizeSchema>;

export const ImageInputSchema = z.object({
  model: z.string(),
  prompt: z.string().optional(),
  n: z.number().optional(),
  quality: z.string().optional(),
  responseFormat: z.string().optional(),
  size: ImageSizeSchema.optional(),
  style: z.string().optional(),
  user: z.string().optional(),
  numInferenceSteps: z.number().optional(),
  seed: z.number().optional(),
  guidanceScale: z.number().optional(),
  syncMode: z.boolean().optional(),
  enableSafetyChecker: z.boolean().optional(),
  stream: z.boolean().optional(),
});

// Renamed from the misspelled "ImageOuputSchema" — the const is file-local
// (not exported), so the rename is safe.
const ImageOutputSchema = z.object({
  raw: z.any(),
  images: z.array(
    z.union([
      z.object({
        url: z.string(),
      }),
      z.object({
        base64: z.string(),
      }),
    ])
  ),
  metadata: z
    .object({
      prompt: z.string(),
      size: ImageSizeSchema,
      seed: z.number().optional(),
    })
    .optional(),
});

export const ImageJobSchema = BaseJobSchema.extend({
  type: z.literal("image"),
  input: ImageInputSchema,
  output: ImageOutputSchema.optional(),
});

export type ImageInput = z.infer<typeof ImageInputSchema>;

export type ImageOutput = z.infer<typeof ImageOutputSchema>;
--------------------------------------------------------------------------------
/src/jobs/load.ts:
--------------------------------------------------------------------------------
import { z } from "zod";
import { anthropic, AnthropicJobSchema } from "~/providers/anthropic";
import { deepseek, DeepseekJobSchema } from "~/providers/deepseek";
import { fal, FalJobSchema } from "~/providers/fal";
import { google, GoogleJobSchema } from "~/providers/google";
import { luma, LumaJobSchema } from "~/providers/luma";
import { ollama, OllamaJobSchema } from "~/providers/ollama";
import { openai, OpenAIJobSchema } from "~/providers/openai";
import { voyage, VoyageJobSchema } from "~/providers/voyage";

export const JobSchema = z.union([
  AnthropicJobSchema,
  DeepseekJobSchema,
  FalJobSchema,
  GoogleJobSchema,
  LumaJobSchema,
  OllamaJobSchema,
  OpenAIJobSchema,
  VoyageJobSchema,
]);

export type Job = z.infer<typeof JobSchema>;

export const jobJSONSchema = z.toJSONSchema(JobSchema);

/**
 * Reconstruct a builder from a serialized job object.
 * Validates against JobSchema, instantiates the matching provider, and
 * replays the stored input onto the appropriate builder.
 */
export function load(obj: Job) {
  obj = JobSchema.parse(obj);

  let provider = null;
  if (obj.provider === "anthropic") {
    provider = anthropic(obj.options);
  } else if (obj.provider === "deepseek") {
    provider = deepseek(obj.options);
  } else if (obj.provider === "fal") {
    provider = fal(obj.options);
  } else if (obj.provider === "google") {
    // BUG FIX: google/luma jobs validate against JobSchema (their schemas
    // are in the union above) but previously fell through to the
    // "Unknown provider" error because these branches were missing.
    provider = google(obj.options);
  } else if (obj.provider === "luma") {
    provider = luma(obj.options);
  } else if (obj.provider === "ollama") {
    provider = ollama();
  } else if (obj.provider === "openai") {
    provider = openai(obj.options);
  } else if (obj.provider === "voyage") {
    provider = voyage(obj.options);
  }

  if (!provider) {
    throw new Error("Unknown provider " + obj.provider);
  }

  let builder = null;

  if (obj.type === "chat" && "chat" in provider) {
    builder = provider.chat(obj.input.model);
  }
  if (obj.type === "embedding" && "embedding" in provider) {
    builder = provider.embedding(obj.input.model);
  }
  if (obj.type === "image" && "image" in provider) {
    builder = provider.image(obj.input.model);
  }
  if (obj.type === "models" && "models" in provider) {
    builder = provider.models();
  }

  if (!builder) {
    throw new Error("Failed to load job");
  }

  builder.input = obj.input;

  return builder;
}
--------------------------------------------------------------------------------
/src/jobs/models/builder.ts:
--------------------------------------------------------------------------------
import { JobBuilder } from "~/jobs/builder";
import type { ModelsInput, ModelsOutput } from "./schema";

/** Builder for model-listing jobs; takes no input parameters. */
export class ModelsJobBuilder extends JobBuilder<ModelsInput, ModelsOutput> {
  input: ModelsInput;

  constructor() {
    super();
    this.type = "models";
    this.input = {};
  }
}
--------------------------------------------------------------------------------
/src/jobs/models/index.ts:
--------------------------------------------------------------------------------
export * from "./builder";
export * from "./schema";
--------------------------------------------------------------------------------
/src/jobs/models/schema.ts:
--------------------------------------------------------------------------------
import { z } from "zod";
import { BaseJobSchema } from "~/jobs/schema";

export const ModelsInputSchema = z.object({});

// NOTE(review): "Ouput" is a typo, but the const is exported and may be
// imported elsewhere, so the name is kept for backward compatibility.
export const ModelsOuputSchema = z.object({});

export const ModelsJobSchema = BaseJobSchema.extend({
  type: z.literal("models"),
  input: ModelsInputSchema,
  output: ModelsOuputSchema.optional(),
});

export type ModelsInput = z.infer<typeof ModelsInputSchema>;

export type ModelsOutput = z.infer<typeof ModelsOuputSchema>;
--------------------------------------------------------------------------------
/src/jobs/schema.ts:
--------------------------------------------------------------------------------
import { z } from "zod";

export const JobProviderSchema = z.enum([
  "anthropic",
  "deepseek",
  "fal",
  "google",
  "luma",
  "ollama",
  "openai",
  "voyage",
]);

export const JobTypeSchema = z.enum(["chat", "image", "models", "embedding"]);

export const JobOptionsSchema = z.object({
  apiKey: z.string().optional(),
  baseURL: z.string().optional(),
});

export const JobCostSchema = z.object({
  promptTokens: z.number().optional(),
  completionTokens: z.number().optional(),
  totalTokens: z.number().optional(),
});

export const JobPerformance = z.object({});

export type JobCost = z.infer<typeof JobCostSchema>;

export type JobPerformance = z.infer<typeof JobPerformance>;

export const BaseJobSchema = z.object({
  version: z.string().optional(),
  options: JobOptionsSchema.optional(),
  cost: JobCostSchema.optional(),
  performance: JobPerformance.optional(),
});

export type JobProvider = z.infer<typeof JobProviderSchema>;
export type JobType = z.infer<typeof JobTypeSchema>;
export type BaseJob = z.infer<typeof BaseJobSchema>;
export type JobOptions = z.infer<typeof JobOptionsSchema>;
--------------------------------------------------------------------------------
/src/jobs/stream.ts:
--------------------------------------------------------------------------------
import { EventSourceParserStream } from "eventsource-parser/stream";

/**
 * Turn an SSE Response into an async generator of `{ raw }` chunks.
 * Terminates on stream end or the OpenAI-style "[DONE]" sentinel.
 * The `<Output>` type parameter (lost in a previous edit — `as Output`
 * referenced an undeclared name) lets callers type the yielded chunks.
 */
export function jobStream<Output>(response: Response) {
  return (async function* () {
    const eventStream = response
      .body!.pipeThrough(new TextDecoderStream())
      .pipeThrough(new EventSourceParserStream());
    const reader = eventStream.getReader();
    for (;;) {
      const { done, value } = await reader.read();
      if (done || value.data === "[DONE]") {
        break;
      }
      const chunk = JSON.parse(value.data);
      yield { raw: chunk } as Output;
    }
  })();
}
--------------------------------------------------------------------------------
/src/providers/anthropic/chat.ts:
--------------------------------------------------------------------------------
import { ChatJobBuilder, convertTools } from "~/jobs/chat";
import type { JobOptions } from "~/jobs/schema";

/** Chat builder targeting the Anthropic Messages API. */
export class AnthropicChatJobBuilder extends ChatJobBuilder {
  constructor(options: JobOptions, model: string) {
    super(model);
    this.provider = "anthropic";
    this.options = options;
  }

  makeRequest = () => {
    // NOTE(review): the Anthropic Messages API requires max_tokens; if
    // input.maxTokens is unset the request will be rejected — confirm
    // callers always set it or add a default upstream.
    const requestParams = {
      model: this.input.model,
      max_tokens: this.input.maxTokens,
      messages: this.input.messages,
    } as any;

    if (this.input.tools && this.input.tools.length) {
      requestParams.tools = convertTools(this.input.tools);
      requestParams.tool_choice = this.input.toolChoice;
    }

    const headers = {
      "anthropic-version": "2023-06-01",
      "x-api-key": this.options.apiKey!,
      "Content-Type": "application/json",
    };

    return new Request("https://api.anthropic.com/v1/messages", {
      method: "POST",
      headers: headers,
      body: JSON.stringify(requestParams),
    });
  };

  handleResponse = async (response: Response) => {
    const raw = await response.json();
    return { raw };
  };
}
--------------------------------------------------------------------------------
/src/providers/anthropic/index.ts:
--------------------------------------------------------------------------------
import type { JobOptions } from "~/jobs/schema";
import { AnthropicChatJobBuilder } from "./chat";
import { AnthropicModelsJobBuilder } from "./models";

/** Provider factory; falls back to ANTHROPIC_API_KEY from the environment. */
export function anthropic(options?: JobOptions) {
  options = options || {};
  options.apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY;

  return {
    chat(model: string) {
      return new AnthropicChatJobBuilder(options, model);
    },
    models() {
      return new AnthropicModelsJobBuilder(options);
    },
  };
}

export * from "./chat";
export * from "./models";
export * from "./schema";
--------------------------------------------------------------------------------
/src/providers/anthropic/models.ts:
--------------------------------------------------------------------------------
import { ModelsJobBuilder } from "~/jobs/models";
import type { JobOptions } from "~/jobs/schema";

/** Lists available Anthropic models via GET /v1/models. */
export class AnthropicModelsJobBuilder extends ModelsJobBuilder {
  constructor(options: JobOptions) {
    super();
    this.provider = "anthropic";
    this.options = options;
  }

  makeRequest = () => {
    const headers = {
      "anthropic-version": "2023-06-01",
      "x-api-key": this.options.apiKey!,
      "Content-Type": "application/json",
    };

    return new Request("https://api.anthropic.com/v1/models", {
      method: "GET",
      headers: headers,
    });
  };

  handleResponse = async (response: Response) => {
    const json = await response.json();
    return json;
  };
}
--------------------------------------------------------------------------------
/src/providers/anthropic/schema.ts:
--------------------------------------------------------------------------------
import { z } from "zod";
import { ChatJobSchema } from "~/jobs/chat";
import { ModelsJobSchema } from "~/jobs/models";

export const AnthropicBaseJobSchema = z.object({
  provider: z.literal("anthropic"),
});

export const AnthropicChatJobSchema = ChatJobSchema.extend(
  AnthropicBaseJobSchema
);
export type AnthropicChatJob = z.infer<typeof AnthropicChatJobSchema>;

export const AnthropicModelsJobSchema = ModelsJobSchema.extend(
  AnthropicBaseJobSchema
);
export type AnthropicModelsJob = z.infer<typeof AnthropicModelsJobSchema>;

export const AnthropicJobSchema = z.discriminatedUnion("type", [
  AnthropicChatJobSchema,
  AnthropicModelsJobSchema,
]);
export type AnthropicJob = z.infer<typeof AnthropicJobSchema>;
--------------------------------------------------------------------------------
/src/providers/deepseek/index.ts:
--------------------------------------------------------------------------------
import { z } from "zod";
import { ChatJobSchema } from "~/jobs/chat";
import { ModelsJobSchema } from "~/jobs/models";
import type { JobOptions } from "~/jobs/schema";
import { OpenAIChatJobBuilder } from "~/providers/openai";
import { OpenAIModelsJobBuilder } from "~/providers/openai/models";

export const BaseDeepseekJobSchema = z.object({
  provider: z.literal("deepseek"),
});
export const DeepseekChatJobSchema = ChatJobSchema.extend(
  BaseDeepseekJobSchema
);
export type DeepseekChatJob = z.infer<typeof DeepseekChatJobSchema>;
export const DeepseekModelsJobSchema = ModelsJobSchema.extend(
  BaseDeepseekJobSchema
);
export type DeepseekModelsJob = z.infer<typeof DeepseekModelsJobSchema>;
export const DeepseekJobSchema = z.discriminatedUnion("type", [
  DeepseekChatJobSchema,
  DeepseekModelsJobSchema,
]);
export type DeepseekJob = z.infer<typeof DeepseekJobSchema>;

/** Deepseek speaks the OpenAI wire protocol; reuse the OpenAI builders. */
export function deepseek(options?: JobOptions) {
  options = options || {};
  options.apiKey = options.apiKey || process.env.DEEPSEEK_API_KEY;

  return {
    chat(model: string) {
      return new OpenAIChatJobBuilder(
        {
          ...options,
          baseURL: "https://api.deepseek.com",
        },
        model
      );
    },

    models() {
      return new OpenAIModelsJobBuilder({
        ...options,
        baseURL: "https://api.deepseek.com",
      });
    },
  };
}
--------------------------------------------------------------------------------
/src/providers/fal/image.ts:
--------------------------------------------------------------------------------
import { ImageJobBuilder } from "~/jobs/image";
import type { JobOptions } from "~/jobs/schema";

/** Image builder targeting fal's queue API (model name is the URL path). */
export class FalImageJobBuilder extends ImageJobBuilder {
  constructor(options: JobOptions, model: string) {
    super(model);
    this.provider = "fal";
    this.options = options;
  }

  makeRequest = () => {
    return new Request(`https://queue.fal.run/${this.input.model}`, {
      method: "POST",
      headers: {
        Authorization: `Key ${this.options.apiKey}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        prompt: this.input.prompt,
        image_size: this.input.size,
        num_inference_steps: this.input.numInferenceSteps,
        seed: this.input.seed,
        guidance_scale: this.input.guidanceScale,
        sync_mode: this.input.syncMode,
        num_images: this.input.n,
        enable_safety_checker: this.input.enableSafetyChecker,
      }),
    });
  };

  handleResponse = async (response: Response) => {
    return await response.json();
  };
}
--------------------------------------------------------------------------------
/src/providers/fal/index.ts:
--------------------------------------------------------------------------------
import type { JobOptions } from "~/jobs/schema";
import { FalImageJobBuilder } from "./image";

/** Provider factory; falls back to FAL_API_KEY from the environment. */
export function fal(options?: JobOptions) {
  options = options || {};
  options.apiKey = options.apiKey || process.env.FAL_API_KEY;

  return {
    image(model: string) {
      return new FalImageJobBuilder(options, model);
    },
  };
}

export * from "./image";
export * from "./schema";
--------------------------------------------------------------------------------
/src/providers/fal/schema.ts:
--------------------------------------------------------------------------------
import { z } from "zod";
import { ImageJobSchema } from "~/jobs/image";

export type FalImage = {
  url: string;
  width: number;
  height: number;
  contentType: string;
};

export const FalBaseJobSchema = z.object({
  provider: z.literal("fal"),
});

export const FalImageJobSchema =
ImageJobSchema.extend(FalBaseJobSchema);
export type FalImageJob = z.infer<typeof FalImageJobSchema>;

export const FalJobSchema = z.discriminatedUnion("type", [FalImageJobSchema]);
export type FalJob = z.infer<typeof FalJobSchema>;
--------------------------------------------------------------------------------
/src/providers/fireworks/index.ts:
--------------------------------------------------------------------------------
import type { JobOptions } from "~/jobs/schema";
import { OpenAIChatJobBuilder } from "~/providers/openai";

/** Fireworks speaks the OpenAI wire protocol; reuse the OpenAI chat builder. */
export function fireworks(options?: JobOptions) {
  options = options || {};
  options.apiKey = options.apiKey || process.env.FIREWORKS_API_KEY;

  return {
    chat(model: string) {
      return new OpenAIChatJobBuilder(
        {
          ...options,
          baseURL: "https://api.fireworks.ai/inference/v1",
        },
        model
      );
    },
  };
}
--------------------------------------------------------------------------------
/src/providers/google/chat.ts:
--------------------------------------------------------------------------------
import { ChatJobBuilder } from "~/jobs/chat";
import type { JobOptions } from "~/jobs/schema";

/** Chat builder targeting the Gemini generateContent endpoint. */
export class GoogleChatJobBuilder extends ChatJobBuilder {
  constructor(options: JobOptions, model: string) {
    super(model);
    this.provider = "google";
    this.options = options;
  }

  makeRequest = () => {
    return new Request(
      `https://generativelanguage.googleapis.com/v1beta/models/${this.input.model}:generateContent?key=${this.options.apiKey}`,
      {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          // Gemini only knows "user" and "model" roles; anything that is
          // not "user" (assistant/system) is mapped to "model".
          contents: this.input.messages.map((msg) => ({
            role: msg.role === "user" ? "user" : "model",
            parts: [{ text: msg.content }],
          })),
        }),
      }
    );
  };

  handleResponse = async (response: Response) => {
    const raw = await response.json();
    // BUG FIX: error payloads carry no usageMetadata; the old code threw a
    // TypeError before the caller could inspect the error body.
    const usage = raw.usageMetadata;
    if (usage) {
      this.cost = {
        promptTokens: usage.promptTokenCount,
        completionTokens: usage.candidatesTokenCount,
        totalTokens: usage.totalTokenCount,
      };
    }
    return { raw };
  };
}
--------------------------------------------------------------------------------
/src/providers/google/index.ts:
--------------------------------------------------------------------------------
import type { JobOptions } from "~/jobs/schema";
import { GoogleChatJobBuilder } from "./chat";

/** Provider factory; falls back to GOOGLE_API_KEY from the environment. */
export function google(options?: JobOptions) {
  options = options || {};
  options.apiKey = options.apiKey || process.env.GOOGLE_API_KEY;

  return {
    chat(model: string) {
      return new GoogleChatJobBuilder(options, model);
    },
  };
}

export * from "./chat";
export * from "./schema";
--------------------------------------------------------------------------------
/src/providers/google/schema.ts:
--------------------------------------------------------------------------------
import { z } from "zod";
import { ChatJobSchema } from "~/jobs/chat";

export const GoogleBaseJobSchema = z.object({
  provider: z.literal("google"),
});

export const GoogleChatJobSchema = ChatJobSchema.extend(GoogleBaseJobSchema);
export type GoogleChatJob = z.infer<typeof GoogleChatJobSchema>;

export const GoogleJobSchema = z.discriminatedUnion("type", [
  GoogleChatJobSchema,
]);
export type GoogleJob = z.infer<typeof GoogleJobSchema>;
--------------------------------------------------------------------------------
/src/providers/luma/index.ts:
--------------------------------------------------------------------------------
import { z } from "zod";
import { ImageJobBuilder, ImageJobSchema } from
"~/jobs/image"; 3 | import type { JobOptions } from "~/jobs/schema"; 4 | 5 | export const LumaBaseJobSchema = z.object({ 6 | provider: z.literal("luma"), 7 | }); 8 | 9 | export const LumaImageJobSchema = ImageJobSchema.extend(LumaBaseJobSchema); 10 | 11 | export const LumaJobSchema = z.discriminatedUnion("type", [LumaImageJobSchema]); 12 | export type LumaJob = z.infer; 13 | 14 | export function luma(options?: JobOptions) { 15 | options = options || {}; 16 | options.apiKey = options.apiKey || process.env.LUMA_API_KEY; 17 | 18 | if (!options.apiKey) { 19 | throw new Error("Luma API key is required"); 20 | } 21 | 22 | return { 23 | image(model: string) { 24 | return new LumaImageJobBuilder(options, model); 25 | }, 26 | }; 27 | } 28 | 29 | export class LumaImageJobBuilder extends ImageJobBuilder { 30 | constructor(options: JobOptions, model: string) { 31 | super(model); 32 | this.provider = "luma"; 33 | this.options = options; 34 | } 35 | 36 | makeRequest = () => { 37 | return new Request("https://api.lumalabs.ai/dream-machine/v1/generations", { 38 | method: "POST", 39 | headers: { 40 | Authorization: `Bearer ${this.options.apiKey}`, 41 | "Content-Type": "application/json", 42 | }, 43 | body: JSON.stringify({ 44 | generation_type: "image", 45 | prompt: this.input.prompt, 46 | }), 47 | }); 48 | }; 49 | } 50 | -------------------------------------------------------------------------------- /src/providers/ollama/chat.ts: -------------------------------------------------------------------------------- 1 | import { ChatJobBuilder, convertTools } from "~/jobs/chat"; 2 | import type { JobOptions } from "~/jobs/schema"; 3 | 4 | export class OllamaChatJobBuilder extends ChatJobBuilder { 5 | constructor(options: JobOptions, model: string) { 6 | super(model); 7 | this.provider = "ollama"; 8 | this.options = options; 9 | } 10 | 11 | makeRequest = () => { 12 | const requestBody = { 13 | model: this.input.model, 14 | messages: this.input.messages, 15 | stream: false, 16 | } as any; 
17 | 18 | if (this.input.tools && this.input.tools.length) { 19 | requestBody.tools = convertTools(this.input.tools); 20 | } 21 | 22 | return new Request("http://localhost:11434/api/chat", { 23 | method: "POST", 24 | body: JSON.stringify(requestBody), 25 | }); 26 | }; 27 | 28 | handleResponse = async (response: Response) => { 29 | const json = await response.json(); 30 | return json; 31 | }; 32 | } 33 | -------------------------------------------------------------------------------- /src/providers/ollama/embedding.ts: -------------------------------------------------------------------------------- 1 | import { EmbeddingJobBuilder } from "~/jobs/embedding"; 2 | import type { JobOptions } from "~/jobs/schema"; 3 | 4 | export class OllamaEmbeddingJobBuilder extends EmbeddingJobBuilder { 5 | constructor(options: JobOptions, model: string) { 6 | super(model); 7 | this.provider = "ollama"; 8 | this.options = options; 9 | } 10 | 11 | makeRequest = () => { 12 | return new Request("http://localhost:11434/api/embed", { 13 | method: "POST", 14 | body: JSON.stringify({ 15 | model: this.input.model, 16 | input: this.input.value, 17 | }), 18 | }); 19 | }; 20 | 21 | handleResponse = async (response: Response) => { 22 | const raw = await response.json(); 23 | return { raw, embeddings: raw.embeddings }; 24 | }; 25 | } 26 | -------------------------------------------------------------------------------- /src/providers/ollama/index.ts: -------------------------------------------------------------------------------- 1 | import type { JobOptions } from "~/jobs/schema"; 2 | import { OllamaChatJobBuilder } from "./chat"; 3 | import { OllamaEmbeddingJobBuilder } from "./embedding"; 4 | import { OllamaModelsJobBuilder } from "./models"; 5 | 6 | export function ollama(options?: JobOptions) { 7 | options = options || {}; 8 | 9 | return { 10 | chat(model: string) { 11 | return new OllamaChatJobBuilder(options, model); 12 | }, 13 | embedding(model: string) { 14 | return new 
OllamaEmbeddingJobBuilder(options, model); 15 | }, 16 | models() { 17 | return new OllamaModelsJobBuilder(options); 18 | }, 19 | }; 20 | } 21 | 22 | export * from "./chat"; 23 | export * from "./embedding"; 24 | export * from "./models"; 25 | export * from "./schema"; 26 | -------------------------------------------------------------------------------- /src/providers/ollama/models.ts: -------------------------------------------------------------------------------- 1 | import type { JobOptions } from "~/jobs/schema"; 2 | import { ModelsJobBuilder } from "~/jobs/models"; 3 | 4 | export class OllamaModelsJobBuilder extends ModelsJobBuilder { 5 | constructor(options: JobOptions) { 6 | super(); 7 | this.provider = "ollama"; 8 | this.options = options; 9 | } 10 | 11 | makeRequest = () => { 12 | return new Request("http://localhost:11434/api/tags", { method: "GET" }); 13 | }; 14 | 15 | handleResponse = async (response: Response) => { 16 | const json = await response.json(); 17 | return json; 18 | }; 19 | } 20 | -------------------------------------------------------------------------------- /src/providers/ollama/schema.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { ChatJobSchema } from "~/jobs/chat"; 3 | import { EmbeddingJobSchema } from "~/jobs/embedding"; 4 | import { ModelsJobSchema } from "~/jobs/models"; 5 | 6 | export const OllamaBaseJobSchema = z.object({ 7 | provider: z.literal("ollama"), 8 | }); 9 | 10 | export const OllamaChatJobSchema = ChatJobSchema.extend(OllamaBaseJobSchema); 11 | export type OllamaChatJob = z.infer; 12 | 13 | export const OllamaEmbeddingJobSchema = 14 | EmbeddingJobSchema.extend(OllamaBaseJobSchema); 15 | export type OllamaEmbeddingJob = z.infer; 16 | 17 | export const OllamaModelsJobSchema = 18 | ModelsJobSchema.extend(OllamaBaseJobSchema); 19 | export type OllamaModelsJob = z.infer; 20 | 21 | export const OllamaJobSchema = z.discriminatedUnion("type", [ 22 | 
OllamaChatJobSchema, 23 | OllamaEmbeddingJobSchema, 24 | OllamaModelsJobSchema, 25 | ]); 26 | export type OllamaJob = z.infer; 27 | -------------------------------------------------------------------------------- /src/providers/openai/chat.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { ChatJobBuilder, convertTools } from "~/jobs/chat"; 3 | import type { JobOptions } from "~/jobs/schema"; 4 | import { jobStream } from "~/jobs/stream"; 5 | import { OPENAI_BASE_URL } from "./schema"; 6 | 7 | export class OpenAIChatJobBuilder extends ChatJobBuilder { 8 | constructor(options: JobOptions, model: string) { 9 | super(model); 10 | this.provider = "openai"; 11 | this.options = options; 12 | } 13 | 14 | makeRequest = () => { 15 | const baseURL = this.options.baseURL || OPENAI_BASE_URL; 16 | const messages = this.input.messages; 17 | 18 | if (this.input.system) { 19 | messages.unshift({ 20 | role: "system", 21 | content: this.input.system, 22 | }); 23 | } 24 | const requestBody = { 25 | messages: messages, 26 | model: this.input.model, 27 | temperature: this.input.temperature, 28 | stream: this.input.stream, 29 | response_format: this.input.responseFormat, 30 | } as any; 31 | 32 | if (this.input.tools && this.input.tools.length) { 33 | requestBody.tools = convertTools(this.input.tools); 34 | requestBody.tool_choice = this.input.toolChoice; 35 | } 36 | 37 | if (this.input.jsonSchema) { 38 | const schema = z.toJSONSchema(this.input.jsonSchema.schema); 39 | requestBody.response_format = { 40 | type: "json_schema", 41 | json_schema: { 42 | name: this.input.jsonSchema.name, 43 | description: this.input.jsonSchema.description, 44 | schema: schema, 45 | }, 46 | }; 47 | } 48 | 49 | return new Request(`${baseURL}/chat/completions`, { 50 | headers: { 51 | Authorization: `Bearer ${this.options.apiKey}`, 52 | "Content-Type": "application/json", 53 | }, 54 | method: "POST", 55 | body: 
JSON.stringify(requestBody), 56 | }); 57 | }; 58 | 59 | handleResponse = async (response: Response) => { 60 | if (this.input.stream) { 61 | return jobStream(response); 62 | } 63 | 64 | const raw = await response.json(); 65 | this.cost = { 66 | promptTokens: raw.usage.prompt_tokens, 67 | completionTokens: raw.usage.completion_tokens, 68 | totalTokens: raw.usage.total_tokens, 69 | }; 70 | return { raw }; 71 | }; 72 | } 73 | -------------------------------------------------------------------------------- /src/providers/openai/embedding.ts: -------------------------------------------------------------------------------- 1 | import { EmbeddingJobBuilder } from "~/jobs/embedding"; 2 | import type { JobOptions } from "~/jobs/schema"; 3 | import { OPENAI_BASE_URL } from "./schema"; 4 | 5 | export class OpenAIEmbeddingJobBuilder extends EmbeddingJobBuilder { 6 | constructor(options: JobOptions, model: string) { 7 | super(model); 8 | this.provider = "openai"; 9 | this.options = options || {}; 10 | } 11 | 12 | makeRequest = () => { 13 | const baseURL = this.options.baseURL || OPENAI_BASE_URL; 14 | return new Request(`${baseURL}/embeddings`, { 15 | headers: { 16 | Authorization: `Bearer ${this.options.apiKey}`, 17 | "Content-Type": "application/json", 18 | }, 19 | method: "POST", 20 | body: JSON.stringify({ 21 | model: this.input.model, 22 | input: this.input.value, 23 | encoding_format: this.input.encodingFormat, 24 | dimensions: this.input.dimensions, 25 | }), 26 | }); 27 | }; 28 | 29 | handleResponse = async (response: Response) => { 30 | const raw = await response.json(); 31 | this.cost = { 32 | promptTokens: raw.usage.prompt_tokens, 33 | totalTokens: raw.usage.total_tokens, 34 | }; 35 | return { embedding: raw.data[0].embedding, raw }; 36 | }; 37 | } 38 | -------------------------------------------------------------------------------- /src/providers/openai/image.ts: -------------------------------------------------------------------------------- 1 | import { 
ImageJobBuilder } from "~/jobs/image"; 2 | import type { JobOptions } from "~/jobs/schema"; 3 | import { OPENAI_BASE_URL } from "./schema"; 4 | 5 | export class OpenAIImageJobBuilder extends ImageJobBuilder { 6 | constructor(options: JobOptions, model: string) { 7 | super(model); 8 | this.provider = "openai"; 9 | this.options = options; 10 | } 11 | 12 | makeRequest = () => { 13 | const baseURL = this.options.baseURL || OPENAI_BASE_URL; 14 | return new Request(`${baseURL}/image/generations`, { 15 | headers: { 16 | Authorization: `Bearer ${this.options.apiKey}`, 17 | "Content-Type": "application/json", 18 | }, 19 | method: "POST", 20 | body: JSON.stringify({ 21 | prompt: this.input.prompt, 22 | model: this.input.model, 23 | n: this.input.n, 24 | quality: this.input.quality, 25 | response_format: this.input.responseFormat, 26 | size: this.input.size, 27 | style: this.input.style, 28 | user: this.input.user, 29 | }), 30 | }); 31 | }; 32 | } 33 | -------------------------------------------------------------------------------- /src/providers/openai/index.ts: -------------------------------------------------------------------------------- 1 | import { type JobOptions } from "~/jobs/schema"; 2 | import { OpenAIChatJobBuilder } from "~/providers/openai/chat"; 3 | import { OpenAIImageJobBuilder } from "~/providers/openai/image"; 4 | import { OpenAIEmbeddingJobBuilder } from "~/providers/openai/embedding"; 5 | import { OpenAIModelsJobBuilder } from "~/providers/openai/models"; 6 | 7 | export function openai(options?: JobOptions) { 8 | options = options || {}; 9 | options.apiKey = options.apiKey || process.env.OPENAI_API_KEY; 10 | 11 | if (!options.apiKey) { 12 | throw new Error("OpenAI API key is required"); 13 | } 14 | 15 | return { 16 | chat(model: string) { 17 | return new OpenAIChatJobBuilder(options, model); 18 | }, 19 | image(model: string) { 20 | return new OpenAIImageJobBuilder(options, model); 21 | }, 22 | embedding(model: string) { 23 | return new 
OpenAIEmbeddingJobBuilder(options, model); 24 | }, 25 | models() { 26 | return new OpenAIModelsJobBuilder(options); 27 | }, 28 | }; 29 | } 30 | 31 | export * from "./chat"; 32 | export * from "./image"; 33 | export * from "./embedding"; 34 | export * from "./schema"; 35 | -------------------------------------------------------------------------------- /src/providers/openai/models.ts: -------------------------------------------------------------------------------- 1 | import type { JobOptions } from "~/jobs/schema"; 2 | import { ModelsJobBuilder } from "~/jobs/models"; 3 | import { OPENAI_BASE_URL } from "./schema"; 4 | 5 | export class OpenAIModelsJobBuilder extends ModelsJobBuilder { 6 | constructor(options: JobOptions) { 7 | super(); 8 | this.provider = "openai"; 9 | this.options = options; 10 | } 11 | 12 | makeRequest = () => { 13 | const baseURL = this.options.baseURL || OPENAI_BASE_URL; 14 | return new Request(`${baseURL}/models`, { 15 | headers: { 16 | Authorization: `Bearer ${this.options.apiKey}`, 17 | "Content-Type": "application/json", 18 | }, 19 | method: "GET", 20 | }); 21 | }; 22 | 23 | handleResponse = async (response: Response) => { 24 | return await response.json(); 25 | }; 26 | } 27 | -------------------------------------------------------------------------------- /src/providers/openai/schema.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { ChatJobSchema } from "~/jobs/chat"; 3 | import { EmbeddingJobSchema } from "~/jobs/embedding"; 4 | import { ImageJobSchema } from "~/jobs/image"; 5 | import { ModelsJobSchema } from "~/jobs/models"; 6 | 7 | export const OPENAI_BASE_URL = "https://api.openai.com/v1"; 8 | 9 | export const BaseOpenAIJobSchema = z.object({ 10 | provider: z.literal("openai"), 11 | }); 12 | 13 | export const OpenAIChatJobSchema = ChatJobSchema.extend(BaseOpenAIJobSchema); 14 | export type OpenAIChatJob = z.infer; 15 | 16 | export const 
OpenAIEmbeddingJobSchema = 17 | EmbeddingJobSchema.extend(BaseOpenAIJobSchema); 18 | export type OpenAIEmbeddingJob = z.infer; 19 | 20 | export const OpenAIImageJobSchema = ImageJobSchema.extend(BaseOpenAIJobSchema); 21 | export type OpenAIImageJob = z.infer; 22 | 23 | export const OpenAIModelsJobSchema = 24 | ModelsJobSchema.extend(BaseOpenAIJobSchema); 25 | export type OpenAIModelsJob = z.infer; 26 | 27 | export const OpenAIJobSchema = z.discriminatedUnion("type", [ 28 | OpenAIChatJobSchema, 29 | OpenAIEmbeddingJobSchema, 30 | OpenAIImageJobSchema, 31 | OpenAIModelsJobSchema, 32 | ]); 33 | export type OpenAIJob = z.infer; 34 | -------------------------------------------------------------------------------- /src/providers/together/index.ts: -------------------------------------------------------------------------------- 1 | import type { JobOptions } from "~/jobs/schema"; 2 | import { OpenAIChatJobBuilder } from "~/providers/openai"; 3 | 4 | export function together(options?: JobOptions) { 5 | options = options || {}; 6 | options.apiKey = options.apiKey || process.env.TOGETHER_API_KEY; 7 | 8 | return { 9 | chat(model: string) { 10 | return new OpenAIChatJobBuilder( 11 | { 12 | ...options, 13 | baseURL: "https://api.together.xyz/v1", 14 | }, 15 | model 16 | ); 17 | }, 18 | }; 19 | } 20 | -------------------------------------------------------------------------------- /src/providers/voyage/index.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { EmbeddingJobBuilder, EmbeddingJobSchema } from "~/jobs/embedding"; 3 | import type { JobOptions } from "~/jobs/schema"; 4 | 5 | export const VoyageBaseJobSchema = z.object({ 6 | provider: z.literal("voyage"), 7 | }); 8 | 9 | export const VoyageEmbeddingJobSchema = 10 | EmbeddingJobSchema.extend(VoyageBaseJobSchema); 11 | 12 | export const VoyageJobSchema = z.discriminatedUnion("type", [ 13 | VoyageEmbeddingJobSchema, 14 | ]); 15 | 16 | export type 
VoyageJob = z.infer; 17 | 18 | export function voyage(options?: JobOptions) { 19 | options = options || {}; 20 | options.apiKey = options.apiKey || process.env.VOYAGE_API_KEY; 21 | 22 | return { 23 | embedding(model: string) { 24 | return new VoyageEmbeddingJobBuilder(options, model); 25 | }, 26 | }; 27 | } 28 | 29 | export class VoyageEmbeddingJobBuilder extends EmbeddingJobBuilder { 30 | constructor(options: JobOptions, model: string) { 31 | super(model); 32 | this.provider = "voyage"; 33 | this.options = options; 34 | } 35 | 36 | makeRequest = () => { 37 | return new Request("https://api.voyageai.com/v1/embeddings", { 38 | method: "POST", 39 | headers: { 40 | Authorization: `Bearer ${this.options.apiKey}`, 41 | "Content-Type": "application/json", 42 | }, 43 | body: JSON.stringify({ 44 | model: this.input.model, 45 | input: this.input.value, 46 | }), 47 | }); 48 | }; 49 | 50 | handleResponse = async (response: Response) => { 51 | const json = await response.json(); 52 | return json; 53 | }; 54 | } 55 | -------------------------------------------------------------------------------- /test/__snapshots__/chat.test.ts.snap: -------------------------------------------------------------------------------- 1 | // Bun Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`chat 1`] = ` 4 | { 5 | "body": "{"model":"claude-3-5-sonnet-20241022","messages":[{"role":"system","content":"you are a helpful assistant"},{"role":"user","content":"hi"}]}", 6 | "headers": { 7 | "anthropic-version": "2023-06-01", 8 | "content-type": "application/json", 9 | "x-api-key": "", 10 | }, 11 | "method": "POST", 12 | "url": "https://api.anthropic.com/v1/messages", 13 | } 14 | `; 15 | 16 | exports[`chat 2`] = ` 17 | { 18 | "body": "{"model":"llama3.2","messages":[{"role":"system","content":"you are a helpful assistant"},{"role":"user","content":"hi"}],"stream":false}", 19 | "headers": {}, 20 | "method": "POST", 21 | "url": "http://localhost:11434/api/chat", 22 | } 23 | `; 24 | 25 | exports[`chat 
3`] = ` 26 | { 27 | "body": "{"messages":[{"role":"system","content":"you are a helpful assistant"},{"role":"user","content":"hi"}],"model":"gpt-4o-mini","temperature":0.5}", 28 | "headers": { 29 | "authorization": "Bearer ", 30 | "content-type": "application/json", 31 | }, 32 | "method": "POST", 33 | "url": "https://api.openai.com/v1/chat/completions", 34 | } 35 | `; 36 | 37 | exports[`stream 1`] = ` 38 | { 39 | "body": "{"model":"claude-3-5-sonnet-20241022","messages":[{"role":"system","content":"you are a helpful assistant"},{"role":"user","content":"hi"}]}", 40 | "headers": { 41 | "anthropic-version": "2023-06-01", 42 | "content-type": "application/json", 43 | "x-api-key": "", 44 | }, 45 | "method": "POST", 46 | "url": "https://api.anthropic.com/v1/messages", 47 | } 48 | `; 49 | 50 | exports[`stream 2`] = ` 51 | { 52 | "body": "{"model":"llama3.2","messages":[{"role":"system","content":"you are a helpful assistant"},{"role":"user","content":"hi"}],"stream":false}", 53 | "headers": {}, 54 | "method": "POST", 55 | "url": "http://localhost:11434/api/chat", 56 | } 57 | `; 58 | 59 | exports[`stream 3`] = ` 60 | { 61 | "body": "{"messages":[{"role":"system","content":"you are a helpful assistant"},{"role":"user","content":"hi"}],"model":"gpt-4o-mini","stream":true}", 62 | "headers": { 63 | "authorization": "Bearer ", 64 | "content-type": "application/json", 65 | }, 66 | "method": "POST", 67 | "url": "https://api.openai.com/v1/chat/completions", 68 | } 69 | `; 70 | 71 | exports[`dump 1`] = ` 72 | { 73 | "cost": undefined, 74 | "input": { 75 | "messages": [], 76 | "model": "claude-3-5-sonnet-20241022", 77 | }, 78 | "options": { 79 | "apiKey": "", 80 | }, 81 | "output": undefined, 82 | "performance": undefined, 83 | "provider": "anthropic", 84 | "type": "chat", 85 | "version": "0.3.0", 86 | } 87 | `; 88 | 89 | exports[`dump 2`] = ` 90 | { 91 | "cost": undefined, 92 | "input": { 93 | "messages": [], 94 | "model": "llama3.2", 95 | }, 96 | "options": {}, 97 | "output": 
undefined, 98 | "performance": undefined, 99 | "provider": "ollama", 100 | "type": "chat", 101 | "version": "0.3.0", 102 | } 103 | `; 104 | 105 | exports[`dump 3`] = ` 106 | { 107 | "cost": undefined, 108 | "input": { 109 | "messages": [], 110 | "model": "gpt-4o-mini", 111 | }, 112 | "options": { 113 | "apiKey": "", 114 | }, 115 | "output": undefined, 116 | "performance": undefined, 117 | "provider": "openai", 118 | "type": "chat", 119 | "version": "0.3.0", 120 | } 121 | `; 122 | 123 | exports[`json_object 1`] = ` 124 | { 125 | "body": "{"messages":[{"role":"user","content":"hi"}],"model":"gpt-4o-mini","response_format":{"type":"json_object"}}", 126 | "headers": { 127 | "authorization": "Bearer ", 128 | "content-type": "application/json", 129 | }, 130 | "method": "POST", 131 | "url": "https://api.openai.com/v1/chat/completions", 132 | } 133 | `; 134 | 135 | exports[`tool 1`] = ` 136 | { 137 | "body": "{"model":"claude-3-5-sonnet-20241022","messages":[{"role":"user","content":"What's the weather like in Boston today?"}],"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string"},"unit":{"oneOf":[{"enum":["celsius","fahrenheit"]},{"type":"null"}]}},"required":["location"]}}}]}", 138 | "headers": { 139 | "anthropic-version": "2023-06-01", 140 | "content-type": "application/json", 141 | "x-api-key": "", 142 | }, 143 | "method": "POST", 144 | "url": "https://api.anthropic.com/v1/messages", 145 | } 146 | `; 147 | 148 | exports[`tool 2`] = ` 149 | { 150 | "body": "{"model":"llama3.2","messages":[{"role":"user","content":"What's the weather like in Boston today?"}],"stream":false,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given 
location","parameters":{"type":"object","properties":{"location":{"type":"string"},"unit":{"oneOf":[{"enum":["celsius","fahrenheit"]},{"type":"null"}]}},"required":["location"]}}}]}", 151 | "headers": {}, 152 | "method": "POST", 153 | "url": "http://localhost:11434/api/chat", 154 | } 155 | `; 156 | 157 | exports[`tool 3`] = ` 158 | { 159 | "body": "{"messages":[{"role":"user","content":"What's the weather like in Boston today?"}],"model":"gpt-4o-mini","tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string"},"unit":{"oneOf":[{"enum":["celsius","fahrenheit"]},{"type":"null"}]}},"required":["location"]}}}]}", 160 | "headers": { 161 | "authorization": "Bearer ", 162 | "content-type": "application/json", 163 | }, 164 | "method": "POST", 165 | "url": "https://api.openai.com/v1/chat/completions", 166 | } 167 | `; 168 | 169 | exports[`jsonSchema 1`] = ` 170 | { 171 | "body": "{"model":"claude-3-5-sonnet-20241022","messages":[{"role":"user","content":"generate a person with name and age in json format"}]}", 172 | "headers": { 173 | "anthropic-version": "2023-06-01", 174 | "content-type": "application/json", 175 | "x-api-key": "", 176 | }, 177 | "method": "POST", 178 | "url": "https://api.anthropic.com/v1/messages", 179 | } 180 | `; 181 | 182 | exports[`jsonSchema 2`] = ` 183 | { 184 | "body": "{"model":"llama3.2","messages":[{"role":"user","content":"generate a person with name and age in json format"}],"stream":false}", 185 | "headers": {}, 186 | "method": "POST", 187 | "url": "http://localhost:11434/api/chat", 188 | } 189 | `; 190 | 191 | exports[`jsonSchema 3`] = ` 192 | { 193 | "body": "{"messages":[{"role":"user","content":"generate a person with name and age in json 
format"}],"model":"gpt-4o-mini","response_format":{"type":"json_schema","json_schema":{"name":"person","schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"number"}},"required":["name","age"]}}}}", 194 | "headers": { 195 | "authorization": "Bearer ", 196 | "content-type": "application/json", 197 | }, 198 | "method": "POST", 199 | "url": "https://api.openai.com/v1/chat/completions", 200 | } 201 | `; 202 | -------------------------------------------------------------------------------- /test/__snapshots__/embedding.test.ts.snap: -------------------------------------------------------------------------------- 1 | // Bun Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`embeddings 1`] = ` 4 | { 5 | "body": "{"model":"nomic-embed-text","input":"hi"}", 6 | "headers": {}, 7 | "method": "POST", 8 | "url": "http://localhost:11434/api/embed", 9 | } 10 | `; 11 | 12 | exports[`embeddings 2`] = ` 13 | { 14 | "body": "{"model":"text-embedding-ada-002","input":"hi"}", 15 | "headers": { 16 | "authorization": "Bearer ", 17 | "content-type": "application/json", 18 | }, 19 | "method": "POST", 20 | "url": "https://api.openai.com/v1/embeddings", 21 | } 22 | `; 23 | 24 | exports[`embeddings 3`] = ` 25 | { 26 | "body": "{"model":"voyage-3-lite","input":"hi"}", 27 | "headers": { 28 | "authorization": "Bearer ", 29 | "content-type": "application/json", 30 | }, 31 | "method": "POST", 32 | "url": "https://api.voyageai.com/v1/embeddings", 33 | } 34 | `; 35 | 36 | exports[`dump 1`] = ` 37 | { 38 | "cost": undefined, 39 | "input": { 40 | "model": "nomic-embed-text", 41 | "value": "hi", 42 | }, 43 | "options": {}, 44 | "output": undefined, 45 | "performance": undefined, 46 | "provider": "ollama", 47 | "type": "embedding", 48 | "version": "0.3.0", 49 | } 50 | `; 51 | 52 | exports[`dump 2`] = ` 53 | { 54 | "cost": undefined, 55 | "input": { 56 | "model": "text-embedding-ada-002", 57 | "value": "hi", 58 | }, 59 | "options": { 60 | "apiKey": "", 61 | }, 62 | "output": 
undefined, 63 | "performance": undefined, 64 | "provider": "openai", 65 | "type": "embedding", 66 | "version": "0.3.0", 67 | } 68 | `; 69 | 70 | exports[`dump 3`] = ` 71 | { 72 | "cost": undefined, 73 | "input": { 74 | "model": "voyage-3-lite", 75 | "value": "hi", 76 | }, 77 | "options": { 78 | "apiKey": "", 79 | }, 80 | "output": undefined, 81 | "performance": undefined, 82 | "provider": "voyage", 83 | "type": "embedding", 84 | "version": "0.3.0", 85 | } 86 | `; 87 | -------------------------------------------------------------------------------- /test/__snapshots__/image.test.ts.snap: -------------------------------------------------------------------------------- 1 | // Bun Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`image 1`] = ` 4 | { 5 | "body": "{"prompt":"A cute baby sea otter","num_images":3}", 6 | "headers": { 7 | "authorization": "Key ", 8 | "content-type": "application/json", 9 | }, 10 | "method": "POST", 11 | "url": "https://queue.fal.run/fal-ai/flux/dev", 12 | } 13 | `; 14 | 15 | exports[`image 2`] = ` 16 | { 17 | "body": "{"prompt":"A cute baby sea otter","model":"dall-e-2","n":3}", 18 | "headers": { 19 | "authorization": "Bearer ", 20 | "content-type": "application/json", 21 | }, 22 | "method": "POST", 23 | "url": "https://api.openai.com/v1/image/generations", 24 | } 25 | `; 26 | 27 | exports[`dump 1`] = ` 28 | { 29 | "cost": undefined, 30 | "input": { 31 | "model": "fal-ai/flux/dev", 32 | "n": 3, 33 | "prompt": "A cute baby sea otter", 34 | }, 35 | "options": { 36 | "apiKey": "", 37 | }, 38 | "output": undefined, 39 | "performance": undefined, 40 | "provider": "fal", 41 | "type": "image", 42 | "version": "0.3.0", 43 | } 44 | `; 45 | 46 | exports[`dump 2`] = ` 47 | { 48 | "cost": undefined, 49 | "input": { 50 | "model": "dall-e-2", 51 | "n": 3, 52 | "prompt": "A cute baby sea otter", 53 | }, 54 | "options": { 55 | "apiKey": "", 56 | }, 57 | "output": undefined, 58 | "performance": undefined, 59 | "provider": "openai", 60 | "type": "image", 
61 | "version": "0.3.0", 62 | } 63 | `; 64 | -------------------------------------------------------------------------------- /test/__snapshots__/models.snap: -------------------------------------------------------------------------------- 1 | // Bun Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`models 1`] = ` 4 | { 5 | "body": "", 6 | "headers": {}, 7 | "method": "GET", 8 | "url": "http://localhost:11434/api/tags", 9 | } 10 | `; 11 | 12 | exports[`models 2`] = ` 13 | { 14 | "body": "", 15 | "headers": { 16 | "authorization": "Bearer ", 17 | "content-type": "application/json", 18 | }, 19 | "method": "GET", 20 | "url": "https://api.openai.com/v1/models", 21 | } 22 | `; 23 | 24 | exports[`models 3`] = ` 25 | { 26 | "body": "", 27 | "headers": { 28 | "anthropic-version": "2023-06-01", 29 | "content-type": "application/json", 30 | "x-api-key": "", 31 | }, 32 | "method": "GET", 33 | "url": "https://api.anthropic.com/v1/models", 34 | } 35 | `; 36 | 37 | exports[`dump 1`] = ` 38 | { 39 | "options": {}, 40 | "params": {}, 41 | "provider": "ollama", 42 | "type": "models", 43 | "version": "0.2.4", 44 | } 45 | `; 46 | 47 | exports[`dump 2`] = ` 48 | { 49 | "options": { 50 | "apiKey": "", 51 | }, 52 | "params": {}, 53 | "provider": "openai", 54 | "type": "models", 55 | "version": "0.2.4", 56 | } 57 | `; 58 | 59 | exports[`dump 3`] = ` 60 | { 61 | "options": { 62 | "apiKey": "", 63 | }, 64 | "params": {}, 65 | "provider": "anthropic", 66 | "type": "models", 67 | "version": "0.2.4", 68 | } 69 | `; 70 | -------------------------------------------------------------------------------- /test/__snapshots__/models.test.ts.snap: -------------------------------------------------------------------------------- 1 | // Bun Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`models 1`] = ` 4 | { 5 | "body": "", 6 | "headers": {}, 7 | "method": "GET", 8 | "url": "http://localhost:11434/api/tags", 9 | } 10 | `; 11 | 12 | exports[`models 2`] = ` 13 | { 14 | "body": "", 15 | "headers": { 
16 | "authorization": "Bearer ", 17 | "content-type": "application/json", 18 | }, 19 | "method": "GET", 20 | "url": "https://api.openai.com/v1/models", 21 | } 22 | `; 23 | 24 | exports[`models 3`] = ` 25 | { 26 | "body": "", 27 | "headers": { 28 | "anthropic-version": "2023-06-01", 29 | "content-type": "application/json", 30 | "x-api-key": "", 31 | }, 32 | "method": "GET", 33 | "url": "https://api.anthropic.com/v1/models", 34 | } 35 | `; 36 | 37 | exports[`dump 1`] = ` 38 | { 39 | "cost": undefined, 40 | "input": {}, 41 | "options": {}, 42 | "output": undefined, 43 | "performance": undefined, 44 | "provider": "ollama", 45 | "type": "models", 46 | "version": "0.3.0", 47 | } 48 | `; 49 | 50 | exports[`dump 2`] = ` 51 | { 52 | "cost": undefined, 53 | "input": {}, 54 | "options": { 55 | "apiKey": "", 56 | }, 57 | "output": undefined, 58 | "performance": undefined, 59 | "provider": "openai", 60 | "type": "models", 61 | "version": "0.3.0", 62 | } 63 | `; 64 | 65 | exports[`dump 3`] = ` 66 | { 67 | "cost": undefined, 68 | "input": {}, 69 | "options": { 70 | "apiKey": "", 71 | }, 72 | "output": undefined, 73 | "performance": undefined, 74 | "provider": "anthropic", 75 | "type": "models", 76 | "version": "0.3.0", 77 | } 78 | `; 79 | -------------------------------------------------------------------------------- /test/chat.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "bun:test"; 2 | import { openai, ollama, system, user, tool, anthropic, load } from "../src"; 3 | import { z } from "zod"; 4 | import { requestObject } from "./utils"; 5 | 6 | function createJobs() { 7 | return [ 8 | anthropic({ apiKey: "" }).chat("claude-3-5-sonnet-20241022"), 9 | ollama().chat("llama3.2"), 10 | openai({ apiKey: "" }).chat("gpt-4o-mini"), 11 | ]; 12 | } 13 | 14 | test("chat", async () => { 15 | for (const job of createJobs()) { 16 | expect( 17 | await requestObject( 18 | job 19 | .messages([system("you are a helpful 
assistant"), user("hi")]) 20 | .temperature(0.5) 21 | .makeRequest() 22 | ) 23 | ).toMatchSnapshot(); 24 | } 25 | }); 26 | 27 | test("stream", async () => { 28 | for (const job of createJobs()) { 29 | expect( 30 | await requestObject( 31 | job 32 | .messages([system("you are a helpful assistant"), user("hi")]) 33 | .stream() 34 | .makeRequest() 35 | ) 36 | ).toMatchSnapshot(); 37 | } 38 | }); 39 | 40 | test("dump", () => { 41 | for (const job of createJobs()) { 42 | expect(job.dump()).toMatchSnapshot(); 43 | } 44 | }); 45 | 46 | test("load", async () => { 47 | for (const job of createJobs()) { 48 | const req1 = await requestObject(load(job.dump()).makeRequest()); 49 | const req2 = await requestObject(job.makeRequest()); 50 | expect(req1).toEqual(req2); 51 | } 52 | }); 53 | 54 | test("json_object", async () => { 55 | expect( 56 | await requestObject( 57 | openai({ apiKey: "" }) 58 | .chat("gpt-4o-mini") 59 | .messages([user("hi")]) 60 | .responseFormat({ type: "json_object" }) 61 | .makeRequest() 62 | ) 63 | ).toMatchSnapshot(); 64 | }); 65 | 66 | test("tool", async () => { 67 | const weatherTool = tool("get_current_weather") 68 | .description("Get the current weather in a given location") 69 | .parameters( 70 | z.object({ 71 | location: z.string(), 72 | unit: z.enum(["celsius", "fahrenheit"]).optional(), 73 | }) 74 | ); 75 | 76 | for (const job of createJobs()) { 77 | expect( 78 | await requestObject( 79 | job 80 | .tool(weatherTool) 81 | .messages([user("What's the weather like in Boston today?")]) 82 | .makeRequest() 83 | ) 84 | ).toMatchSnapshot(); 85 | } 86 | }); 87 | 88 | test("jsonSchema", async () => { 89 | const personSchema = z.object({ 90 | name: z.string(), 91 | age: z.number(), 92 | }); 93 | 94 | for (const job of createJobs()) { 95 | expect( 96 | await requestObject( 97 | job 98 | .messages([ 99 | user("generate a person with name and age in json format"), 100 | ]) 101 | .jsonSchema(personSchema, "person") 102 | .makeRequest() 103 | ) 104 | 
).toMatchSnapshot(); 105 | } 106 | }); 107 | -------------------------------------------------------------------------------- /test/embedding.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "bun:test"; 2 | import { openai, ollama, voyage, load } from "../src"; 3 | import { requestObject } from "./utils"; 4 | 5 | function createJobs() { 6 | return [ 7 | ollama().embedding("nomic-embed-text").value("hi"), 8 | openai({ apiKey: "" }).embedding("text-embedding-ada-002").value("hi"), 9 | voyage({ apiKey: "" }).embedding("voyage-3-lite").value("hi"), 10 | ]; 11 | } 12 | 13 | test("embeddings", async () => { 14 | const jobs = createJobs(); 15 | for (const job of jobs) { 16 | expect(await requestObject(job.makeRequest())).toMatchSnapshot(); 17 | } 18 | }); 19 | 20 | test("dump", () => { 21 | const jobs = createJobs(); 22 | for (const job of jobs) { 23 | expect(job.dump()).toMatchSnapshot(); 24 | } 25 | }); 26 | 27 | test("load", async () => { 28 | const jobs = createJobs(); 29 | for (const job of jobs) { 30 | const req1 = await requestObject(load(job.dump()).makeRequest!()); 31 | const req2 = await requestObject(job.makeRequest()); 32 | expect(req1).toEqual(req2); 33 | } 34 | }); 35 | -------------------------------------------------------------------------------- /test/image.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "bun:test"; 2 | import { openai, fal, load } from "../src"; 3 | import { requestObject } from "./utils"; 4 | 5 | function createJobs() { 6 | // prettier-ignore 7 | return [ 8 | fal({ apiKey: "" }).image("fal-ai/flux/dev").n(3).prompt("A cute baby sea otter"), 9 | openai({ apiKey: "" }).image("dall-e-2").n(3).prompt("A cute baby sea otter"), 10 | ]; 11 | } 12 | 13 | test("image", async () => { 14 | const jobs = createJobs(); 15 | for (const job of jobs) { 16 | expect(await 
requestObject(job.makeRequest())).toMatchSnapshot(); 17 | } 18 | }); 19 | 20 | test("dump", () => { 21 | const jobs = createJobs(); 22 | for (const job of jobs) { 23 | expect(job.dump()).toMatchSnapshot(); 24 | } 25 | }); 26 | 27 | test("load", async () => { 28 | const jobs = createJobs(); 29 | for (const job of jobs) { 30 | const req1 = await requestObject(load(job.dump()).makeRequest!()); 31 | const req2 = await requestObject(job.makeRequest()); 32 | expect(req1).toEqual(req2); 33 | } 34 | }); 35 | -------------------------------------------------------------------------------- /test/models.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "bun:test"; 2 | import { openai, ollama, anthropic, load } from "../src"; 3 | import { requestObject } from "./utils"; 4 | 5 | function createJobs() { 6 | return [ 7 | ollama().models(), 8 | openai({ apiKey: "" }).models(), 9 | anthropic({ apiKey: "" }).models(), 10 | ]; 11 | } 12 | 13 | test("models", async () => { 14 | for (const job of createJobs()) { 15 | expect(await requestObject(job.makeRequest())).toMatchSnapshot(); 16 | } 17 | }); 18 | 19 | test("dump", () => { 20 | for (const job of createJobs()) { 21 | expect(job.dump()).toMatchSnapshot(); 22 | } 23 | }); 24 | 25 | test("load", async () => { 26 | for (const job of createJobs()) { 27 | const req1 = await requestObject(load(job.dump()).makeRequest!()); 28 | const req2 = await requestObject(job.makeRequest()); 29 | expect(req1).toEqual(req2); 30 | } 31 | }); 32 | -------------------------------------------------------------------------------- /test/utils.ts: -------------------------------------------------------------------------------- 1 | export async function requestObject(request: Request) { 2 | return { 3 | url: request.url, 4 | method: request.method, 5 | headers: Object.fromEntries(request.headers as any), 6 | body: await request.text(), 7 | }; 8 | } 9 | 
-------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | // Enable latest features 4 | "lib": ["ESNext", "DOM"], 5 | "target": "ESNext", 6 | "module": "ESNext", 7 | "moduleDetection": "force", 8 | "jsx": "react-jsx", 9 | "allowJs": true, 10 | 11 | // Bundler mode 12 | "moduleResolution": "bundler", 13 | "allowImportingTsExtensions": true, 14 | "verbatimModuleSyntax": true, 15 | "noEmit": true, 16 | 17 | // Best practices 18 | "strict": true, 19 | "skipLibCheck": true, 20 | "noFallthroughCasesInSwitch": true, 21 | 22 | // Some stricter flags (disabled by default) 23 | "noUnusedLocals": false, 24 | "noUnusedParameters": false, 25 | "noPropertyAccessFromIndexSignature": false, 26 | 27 | "paths": { 28 | "~/*": ["./src/*"] 29 | }, 30 | }, 31 | "exclude": ["dist"] 32 | } 33 | --------------------------------------------------------------------------------