├── .cspell.yaml ├── .editorconfig ├── .env.example ├── .gitattributes ├── .gitignore ├── .husky └── pre-commit ├── .lintstagedrc ├── .node-version ├── .npmrc ├── .prettierignore ├── .prettierrc ├── .vscode └── launch.json ├── LICENSE ├── README.md ├── doc ├── ernie.md ├── hunyuan.md ├── minimax.md ├── qwen.md ├── spark.md └── vyro.md ├── eslintrc.config.cjs ├── package.json ├── samples ├── data │ ├── cat.png │ └── cat1.png ├── ernie │ ├── chat.ts │ ├── embeddings.ts │ └── stream.ts ├── gemini │ ├── chat │ │ ├── chat.ts │ │ └── stream.ts │ └── models │ │ ├── list.ts │ │ └── retrieve.ts ├── hunyuan │ ├── chat.ts │ └── stream.ts ├── minimax │ ├── audio │ │ └── speech.ts │ ├── chat │ │ ├── create.ts │ │ └── stream.ts │ └── embeddings.ts ├── qwen │ ├── chat │ │ ├── create.ts │ │ ├── stream.ts │ │ └── tool_calls.ts │ ├── chat_vl │ │ ├── create.ts │ │ └── image.ts │ ├── completions │ │ ├── create.ts │ │ └── stream.ts │ ├── embeddings.ts │ └── images │ │ └── generate.ts ├── shared.ts ├── spark │ ├── chat.ts │ ├── images.ts │ └── stream.ts └── vyro │ └── images │ ├── createVariation.ts │ ├── edit.ts │ ├── generate.ts │ ├── restoration.ts │ └── upscale.ts ├── src ├── ernie │ ├── index.ts │ ├── resources │ │ ├── chat │ │ │ ├── chat.ts │ │ │ ├── completions.ts │ │ │ └── index.ts │ │ ├── embeddings.ts │ │ └── index.ts │ └── util.ts ├── gemini │ ├── index.ts │ ├── resource.ts │ └── resources │ │ ├── chat │ │ ├── chat.ts │ │ ├── completions.ts │ │ └── index.ts │ │ ├── index.ts │ │ └── models.ts ├── hunyuan │ ├── index.ts │ ├── resource.ts │ └── resources │ │ ├── chat │ │ ├── chat.ts │ │ ├── completions.ts │ │ └── index.ts │ │ └── index.ts ├── index.ts ├── minimax │ ├── error.ts │ ├── index.ts │ └── resources │ │ ├── audio │ │ ├── audio.ts │ │ ├── index.ts │ │ └── speech.ts │ │ ├── chat │ │ ├── chat.ts │ │ ├── completions.ts │ │ └── index.ts │ │ ├── embeddings.ts │ │ └── index.ts ├── qwen │ ├── dashscope │ │ ├── index.ts │ │ ├── resolvers │ │ │ ├── chat.ts │ │ │ ├── 
completions.ts │ │ │ ├── embeddings.ts │ │ │ └── index.ts │ │ └── types │ │ │ ├── chat.ts │ │ │ ├── completions.ts │ │ │ ├── embeddings.ts │ │ │ ├── index.ts │ │ │ └── openai.ts │ ├── index.ts │ └── resources │ │ ├── chat │ │ ├── chat.ts │ │ ├── completions.ts │ │ └── index.ts │ │ ├── completions.ts │ │ ├── embeddings.ts │ │ ├── images.ts │ │ └── index.ts ├── resource.ts ├── shims │ └── node.ts ├── spark │ ├── index.ts │ ├── resource.ts │ └── resources │ │ ├── chat │ │ ├── chat.ts │ │ ├── completions.ts │ │ └── index.ts │ │ ├── images.ts │ │ └── index.ts ├── streaming.ts ├── util.ts └── vyro │ ├── index.ts │ ├── resource.ts │ └── resources │ ├── images.ts │ └── index.ts ├── tsconfig.json ├── tsconfig.node.json └── vite.config.mts /.cspell.yaml: -------------------------------------------------------------------------------- 1 | version: "0.2" 2 | ignorePaths: 3 | - archive 4 | - dist 5 | - node_modules 6 | dictionaries: 7 | - en_US 8 | - softwareTerms 9 | - bash 10 | - node 11 | - npm 12 | - html 13 | - css 14 | - typescript 15 | - filetypes 16 | words: 17 | - aigc 18 | - aistudio 19 | - baichuan 20 | - chatcompletion 21 | - chatglm 22 | - dashscope 23 | - embo 24 | - erniebot 25 | - hunyuan 26 | - inpaint 27 | - inpainted 28 | - jiti 29 | - lineart 30 | - logprobs 31 | - longcontext 32 | - openai 33 | - openpose 34 | - permissioning 35 | - postpublish 36 | - qwen 37 | - qwenai 38 | - sdxl 39 | - trivago 40 | - tsdoc 41 | - vyro 42 | - wanx 43 | - wechat 44 | - wechaty 45 | - Weightage 46 | - yiyan 47 | ignoreWords: 48 | - zhengxs 49 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: https://EditorConfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | charset = utf-8 7 | indent_size = 2 8 | indent_style = space 9 | indent_size = 2 10 | tab_width = 2 11 | continuation_indent_size = 4 # IntelliJ family IDEs 12 | 
insert_final_newline = true 13 | trim_trailing_whitespace = true 14 | 15 | [*.md] 16 | trim_trailing_whitespace = false 17 | 18 | [Makefile] 19 | indent_size = 4 20 | indent_style = tab 21 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # 百度飞桨平台分发的文心一言大模型 2 | # See https://aistudio.baidu.com/index/accessToken 3 | EB_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxx" 4 | 5 | # 阿里通义千问大模型 6 | # See https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key 7 | QWEN_API_KEY="sk-xxxxxxxxxxxxxxxxxxxxx" 8 | 9 | # 讯飞星火认知大模型 10 | # See https://xinghuo.xfyun.cn/sparkapi 11 | SPARK_APP_ID="xxxxxx" 12 | SPARK_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" 13 | SPARK_API_SECRET="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" 14 | 15 | # 腾讯混元大模型 16 | # see https://console.cloud.tencent.com/cam/capi 17 | HUNYUAN_APP_ID="xxxxxx" 18 | HUNYUAN_SECRET_ID="AKIDxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" 19 | HUNYUAN_SECRET_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" 20 | 21 | # Imagine Art 22 | # see https://platform.imagine.art/dashboard 23 | VYRO_API_KEY="vk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" 24 | 25 | # Minimax 26 | # See https://api.minimax.chat/user-center/basic-information/interface-key 27 | MINIMAX_API_ORG="xxxxxxxx" 28 | MINIMAX_API_KEY="eyJhxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" 29 | 30 | # Google Gemini AI 31 | # Documentation: 32 | # https://ai.google.dev/tutorials/ai-studio_quickstart 33 | # https://makersuite.google.com/app/apikey 34 | GEMINI_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" 35 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | pnpm-lock.yaml merge=text 2 | shrinkwrap.yaml merge=binary 3 | npm-shrinkwrap.json merge=binary 4 | yarn.lock merge=binary 5 | 6 | *.json linguist-language=JSON-with-Comments 7 | 8 | * 
text=auto eol=lf 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_store 2 | .history 3 | 4 | node_modules 5 | 6 | # local env files 7 | .env.local 8 | .env.*.local 9 | 10 | # Log files 11 | npm-debug.log* 12 | yarn-debug.log* 13 | yarn-error.log* 14 | pnpm-debug.log* 15 | 16 | # Editor directories and files 17 | .idea 18 | # .vscode 19 | *.suo 20 | *.ntvs* 21 | *.njsproj 22 | *.sln 23 | *.sw? 24 | 25 | # build artifacts 26 | dist 27 | dist-* 28 | temp/ 29 | # doc/ 30 | 31 | .env 32 | .cache 33 | *.tsbuildinfo 34 | *.sqlite 35 | package/ 36 | 37 | yarn.lock 38 | package-lock.json 39 | -------------------------------------------------------------------------------- /.husky/pre-commit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | . "$(dirname -- "$0")/_/husky.sh" 3 | 4 | npx lint-staged 5 | -------------------------------------------------------------------------------- /.lintstagedrc: -------------------------------------------------------------------------------- 1 | { 2 | "*.ts": [ 3 | "prettier --check" 4 | ], 5 | "*.md": [ 6 | "prettier --check" 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /.node-version: -------------------------------------------------------------------------------- 1 | 18.x.x 2 | -------------------------------------------------------------------------------- /.npmrc: -------------------------------------------------------------------------------- 1 | package-lock=false 2 | 3 | registry=https://registry.npmmirror.com 4 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | dist/ 2 | dist-*/ 3 | temp/ 4 | node_modules 5 | 
-------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "semi": true, 3 | "singleQuote": true, 4 | "printWidth": 120, 5 | "arrowParens": "avoid", 6 | "trailingComma": "all", 7 | "plugins": [ 8 | "@trivago/prettier-plugin-sort-imports" 9 | ], 10 | "importOrder": [ 11 | "^node:(.*)$", 12 | "", 13 | "^@/(.*)$", 14 | "^[./]" 15 | ], 16 | "importOrderParserPlugins": [ 17 | "typescript", 18 | "explicitResourceManagement" 19 | ], 20 | "importOrderSeparation": true, 21 | "importOrderCaseInsensitive": true, 22 | "importOrderSortSpecifiers": true, 23 | "overrides": [ 24 | { 25 | "files": ".prettierrc", 26 | "options": { 27 | "parser": "json" 28 | } 29 | } 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // 使用 IntelliSense 了解相关属性。 3 | // 悬停以查看现有属性的描述。 4 | // 欲了解更多信息,请访问: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Run Files", 9 | "type": "node", 10 | "request": "launch", 11 | "program": "${file}", 12 | "runtimeArgs": ["--no-warnings", "--require", "dotenv/config"], 13 | "runtimeExecutable": "tsx", 14 | "console": "integratedTerminal", 15 | "internalConsoleOptions": "neverOpen", 16 | "skipFiles": ["/**", "${workspaceFolder}/node_modules/**"] 17 | }, 18 | { 19 | "name": "Watch Files", 20 | "type": "node", 21 | "request": "launch", 22 | "program": "${file}", 23 | "runtimeArgs": ["--no-warnings", "--require", "dotenv/config", "--watch"], 24 | "runtimeExecutable": "tsx", 25 | "console": "integratedTerminal", 26 | "internalConsoleOptions": "neverOpen", 27 | "skipFiles": ["/**", "${workspaceFolder}/node_modules/**"] 28 | } 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- 
/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 XianSen Zheng 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | 3 |

AI SDK

4 | 5 | 集成 百度文心一言,阿里通义千问,腾讯混元助手 和 讯飞星火认知 等国内大模型的 API,并且适配 OpenAI 的输入与输出。 6 | 7 | [![][npm-types-shield]][npm-types-link] 8 | [![][npm-release-shield]][npm-release-link] 9 | [![][npm-downloads-shield]][npm-downloads-link] 10 | [![][github-releasedate-shield]][github-releasedate-link]
11 | [![][github-contributors-shield]][github-contributors-link] 12 | [![][github-forks-shield]][github-forks-link] 13 | [![][github-stars-shield]][github-stars-link] 14 | [![][github-issues-shield]][github-issues-link] 15 | [![][github-license-shield]][github-license-link] 16 | 17 | [Report Bug][github-issues-link] · [Request Feature][github-issues-link] 18 | 19 | ![](https://raw.githubusercontent.com/andreasbm/readme/master/assets/lines/rainbow.png) 20 | 21 |
22 | 23 |
24 | 目录树 25 | 26 | #### TOC 27 | 28 | - [✨ 功能特性](#-功能特性) 29 | - [📖 使用文档](#-使用文档) 30 | - [📦 安装](#-安装) 31 | - [🔗 更多工具](#-更多工具) 32 | - [🤝 参与贡献](#-参与贡献) 33 | 34 |
35 | 36 |
37 | 38 | ## ✨ 功能特性 39 | 40 | - 🚀 **快速开始**: 简单易用,只需一次学习,即可快速接入各种大模型。 41 | - 💻 **多模型集成**: 集成了多种国内大模型,为用户提供丰富的对话模型选择,满足不同场景的需求。 42 | - 💎 **体验友好**: 无需深入学习,只需提供必要的 API Key,剩下的交给 SDK 处理。 43 | - 🔌 **完美适配**: 适配 OpenAI 的输入与输出格式,确保与其他模型的对话无缝对接。 44 | 45 |
46 | 47 | [![][back-to-top]](#readme-top) 48 | 49 |
50 | 51 | ## 📖 使用文档 52 | 53 | - [文心一言](./doc/ernie.md) 54 | - [通义千问](./doc/qwen.md) 55 | - [讯飞星火](./doc/spark.md) 56 | - [混元助手](./doc/hunyuan.md) 57 | - [Minimax](./doc/minimax.md) 58 | - [ImagineArt](./doc/vyro.md) 59 | 60 |
61 | 62 | [![][back-to-top]](#readme-top) 63 | 64 |
65 | 66 | ## 📦 安装 67 | 68 | 要安装 `@zhengxs/ai`,请运行以下命令: 69 | 70 | ```bash 71 | $ pnpm install @zhengxs/ai 72 | ``` 73 | 74 |
75 | 76 | [![][back-to-top]](#readme-top) 77 | 78 |
79 | 80 | ## 👋 使用 81 | 82 | 在这里获取你的 [accessToken](https://aistudio.baidu.com/index/accessToken) 值。 83 | 84 | ```ts 85 | import { ErnieAI } from '@zhengxs/ai'; 86 | 87 | const client = new ErnieAI({ 88 | apiKey: 'My API Key', // defaults to process.env["EB_API_KEY"] 89 | }); 90 | 91 | async function main() { 92 | const chatCompletion = await client.chat.completions.create({ 93 | model: 'ernie-bot-turbo', 94 | messages: [{ role: 'user', content: 'Say this is a test' }], 95 | }); 96 | } 97 | 98 | main(); 99 | ``` 100 | 101 | ### 支持流式 102 | 103 | 使用与 OpenAI 的 SDK 完全一致。 104 | 105 | ```ts 106 | import { ErnieAI } from '@zhengxs/ai'; 107 | 108 | const client = new ErnieAI(); 109 | 110 | async function main() { 111 | const stream = await client.chat.completions.create({ 112 | model: 'ernie-bot-turbo', 113 | messages: [{ role: 'user', content: 'Say this is a test' }], 114 | stream: true, 115 | }); 116 | 117 | for await (const chunk of stream) { 118 | process.stdout.write(chunk.choices[0]?.delta?.content || ''); 119 | } 120 | } 121 | 122 | main(); 123 | ``` 124 | 125 | ## ⌨️ 本地开发 126 | 127 | 可以使用 GitHub Codespaces 进行在线开发: 128 | 129 | [![][github-codespace-shield]][github-codespace-link] 130 | 131 | 或者使用以下命令进行本地开发: 132 | 133 | ```bash 134 | $ git clone https://github.com/zhengxs2018/ai.git 135 | $ cd ai 136 | $ pnpm install 137 | $ pnpm task 138 | ``` 139 | 140 | **注意** 可以使用 task 直接运行示例的 ts 文件,如 `pnpm task ai/samples/qwen/chat/create.ts`。 141 | 142 |
143 | 144 | [![][back-to-top]](#readme-top) 145 | 146 |
147 | 148 | ## 🔗 更多工具 149 | 150 | - **[🤖 wechaty-plugin-assistant](https://github.com/zhengxs2018/wechaty-plugin-assistant)** - 只需三步,就可以快速实现一个智能对话机器人。 151 | 152 |
153 | 154 | [![][back-to-top]](#readme-top) 155 | 156 |
157 | 158 | ## 🤝 参与贡献 159 | 160 | 我们非常欢迎各种形式的贡献。如果你对贡献代码感兴趣,可以查看我们的 GitHub [Issues][github-issues-link] 大展身手,向我们展示你的奇思妙想。 161 | 162 | [![][pr-welcome-shield]][pr-welcome-link] 163 | 164 | [![][github-contrib-shield]][github-contrib-link] 165 | 166 |
167 | 168 | [![][back-to-top]](#readme-top) 169 | 170 |
171 | 172 | ## 🕘 Star History 173 | 174 | [![Star History Chart](https://api.star-history.com/svg?repos=zhengxs2018/ai&type=Date)](https://star-history.com/#zhengxs2018/ai&Date) 175 | 176 |
177 | 178 | [![][back-to-top]](#readme-top) 179 | 180 |
181 | 182 | --- 183 | 184 | #### 📝 License 185 | 186 | Copyright © 2023 [zhengxs2018][profile-link].
187 | This project is [MIT](./LICENSE) licensed. 188 | 189 |
190 | 191 | [![][back-to-top]](#readme-top) 192 | 193 |
194 | 195 | [profile-link]: https://github.com/zhengxs2018 196 | [back-to-top]: https://img.shields.io/badge/-BACK_TO_TOP-black?style=flat-square 197 | [aliyun-dashscope-model-list]: https://help.aliyun.com/zh/dashscope/developer-reference/model-square/ 198 | [npm-release-shield]: https://img.shields.io/npm/v/@zhengxs/ai?color=369eff&labelColor=black&logo=npm&logoColor=white&style=flat-square 199 | [npm-release-link]: https://www.npmjs.com/package/@zhengxs/ai 200 | [npm-downloads-shield]: https://img.shields.io/npm/dt/@zhengxs/ai?labelColor=black&style=flat-square 201 | [npm-downloads-link]: https://www.npmjs.com/package/@zhengxs/ai 202 | [npm-types-shield]: https://img.shields.io/npm/types/@zhengxs/ai?labelColor=black&style=flat-square 203 | [npm-types-link]: https://www.npmjs.com/package/@zhengxs/ai 204 | [github-issues-link]: https://github.com/zhengxs2018/ai/issues 205 | [pr-welcome-shield]: https://img.shields.io/badge/%F0%9F%A4%AF%20PR%20WELCOME-%E2%86%92-ffcb47?labelColor=black&style=for-the-badge 206 | [pr-welcome-link]: https://github.com/zhengxs2018/ai/pulls 207 | [github-contrib-shield]: https://contrib.rocks/image?repo=zhengxs2018%2Fai 208 | [github-contrib-link]: https://github.com/zhengxs2018/ai/graphs/contributors 209 | [github-codespace-shield]: https://github.com/codespaces/badge.svg 210 | [github-codespace-link]: https://codespaces.new/zhengxs2018/ai 211 | [npm-release-shield]: https://img.shields.io/npm/v/@zhengxs/ai?color=369eff&labelColor=black&logo=npm&logoColor=white&style=flat-square 212 | [npm-release-link]: https://www.npmjs.com/package/@zhengxs/ai 213 | [github-releasedate-shield]: https://img.shields.io/github/release-date/zhengxs2018/ai?labelColor=black&style=flat-square 214 | [github-releasedate-link]: https://github.com/zhengxs2018/ai/releases 215 | [github-contributors-shield]: https://img.shields.io/github/contributors/zhengxs2018/ai?color=c4f042&labelColor=black&style=flat-square 216 | [github-contributors-link]: 
https://github.com/zhengxs2018/ai/graphs/contributors 217 | [github-forks-shield]: https://img.shields.io/github/forks/zhengxs2018/ai?color=8ae8ff&labelColor=black&style=flat-square 218 | [github-forks-link]: https://github.com/zhengxs2018/ai/network/members 219 | [github-stars-shield]: https://img.shields.io/github/stars/zhengxs2018/ai?color=ffcb47&labelColor=black&style=flat-square 220 | [github-stars-link]: https://github.com/zhengxs2018/ai/network/stargazers 221 | [github-issues-shield]: https://img.shields.io/github/issues/zhengxs2018/ai?color=ff80eb&labelColor=black&style=flat-square 222 | [github-issues-link]: https://github.com/zhengxs2018/ai/issues 223 | [github-license-shield]: https://img.shields.io/github/license/zhengxs2018/ai?color=white&labelColor=black&style=flat-square 224 | [github-license-link]: https://github.com/zhengxs2018/ai/blob/main/LICENSE 225 | -------------------------------------------------------------------------------- /doc/ernie.md: -------------------------------------------------------------------------------- 1 | # 文心一言 2 | 3 | 在这里获取你的 [accessToken](https://aistudio.baidu.com/index/accessToken) 值。 4 | 5 | ## 通用对话 6 | 7 | 当前支持 `ernie-bot | ernie-bot-turbo | ernie-bot-4 | ernie-bot-8k` 模型。 8 | 9 | ```ts 10 | import { ErnieAI } from '@zhengxs/ai'; 11 | 12 | const client = new ErnieAI({ 13 | apiKey: 'My API Key', // defaults to process.env["EB_API_KEY"] 14 | }); 15 | 16 | async function main() { 17 | const chatCompletion = await client.chat.completions.create({ 18 | model: 'ernie-bot-turbo', 19 | messages: [{ role: 'user', content: 'Say this is a test' }], 20 | }); 21 | } 22 | 23 | main(); 24 | ``` 25 | 26 | ### 支持流式 27 | 28 | ```ts 29 | import { ErnieAI } from '@zhengxs/ai'; 30 | 31 | const client = new ErnieAI(); 32 | 33 | async function main() { 34 | const stream = await client.chat.completions.create({ 35 | model: 'ernie-bot-turbo', 36 | messages: [{ role: 'user', content: 'Say this is a test' }], 37 | stream: true, 38 | }); 39 | 
40 | for await (const chunk of stream) { 41 | process.stdout.write(chunk.choices[0]?.delta?.content || ''); 42 | } 43 | } 44 | 45 | main(); 46 | ``` 47 | 48 | ## 语义向量 49 | 50 | 仅支持 `ernie-text-embedding` 模型。 51 | 52 | ```ts 53 | import { ErnieAI } from '@zhengxs/ai'; 54 | 55 | const client = new ErnieAI(); 56 | 57 | async function main() { 58 | const response = await client.embeddings.create({ 59 | model: 'ernie-text-embedding', 60 | input: 'Your text string goes here', 61 | }); 62 | 63 | console.log(response.data[0].embedding); 64 | } 65 | 66 | main(); 67 | ``` 68 | -------------------------------------------------------------------------------- /doc/hunyuan.md: -------------------------------------------------------------------------------- 1 | # 混元助手 2 | 3 | 在这里获取你的 [API密钥](https://console.cloud.tencent.com/cam/capi) 。 4 | 5 | ## 通用对话 6 | 7 | 当前支持 `hunyuan` 模型。 8 | 9 | ```ts 10 | import { HunYuanAI } from '@zhengxs/ai'; 11 | 12 | const client = new HunYuanAI({ 13 | appId: 'My APP ID', // defaults to process.env["HUNYUAN_APP_ID"] 14 | secretId: 'My Secret ID', // defaults to process.env["HUNYUAN_SECRET_ID"] 15 | secretKey: 'My Secret Key', // defaults to process.env["HUNYUAN_SECRET_KEY"] 16 | }); 17 | 18 | async function main() { 19 | const chatCompletion = await client.chat.completions.create({ 20 | model: 'hunyuan', 21 | messages: [{ role: 'user', content: 'Say this is a test' }], 22 | }); 23 | } 24 | 25 | main(); 26 | ``` 27 | 28 | ### 支持流式 29 | 30 | ```ts 31 | import { HunYuanAI } from '@zhengxs/ai'; 32 | 33 | const client = new HunYuanAI(); 34 | 35 | async function main() { 36 | const stream = await client.chat.completions.create({ 37 | model: 'hunyuan', 38 | messages: [{ role: 'user', content: 'Say this is a test' }], 39 | stream: true, 40 | }); 41 | 42 | for await (const chunk of stream) { 43 | process.stdout.write(chunk.choices[0]?.delta?.content || ''); 44 | } 45 | } 46 | 47 | main(); 48 | ``` 49 | 
-------------------------------------------------------------------------------- /doc/minimax.md: -------------------------------------------------------------------------------- 1 | # Minimax 2 | 3 | 在这里获取你的 [apiKey](https://api.minimax.chat/user-center/basic-information/interface-key) 。 4 | 5 | ## 通用对话 6 | 7 | 当前支持 `abab5-chat | abab5.5-chat | abab5-chat-pro` 模型。 8 | 9 | ```ts 10 | import { MinimaxAI } from '@zhengxs/ai'; 11 | 12 | const client = new MinimaxAI({ 13 | orgId: 'My API ORG', // defaults to process.env["MINIMAX_API_ORG"] 14 | apiKey: 'My API Key', // defaults to process.env["MINIMAX_API_KEY"] 15 | }); 16 | 17 | async function main() { 18 | const chatCompletion = await client.chat.completions.create({ 19 | model: 'abab5-chat', 20 | messages: [{ role: 'user', content: 'Say this is a test' }], 21 | }); 22 | } 23 | 24 | main(); 25 | ``` 26 | 27 | ### 支持流式 28 | 29 | ```ts 30 | import { MinimaxAI } from '@zhengxs/ai'; 31 | 32 | const client = new MinimaxAI(); 33 | 34 | async function main() { 35 | const stream = await client.chat.completions.create({ 36 | model: 'abab5-chat', 37 | messages: [{ role: 'user', content: 'Say this is a test' }], 38 | stream: true, 39 | }); 40 | 41 | for await (const chunk of stream) { 42 | process.stdout.write(chunk.choices[0]?.delta?.content || ''); 43 | } 44 | } 45 | 46 | main(); 47 | ``` 48 | 49 | ## 语义向量 50 | 51 | 目前仅支持 `embo-01` 模型 52 | 53 | > [!IMPORTANT] 54 | > MiniMax 使用时需要区分是用于 `db` 存储,还是 `query` 查询,默认是 `query`。 55 | 56 | ```ts 57 | import { MinimaxAI } from '@zhengxs/ai'; 58 | 59 | const client = new MinimaxAI(); 60 | 61 | async function main() { 62 | const response = await client.embeddings.create({ 63 | model: 'embo-01', 64 | type: 'query', // Default query 65 | input: 'Your text string goes here', 66 | }); 67 | 68 | console.log(response.data[0].embedding); 69 | } 70 | 71 | main(); 72 | ``` 73 | 74 | ## 语音合成 75 | 76 | 目前仅支持 `speech-01` 模型 77 | 78 | > [!IMPORTANT] 79 | > MiniMax 使用时需要区分是用于 `db` 存储,还是 `query` 查询,默认是
`query`。 80 | 81 | ```ts 82 | import { createWriteStream } from 'node:fs'; 83 | import path from 'node:path'; 84 | import { Readable } from 'node:stream'; 85 | 86 | import { MinimaxAI } from '@zhengxs/ai'; 87 | 88 | const client = new MinimaxAI(); 89 | 90 | async function main() { 91 | const response = await client.audio.speech.create({ 92 | model: 'speech-01', 93 | input: '推荐一些美食', 94 | voice: 'male-qn-qingse', 95 | }); 96 | 97 | // 文件保存路径 98 | const savePath = path.resolve('path/to/audio.mp3'); 99 | 100 | // 写入文件内容 101 | Readable.fromWeb(response).pipe(createWriteStream(savePath)); 102 | } 103 | 104 | main(); 105 | ``` 106 | -------------------------------------------------------------------------------- /doc/qwen.md: -------------------------------------------------------------------------------- 1 | # 通义千问 2 | 3 | 在这里获取你的 4 | [apiKey](https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key) 5 | 值。 6 | 7 | ## 通用对话 8 | 9 | 当前支持大部分千问,以及其他系列的同类模型。 10 | 11 | ```ts 12 | import { QWenAI } from '@zhengxs/ai'; 13 | 14 | const client = new QWenAI({ 15 | apiKey: 'My API Key', // defaults to process.env["QWEN_API_KEY"] 16 | }); 17 | 18 | async function main() { 19 | const chatCompletion = await client.chat.completions.create({ 20 | model: 'qwen-turbo', 21 | messages: [{ role: 'user', content: 'Say this is a test' }], 22 | }); 23 | } 24 | 25 | main(); 26 | ``` 27 | 28 | ### 支持流式 29 | 30 | ```ts 31 | import { QWenAI } from '@zhengxs/ai'; 32 | 33 | const client = new QWenAI(); 34 | 35 | async function main() { 36 | const stream = await client.chat.completions.create({ 37 | model: 'qwen-turbo', 38 | messages: [{ role: 'user', content: 'Say this is a test' }], 39 | stream: true, 40 | }); 41 | 42 | for await (const chunk of stream) { 43 | process.stdout.write(chunk.choices[0]?.delta?.content || ''); 44 | } 45 | } 46 | 47 | main(); 48 | ``` 49 | 50 | ## 通义千问 VL 51 | 52 | 
[通义千问VL](https://help.aliyun.com/zh/dashscope/developer-reference/qwen-vl-plus) 开源视觉理解大模型Qwen-VL于2023年12月1日发布重大更新,不仅大幅提升通用OCR、视觉推理、中文文本理解基础能力,还能处理各种分辨率和规格的图像,甚至能“看图做题”。 53 | 54 | ```js 55 | import { createWriteStream } from 'node:fs'; 56 | import path from 'node:path'; 57 | 58 | import { QWenAI } from '@zhengxs/ai'; 59 | 60 | const client = new QWenAI(); 61 | 62 | async function main() { 63 | const chatCompletion = await client.chat.completions.create({ 64 | model: 'qwen-vl-plus', 65 | messages: [ 66 | { 67 | role: 'user', 68 | content: '你好', 69 | }, 70 | ], 71 | }); 72 | 73 | console.log(chatCompletion.choices[0].message.content); 74 | } 75 | 76 | main(); 77 | ``` 78 | 79 | ### 携带图片 80 | 81 | 你可以使用数组携带图片和文字 82 | 83 | > [!WARNING] 84 | > 目前发现携带的图片如果无法访问,会直接抛 400 错误 85 | 86 | ```js 87 | import { createWriteStream } from 'node:fs'; 88 | import path from 'node:path'; 89 | 90 | import { QWenAI } from '@zhengxs/ai'; 91 | 92 | const client = new QWenAI(); 93 | 94 | async function main() { 95 | const chatCompletion = await client.chat.completions.create({ 96 | model: 'qwen-vl-plus', 97 | messages: [ 98 | { 99 | role: 'user', 100 | content: [ 101 | { 102 | type: 'image_url', 103 | image_url: { url: 'https://www.baidu.com/img/PCfb_5bf082d29588c07f842ccde3f97243ea.png' }, 104 | }, 105 | { type: 'text', text: '这是什么图?' 
}, 106 | ], 107 | }, 108 | ], 109 | }); 110 | 111 | console.log(chatCompletion.choices[0].message.content); 112 | //=> "这是一张百度的logo图片。" 113 | } 114 | 115 | main(); 116 | ``` 117 | 118 | ## 语义向量 119 | 120 | 支持 `text-embedding-v1` 和 `text-embedding-v2` 模型。 121 | 122 | ```ts 123 | import { QWenAI } from '@zhengxs/ai'; 124 | 125 | const client = new QWenAI(); 126 | 127 | const embedding = await client.embeddings.create({ 128 | model: 'text-embedding-v1', 129 | input: ['推荐一些美食', '给我讲个故事'], 130 | }); 131 | 132 | console.log(embedding.data); 133 | ``` 134 | 135 | ## 文生图 136 | 137 | 支持 `wanx-v1`, `stable-diffusion-v1.5` 和 `stable-diffusion-xl` 模型; 138 | 139 | ```js 140 | import path from 'node:path'; 141 | 142 | import { QWenAI } from '@zhengxs/ai'; 143 | 144 | const client = new QWenAI(); 145 | 146 | async function main() { 147 | const images = await client.images.generate({ 148 | prompt: 'cat', 149 | }); 150 | 151 | for (const image of images.data) { 152 | console.log(image.url); 153 | } 154 | } 155 | 156 | main(); 157 | ``` 158 | -------------------------------------------------------------------------------- /doc/spark.md: -------------------------------------------------------------------------------- 1 | # 讯飞星火 2 | 3 | 通过 [星火API](https://xinghuo.xfyun.cn/sparkapi) 申请服务,进入 **服务管理 -> 模型版本首页**,通过模型首页拿到 **服务接口认证信息**。 4 | 5 | > [!IMPORTANT] 6 | > 注意:不同模型的 **服务接口认证信息** 是独立的。 7 | 8 | ## 在 Node.js 中使用 9 | 10 | 星火模型使用的是 **WebSocket** 技术,而 SDK 为了适配 Edge 环境,使用原生的 11 | [globalThis.WebSocket][mdn:WebSocket] 对象. 
12 | 13 | 所以在 Node.js 环境,需要安装使用 [ws][npm:ws] 模块填充当前环境。 14 | 15 | ### 安装前置依赖 16 | 17 | ```sh 18 | # With NPM 19 | $ npm i -S ws 20 | 21 | # With YARN 22 | $ yarn add ws 23 | 24 | # With PNPM 25 | $ pnpm add ws 26 | ``` 27 | 28 | ### 填充到全局 29 | 30 | ```js 31 | import { WebSocket } from 'ws'; 32 | 33 | // 需要在 new SparkAI 之前调用 34 | globalThis.WebSocket = WebSocket; 35 | ``` 36 | 37 | ## 通用对话 38 | 39 | 当前支持 `spark-1.5 | spark-2 | spark-3` 模型。 40 | 41 | ```ts 42 | import { SparkAI } from '@zhengxs/ai'; 43 | 44 | const client = new SparkAI({ 45 | appId: 'My App ID', // defaults to process.env["SPARK_APP_ID"] 46 | apiKey: 'My API Key', // defaults to process.env["SPARK_API_KEY"] 47 | apiSecret: 'My API Secret', // defaults to process.env["SPARK_API_SECRET"] 48 | }); 49 | 50 | async function main() { 51 | const chatCompletion = await client.chat.completions.create({ 52 | model: 'spark-1.5', 53 | messages: [{ role: 'user', content: 'Say this is a test' }], 54 | }); 55 | } 56 | 57 | main(); 58 | ``` 59 | 60 | ### 支持流式 61 | 62 | ```ts 63 | import { SparkAI } from '@zhengxs/ai'; 64 | 65 | const client = new SparkAI(); 66 | 67 | async function main() { 68 | const stream = await client.chat.completions.create({ 69 | model: 'spark-1.5', 70 | messages: [{ role: 'user', content: 'Say this is a test' }], 71 | stream: true, 72 | }); 73 | 74 | for await (const chunk of stream) { 75 | process.stdout.write(chunk.choices[0]?.delta?.content || ''); 76 | } 77 | } 78 | 79 | main(); 80 | ``` 81 | 82 | ## 图片生成 83 | 84 | 因为没有资格,虽然有实现,但无法测试。 85 | 86 | [mdn:WebSocket]: https://developer.mozilla.org/zh-CN/docs/Web/API/WebSocket 87 | [npm:ws]: https://github.com/websockets/ws 88 | -------------------------------------------------------------------------------- /doc/vyro.md: -------------------------------------------------------------------------------- 1 | # Vyro 2 | 3 | 可以通过其自家产品 [imagine.art](https://www.imagine.art) 体验,在 [这里](https://platform.imagine.art/dashboard) 获取你的 API Key。 4 | 5 | > 
[!TIP] 6 | > 此服务不支持聊天 7 | 8 | ## 文生图 9 | 10 | 支持 `imagine-v5 | anime-v5 | imagine-v4.1 | imagine-v4 | imagine-v3 | imagine-v1 | realistic | anime | portrait | sdxl-1.0` 模型; 11 | 12 | ```js 13 | import path from 'node:path'; 14 | import { Readable } from 'node:stream'; 15 | import { createWriteStream } from 'node:fs'; 16 | 17 | import { VYroAI } from '@zhengxs/ai'; 18 | 19 | const client = new VYroAI({ 20 | apiKey: 'My API Key', // defaults to process.env["VYRO_API_KEY"] 21 | }); 22 | 23 | async function main() { 24 | const image = await client.images.generate({ 25 | model: 'imagine-v5', 26 | prompt: 'cat', 27 | }); 28 | 29 | const data = image.data[0].binary! 30 | 31 | // 图片保存路径 32 | const savePath = path.resolve('path/to/file.png'); 33 | 34 | // 写入文件内容 35 | Readable.fromWeb(data).pipe(createWriteStream(savePath)); 36 | } 37 | 38 | main(); 39 | ``` 40 | 41 | ### 在 Edge 环境中使用 42 | 43 | 以 Next.js 为例 44 | 45 | ```js 46 | // src/app/api/images/generate.ts 47 | 48 | export async function POST(req: Request) { 49 | const { prompt } = await req.json(); 50 | 51 | const response = await client.images.generate({ 52 | model: 'imagine-v5', 53 | prompt: prompt, 54 | }); 55 | 56 | const image = response.data[0].binary!; 57 | 58 | // Respond with the stream 59 | return new Response(image as globalThis.ReadableStream, { 60 | headers: { 61 | 'Content-Type': 'image/png', 62 | }, 63 | }); 64 | } 65 | ``` 66 | 67 | 在浏览器中使用 68 | 69 | ```js 70 | const blob = await fetch('http://localhost:3000/api/images/generate', { 71 | method: 'POST', 72 | headers: { 73 | accept: '*/*', 74 | 'content-type': 'application/json;', 75 | }, 76 | body: JSON.stringify({ prompt: 'cat' }), 77 | }).then(response => response.blob()); 78 | 79 | // 转为 image url 80 | const imageUrl = URL.createObjectURL(blob); 81 | 82 | // 创建或查询 img 元素 83 | const imageElement = document.createElement('img'); 84 | 85 | // 显示图片 86 | imageElement.src = imageUrl; 87 | 88 | document.body.prepend(imageElement); 89 | ``` 90 | 
-------------------------------------------------------------------------------- /eslintrc.config.cjs: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | const { defineConfig } = require('eslint-define-config'); 3 | 4 | module.exports = defineConfig({ 5 | root: true, 6 | parserOptions: { 7 | sourceType: 'module', 8 | ecmaVersion: 'latest', 9 | }, 10 | overrides: [ 11 | { 12 | extends: ['plugin:@typescript-eslint/recommended'], 13 | plugins: ['@typescript-eslint', 'import', 'tsdoc'], 14 | parser: '@typescript-eslint/parser', 15 | parserOptions: { 16 | ecmaVersion: 'latest', 17 | sourceType: 'module', 18 | }, 19 | files: ['*.ts'], 20 | rules: { 21 | '@typescript-eslint/ban-ts-comment': 'off', 22 | '@typescript-eslint/no-non-null-assertion': 'off', 23 | '@typescript-eslint/no-explicit-any': 'off', 24 | '@typescript-eslint/no-namespace': 'off', 25 | 'tsdoc/syntax': 'warn', 26 | }, 27 | }, 28 | { 29 | files: ['test', '__test__', '*.{spec,test}.ts'], 30 | rules: { 31 | '@typescript-eslint/no-var-requires': 'off', 32 | 'tsdoc/syntax': 'off', 33 | }, 34 | }, 35 | ], 36 | ignorePatterns: ['dist'], 37 | }); 38 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@zhengxs/ai", 3 | "version": "0.6.2", 4 | "description": "llm sdk", 5 | "keywords": [ 6 | "ai", 7 | "llm", 8 | "qwen", 9 | "ernie-bot", 10 | "hunyuan", 11 | "minimax" 12 | ], 13 | "author": "zhengxs2018 ", 14 | "homepage": "https://github.com/zhengxs2018/ai#readme", 15 | "repository": { 16 | "type": "git", 17 | "url": "https://github.com/zhengxs2018/ai" 18 | }, 19 | "bugs": { 20 | "url": "https://github.com/zhengxs2018/ai/issues" 21 | }, 22 | "license": "MIT", 23 | "type": "commonjs", 24 | "main": "./dist/index.cjs", 25 | "module": "./dist/index.mjs", 26 | "types": "./dist-types/index.d.ts", 27 | "sideEffects": [ 28 | 
"./dist/shims/node.cjs", 29 | "./dist/shims/node.mjs" 30 | ], 31 | "exports": { 32 | "./package.json": "./package.json", 33 | "./shims/node": { 34 | "import": "./dist/shims/node.mjs", 35 | "require": "./dist/shims/node.cjs", 36 | "types": "./dist-types/shims/node.d.ts" 37 | }, 38 | ".": { 39 | "import": "./dist/index.mjs", 40 | "require": "./dist/index.cjs", 41 | "types": "./dist-types/index.d.ts" 42 | } 43 | }, 44 | "typesVersions": { 45 | "*": { 46 | "*": [ 47 | "./dist-types/*", 48 | "./dist-types/index.d.ts" 49 | ] 50 | } 51 | }, 52 | "files": [ 53 | "dist", 54 | "dist-types" 55 | ], 56 | "publishConfig": { 57 | "access": "public", 58 | "directory": "package", 59 | "registry": "https://registry.npmjs.org" 60 | }, 61 | "clean-publish": { 62 | "withoutPublish": true, 63 | "tempDir": "package" 64 | }, 65 | "scripts": { 66 | "prepare": "husky install", 67 | "task": "tsx --no-warnings -r dotenv/config", 68 | "inspect": "tsx --no-warnings --inspect -r dotenv/config", 69 | "build": "vite build", 70 | "lint": "eslint src samples vite.config.mts", 71 | "lint:fix": "eslint src samples vite.config.mts --fix --fix-type [problem,suggestion]", 72 | "fmt": "prettier src samples vite.config.mts doc README.md --log-level warn", 73 | "prepublishOnly": "pnpm build && rm -rf ./package && clean-publish", 74 | "postpublish": "rm -rf ./package" 75 | }, 76 | "engines": { 77 | "node": ">=18.0.0" 78 | }, 79 | "packageManager": "pnpm@9.1.2", 80 | "optionalDependencies": { 81 | "ws": "^8.17.0" 82 | }, 83 | "dependencies": { 84 | "openai": "^4.47.1" 85 | }, 86 | "devDependencies": { 87 | "@trivago/prettier-plugin-sort-imports": "^4.3.0", 88 | "@types/node": "^20.12.12", 89 | "@types/ws": "^8.5.10", 90 | "@typescript-eslint/eslint-plugin": "^7.10.0", 91 | "@typescript-eslint/parser": "^7.10.0", 92 | "clean-publish": "^5.0.0", 93 | "dotenv": "^16.4.5", 94 | "eslint": "^9.3.0", 95 | "eslint-config-prettier": "^9.1.0", 96 | "eslint-define-config": "^2.1.0", 97 | "eslint-plugin-import": 
"^2.29.1", 98 | "eslint-plugin-prettier": "^5.1.3", 99 | "eslint-plugin-tsdoc": "^0.2.17", 100 | "husky": "^9.0.11", 101 | "lint-staged": "^15.2.4", 102 | "prettier": "^3.2.5", 103 | "tsx": "^4.11.0", 104 | "typescript": "~5.4.5", 105 | "vite": "^5.2.11", 106 | "vite-plugin-checker": "^0.6.4", 107 | "vite-plugin-dts": "^3.9.1", 108 | "vite-plugin-externalize-deps": "^0.8.0", 109 | "ws": "^8.17.0" 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /samples/data/cat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhengxs2018/ai/286a1fda20af3eeaf130ff51cd0e611e6b3760f9/samples/data/cat.png -------------------------------------------------------------------------------- /samples/data/cat1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhengxs2018/ai/286a1fda20af3eeaf130ff51cd0e611e6b3760f9/samples/data/cat1.png -------------------------------------------------------------------------------- /samples/ernie/chat.ts: -------------------------------------------------------------------------------- 1 | import { ErnieAI } from '../../src'; 2 | 3 | const ernie = new ErnieAI(); 4 | 5 | async function main() { 6 | const chatCompletion = await ernie.chat.completions.create({ 7 | model: 'ernie-bot', 8 | messages: [{ role: 'user', content: 'Say this is a test' }], 9 | }); 10 | 11 | console.log(chatCompletion.choices[0].message.content); 12 | } 13 | 14 | main(); 15 | -------------------------------------------------------------------------------- /samples/ernie/embeddings.ts: -------------------------------------------------------------------------------- 1 | import { ErnieAI } from '../../src'; 2 | 3 | const ernie = new ErnieAI(); 4 | 5 | async function main() { 6 | const embedding = await ernie.embeddings.create({ 7 | model: 'ernie-text-embedding', 8 | input: ['推荐一些美食', '给我讲个故事'], 9 | 
}); 10 | 11 | console.log(embedding); 12 | } 13 | 14 | main(); 15 | -------------------------------------------------------------------------------- /samples/ernie/stream.ts: -------------------------------------------------------------------------------- 1 | import { ErnieAI } from '../../src'; 2 | 3 | const ernie = new ErnieAI(); 4 | 5 | async function main() { 6 | const stream = await ernie.chat.completions.create({ 7 | stream: true, 8 | model: 'ernie-bot', 9 | messages: [{ role: 'user', content: 'Say this is a test' }], 10 | }); 11 | 12 | for await (const chunk of stream) { 13 | console.log(chunk.choices[0]?.delta?.content || ''); 14 | } 15 | } 16 | 17 | main(); 18 | -------------------------------------------------------------------------------- /samples/gemini/chat/chat.ts: -------------------------------------------------------------------------------- 1 | import { GeminiAI } from '../../../src'; 2 | 3 | const ai = new GeminiAI(); 4 | 5 | async function main() { 6 | const chatCompletion = await ai.chat.completions.create({ 7 | model: 'gemini-pro', 8 | messages: [{ role: 'user', content: 'Say this is a test' }], 9 | }); 10 | 11 | console.log(chatCompletion.choices[0].message.content); 12 | } 13 | 14 | main(); 15 | -------------------------------------------------------------------------------- /samples/gemini/chat/stream.ts: -------------------------------------------------------------------------------- 1 | import { GeminiAI } from '../../../src'; 2 | 3 | const client = new GeminiAI(); 4 | 5 | async function main() { 6 | const stream = await client.chat.completions.create({ 7 | stream: true, 8 | model: 'gemini-pro', 9 | messages: [{ role: 'user', content: 'Say this is a test' }], 10 | }); 11 | 12 | for await (const chunk of stream) { 13 | console.log(chunk.choices[0]?.delta?.content || ''); 14 | } 15 | } 16 | 17 | main(); 18 | -------------------------------------------------------------------------------- /samples/gemini/models/list.ts: 
-------------------------------------------------------------------------------- 1 | import { GeminiAI } from '../../../src'; 2 | 3 | const ai = new GeminiAI(); 4 | 5 | async function main() { 6 | const list = await ai.models.list(); 7 | 8 | for await (const model of list) { 9 | console.log(model); 10 | } 11 | } 12 | 13 | main(); 14 | -------------------------------------------------------------------------------- /samples/gemini/models/retrieve.ts: -------------------------------------------------------------------------------- 1 | import { GeminiAI } from '../../../src'; 2 | 3 | const ai = new GeminiAI(); 4 | 5 | async function main() { 6 | const model = await ai.models.retrieve('gemini-pro'); 7 | 8 | console.log(model); 9 | } 10 | 11 | main(); 12 | -------------------------------------------------------------------------------- /samples/hunyuan/chat.ts: -------------------------------------------------------------------------------- 1 | import { HunYuanAI } from '../../src'; 2 | 3 | const ai = new HunYuanAI(); 4 | 5 | async function main() { 6 | const chatCompletion = await ai.chat.completions.create({ 7 | model: 'hunyuan', 8 | messages: [{ role: 'user', content: 'Say this is a test' }], 9 | }); 10 | 11 | console.log(chatCompletion.choices[0].message.content); 12 | } 13 | 14 | main(); 15 | -------------------------------------------------------------------------------- /samples/hunyuan/stream.ts: -------------------------------------------------------------------------------- 1 | import { HunYuanAI } from '../../src'; 2 | 3 | const qwenai = new HunYuanAI(); 4 | 5 | async function main() { 6 | const stream = await qwenai.chat.completions.create({ 7 | stream: true, 8 | model: 'hunyuan', 9 | messages: [{ role: 'user', content: 'Say this is a test' }], 10 | }); 11 | 12 | for await (const chunk of stream) { 13 | console.log(chunk.choices[0]?.delta?.content || ''); 14 | } 15 | } 16 | 17 | main(); 18 | 
-------------------------------------------------------------------------------- /samples/minimax/audio/speech.ts: -------------------------------------------------------------------------------- 1 | import { MinimaxAI } from '../../../src'; 2 | import { saveFile } from '../../shared'; 3 | 4 | const ai = new MinimaxAI(); 5 | 6 | async function main() { 7 | const response = await ai.audio.speech.create({ 8 | model: 'speech-01', 9 | input: '推荐一些美食', 10 | voice: 'male-qn-qingse', 11 | }); 12 | 13 | saveFile(response.body, 'minimax-audio-test.mp3'); 14 | } 15 | 16 | main(); 17 | -------------------------------------------------------------------------------- /samples/minimax/chat/create.ts: -------------------------------------------------------------------------------- 1 | import { MinimaxAI } from '../../../src'; 2 | 3 | const ai = new MinimaxAI(); 4 | 5 | async function main() { 6 | const chatCompletion = await ai.chat.completions.create({ 7 | model: 'abab5.5-chat', 8 | messages: [{ role: 'user', content: 'Say this is a test' }], 9 | }); 10 | 11 | console.log(chatCompletion.choices[0].message.content); 12 | } 13 | 14 | main(); 15 | -------------------------------------------------------------------------------- /samples/minimax/chat/stream.ts: -------------------------------------------------------------------------------- 1 | import { MinimaxAI } from '../../../src'; 2 | 3 | const ai = new MinimaxAI(); 4 | 5 | async function main() { 6 | const stream = await ai.chat.completions.create({ 7 | stream: true, 8 | model: 'abab5.5-chat', 9 | messages: [{ role: 'user', content: 'Say this is a test' }], 10 | }); 11 | 12 | for await (const chunk of stream) { 13 | console.log(chunk.choices[0]?.delta?.content || ''); 14 | } 15 | } 16 | 17 | main(); 18 | -------------------------------------------------------------------------------- /samples/minimax/embeddings.ts: -------------------------------------------------------------------------------- 1 | import { MinimaxAI } from 
'../../src'; 2 | 3 | const ai = new MinimaxAI(); 4 | 5 | async function main() { 6 | const embedding = await ai.embeddings.create({ 7 | model: 'embo-01', 8 | input: ['推荐一些美食', '给我讲个故事'], 9 | }); 10 | 11 | console.log(embedding); 12 | } 13 | 14 | main(); 15 | -------------------------------------------------------------------------------- /samples/qwen/chat/create.ts: -------------------------------------------------------------------------------- 1 | import { QWenAI } from '../../../src'; 2 | 3 | const qwenai = new QWenAI(); 4 | 5 | async function main() { 6 | try { 7 | const chatCompletion = await qwenai.chat.completions.create({ 8 | stream: true, 9 | model: 'llama2-7b-chat-v2', 10 | messages: [{ role: 'user', content: 'Say this is a test' }], 11 | }); 12 | 13 | console.dir(chatCompletion, { 14 | depth: 5, 15 | }); 16 | } catch (error) { 17 | console.error(error); 18 | } 19 | } 20 | 21 | main(); 22 | -------------------------------------------------------------------------------- /samples/qwen/chat/stream.ts: -------------------------------------------------------------------------------- 1 | import { QWenAI } from '../../../src'; 2 | 3 | const qwenai = new QWenAI(); 4 | 5 | async function main() { 6 | const stream = await qwenai.chat.completions.create({ 7 | model: 'llama2-7b-chat-v2', 8 | messages: [{ role: 'user', content: 'Say this is a test' }], 9 | stream: true, 10 | }); 11 | 12 | for await (const chunk of stream) { 13 | console.dir(chunk, { 14 | depth: 5, 15 | }); 16 | } 17 | } 18 | 19 | main(); 20 | -------------------------------------------------------------------------------- /samples/qwen/chat/tool_calls.ts: -------------------------------------------------------------------------------- 1 | import { QWenAI } from '../../../src'; 2 | 3 | const qwenai = new QWenAI(); 4 | 5 | // 模拟天气查询工具 6 | // 北京天气如何? => 北京今天是晴天 7 | function get_current_weather(location: string) { 8 | return `${location}今天是晴天`; 9 | } 10 | 11 | // 查询当前时间的工具 12 | // 现在几点了? 
=> 当前时间:2024-04-15 17:15:18 13 | function get_current_time() { 14 |   // Format the current date and time 15 |   const formatted_time = new Date().toLocaleString(); 16 |   // Return the formatted current time 17 |   return `当前时间:${formatted_time}`; 18 | } 19 |  20 | async function main() { 21 |   const chatCompletion = await qwenai.chat.completions.create({ 22 |     // Currently supported models: 23 |     // - qwen-turbo 24 |     // - qwen-plus 25 |     // - qwen-max 26 |     // - qwen-max-longcontext 27 |     model: 'qwen-turbo', 28 |     messages: [{ role: 'user', content: '北京天气如何?' }], 29 |     tools: [ 30 |       // Tool 1: get the current time 31 |       { 32 |         type: 'function', 33 |         function: { 34 |           name: 'get_current_time', 35 |           description: '当你想知道现在的时间时非常有用。', 36 |           parameters: {}, // getting the current time takes no input, so parameters stays empty 37 |         }, 38 |       }, 39 |       // Tool 2: get the weather for a given city 40 |       { 41 |         type: 'function', 42 |         function: { 43 |           name: 'get_current_weather', 44 |           description: '当你想查询指定城市的天气时非常有用。', 45 |           parameters: { 46 |             // a location is required to query the weather, hence the `location` parameter 47 |             type: 'object', 48 |             properties: { 49 |               location: { 50 |                 type: 'string', 51 |                 description: '城市或县区,比如北京市、杭州市、余杭区等。', 52 |               }, 53 |             }, 54 |             // `required` is part of the JSON Schema, so it belongs inside `parameters` 55 |             required: ['location'], 56 |           }, 57 |         }, 58 |       }, 59 |     ], 60 |   }); 61 |  62 |   const choice = chatCompletion.choices[0]; 63 |  64 |   if (choice.finish_reason === 'tool_calls') { 65 |     const tool = choice.message.tool_calls![0]; 66 |  67 |     if (tool.function.name === 'get_current_weather') { 68 |       const args = JSON.parse(tool.function.arguments); 69 |       // Arguments normally arrive as {"location": "..."}; fall back to the legacy 70 |       // nested {"properties": {"location": "..."}} shape some early builds produced. 71 |       const location = args.location ?? args.properties?.location; 72 |       console.log('get_current_weather', get_current_weather(location)); 73 |     } else if (tool.function.name === 'get_current_time') { 74 |       console.log('get_current_time', get_current_time()); 75 |     } else { 76 |       console.warn('unknown tool', tool); 77 |     } 78 |   } else { 79 |     console.log('Assistant: ', choice.message.content); 80 |   } 81 | } 82 |  83 | main(); 84 | -------------------------------------------------------------------------------- /samples/qwen/chat_vl/create.ts:
-------------------------------------------------------------------------------- 1 | import { QWenAI } from '../../../src'; 2 | 3 | const qwenai = new QWenAI(); 4 | 5 | async function main() { 6 | const chatCompletion = await qwenai.chat.completions.create({ 7 | model: 'qwen-vl-plus', 8 | messages: [ 9 | { 10 | role: 'user', 11 | content: '你好', 12 | }, 13 | ], 14 | }); 15 | 16 | console.log(chatCompletion.choices[0].message.content); 17 | } 18 | 19 | main(); 20 | -------------------------------------------------------------------------------- /samples/qwen/chat_vl/image.ts: -------------------------------------------------------------------------------- 1 | import { QWenAI } from '../../../src'; 2 | 3 | const client = new QWenAI(); 4 | 5 | async function main() { 6 | const chatCompletion = await client.chat.completions.create({ 7 | model: 'qwen-vl-v1', 8 | messages: [ 9 | { 10 | role: 'user', 11 | content: [ 12 | { 13 | type: 'image_url', 14 | image_url: { url: 'https://www.baidu.com/img/PCfb_5bf082d29588c07f842ccde3f97243ea.png' }, 15 | }, 16 | { type: 'text', text: '这是什么图?' 
}, 17 | ], 18 | }, 19 | ], 20 | }); 21 | 22 | console.log(chatCompletion.choices[0].message.content); 23 | } 24 | 25 | main(); 26 | -------------------------------------------------------------------------------- /samples/qwen/completions/create.ts: -------------------------------------------------------------------------------- 1 | import { QWenAI } from '../../../src'; 2 | 3 | const qwenai = new QWenAI(); 4 | 5 | async function main() { 6 | const completion = await qwenai.completions.create({ 7 | model: 'llama2-7b-chat-v2', 8 | prompt: 'Say this is a test', 9 | }); 10 | 11 | console.log(completion); 12 | } 13 | 14 | main(); 15 | -------------------------------------------------------------------------------- /samples/qwen/completions/stream.ts: -------------------------------------------------------------------------------- 1 | import { QWenAI } from '../../../src'; 2 | 3 | const qwenai = new QWenAI(); 4 | 5 | async function main() { 6 | const completion = await qwenai.completions.create({ 7 | model: 'llama2-7b-chat-v2', 8 | prompt: 'Say this is a test', 9 | stream: true, 10 | }); 11 | 12 | for await (const chunk of completion) { 13 | console.log(chunk); 14 | } 15 | } 16 | 17 | main(); 18 | -------------------------------------------------------------------------------- /samples/qwen/embeddings.ts: -------------------------------------------------------------------------------- 1 | import { QWenAI } from '../../src'; 2 | 3 | const client = new QWenAI(); 4 | 5 | async function main() { 6 | const embedding = await client.embeddings.create({ 7 | model: 'text-embedding-v1', 8 | input: ['推荐一些美食', '给我讲个故事'], 9 | }); 10 | 11 | console.log(embedding.data); 12 | } 13 | 14 | main(); 15 | -------------------------------------------------------------------------------- /samples/qwen/images/generate.ts: -------------------------------------------------------------------------------- 1 | import { QWenAI } from '../../../src'; 2 | import { downloadImage } from '../../shared'; 3 
| 4 | const ai = new QWenAI(); 5 | 6 | async function main() { 7 | const { data } = await ai.images.generate({ 8 | prompt: 'cat', 9 | }); 10 | 11 | for await (const image of data) { 12 | downloadImage(image.url!); 13 | } 14 | } 15 | 16 | main(); 17 | -------------------------------------------------------------------------------- /samples/shared.ts: -------------------------------------------------------------------------------- 1 | import { randomUUID } from 'node:crypto'; 2 | import { createReadStream, createWriteStream, mkdirSync } from 'node:fs'; 3 | import path from 'node:path'; 4 | import { Readable } from 'node:stream'; 5 | import { ReadableStream } from 'node:stream/web'; 6 | 7 | export const dataDir = path.join(__dirname, 'data'); 8 | 9 | export function getTestFile(filename: string) { 10 | return createReadStream(path.join(dataDir, filename)); 11 | } 12 | 13 | export const cacheDir = path.join(__dirname, '..', '.cache'); 14 | 15 | mkdirSync(cacheDir, { recursive: true }); 16 | 17 | export function saveFile(binary?: globalThis.ReadableStream | ReadableStream | null, name?: string) { 18 | if (!binary) return; 19 | 20 | const filename = path.resolve(cacheDir, name || `${randomUUID()}.png`); 21 | 22 | Readable.fromWeb(binary as ReadableStream).pipe(createWriteStream(filename)); 23 | } 24 | 25 | export async function downloadImage(url: string, name?: string) { 26 | const data = await fetch(url).then(response => response.body); 27 | saveFile(data, name); 28 | } 29 | -------------------------------------------------------------------------------- /samples/spark/chat.ts: -------------------------------------------------------------------------------- 1 | import { SparkAI } from '../../src'; 2 | 3 | const api = new SparkAI(); 4 | 5 | async function main() { 6 | const chatCompletion = await api.chat.completions.create({ 7 | model: 'spark-1.5', 8 | messages: [{ role: 'user', content: 'Say this is a test' }], 9 | }); 10 | 11 | 
console.log(chatCompletion.choices[0].message.content); 12 | } 13 | 14 | main(); 15 | -------------------------------------------------------------------------------- /samples/spark/images.ts: -------------------------------------------------------------------------------- 1 | import { SparkAI } from '../../src'; 2 | 3 | const spark = new SparkAI(); 4 | 5 | async function main() { 6 | const image = await spark.images.generate({ 7 | prompt: 'cat', 8 | }); 9 | 10 | console.log(image); 11 | } 12 | 13 | main(); 14 | -------------------------------------------------------------------------------- /samples/spark/stream.ts: -------------------------------------------------------------------------------- 1 | import { SparkAI } from '../../src'; 2 | 3 | const ai = new SparkAI(); 4 | 5 | async function main() { 6 | const stream = await ai.chat.completions.create({ 7 | stream: true, 8 | model: 'spark-1.5', 9 | messages: [{ role: 'user', content: 'Say this is a test' }], 10 | }); 11 | 12 | for await (const chunk of stream) { 13 | console.log(chunk.choices[0]?.delta?.content || ''); 14 | } 15 | } 16 | 17 | main(); 18 | -------------------------------------------------------------------------------- /samples/vyro/images/createVariation.ts: -------------------------------------------------------------------------------- 1 | import { VYroAI } from '../../../src'; 2 | import { getTestFile, saveFile } from '../../shared'; 3 | 4 | const ai = new VYroAI(); 5 | 6 | async function main() { 7 | const image = await ai.images.createVariation({ 8 | prompt: 'add color', 9 | image: getTestFile('cat.png'), 10 | }); 11 | 12 | saveFile(image.data[0].binary!); 13 | } 14 | 15 | main(); 16 | -------------------------------------------------------------------------------- /samples/vyro/images/edit.ts: -------------------------------------------------------------------------------- 1 | import { VYroAI } from '../../../src'; 2 | import { getTestFile, saveFile } from '../../shared'; 3 | 4 | const ai = 
new VYroAI(); 5 | 6 | async function main() { 7 | const image = await ai.images.edit({ 8 | prompt: 'Add a Santa hat to this cat', 9 | image: getTestFile('cat.png'), 10 | }); 11 | 12 | saveFile(image.data[0].binary!); 13 | } 14 | 15 | main(); 16 | -------------------------------------------------------------------------------- /samples/vyro/images/generate.ts: -------------------------------------------------------------------------------- 1 | import { VYroAI } from '../../../src'; 2 | import { saveFile } from '../../shared'; 3 | 4 | const ai = new VYroAI(); 5 | 6 | async function main() { 7 | const image = await ai.images.generate({ 8 | prompt: 'cat', 9 | }); 10 | 11 | saveFile(image.data[0].binary!); 12 | } 13 | 14 | main(); 15 | -------------------------------------------------------------------------------- /samples/vyro/images/restoration.ts: -------------------------------------------------------------------------------- 1 | import { VYroAI } from '../../../src'; 2 | import { getTestFile, saveFile } from '../../shared'; 3 | 4 | const ai = new VYroAI(); 5 | 6 | async function main() { 7 | const image = await ai.images.restoration({ 8 | prompt: 'add color', 9 | image: getTestFile('cat.png'), 10 | mask: getTestFile('cat1.png'), 11 | }); 12 | 13 | saveFile(image.data[0].binary!); 14 | } 15 | 16 | main(); 17 | -------------------------------------------------------------------------------- /samples/vyro/images/upscale.ts: -------------------------------------------------------------------------------- 1 | import { VYroAI } from '../../../src'; 2 | import { getTestFile, saveFile } from '../../shared'; 3 | 4 | const ai = new VYroAI(); 5 | 6 | async function main() { 7 | const image = await ai.images.upscale({ 8 | image: getTestFile('cat.png'), 9 | }); 10 | 11 | saveFile(image.data[0].binary!); 12 | } 13 | 14 | main(); 15 | -------------------------------------------------------------------------------- /src/ernie/index.ts: 
-------------------------------------------------------------------------------- 1 | import type { Agent } from 'node:http'; 2 | 3 | import { APIClient, type DefaultQuery, type Fetch, type FinalRequestOptions, type Headers } from 'openai/core'; 4 | 5 | import * as API from './resources'; 6 | 7 | export interface ErnieAIOptions { 8 | baseURL?: string; 9 | apiKey?: string; 10 | timeout?: number | undefined; 11 | httpAgent?: Agent; 12 | fetch?: Fetch | undefined; 13 | 14 | /** 15 | * Default headers to include with every request to the API. 16 | * 17 | * These can be removed in individual requests by explicitly setting the 18 | * header to `undefined` or `null` in request options. 19 | */ 20 | defaultHeaders?: Headers; 21 | 22 | /** 23 | * Default query parameters to include with every request to the API. 24 | * 25 | * These can be removed in individual requests by explicitly setting the 26 | * param to `undefined` in request options. 27 | */ 28 | defaultQuery?: DefaultQuery; 29 | } 30 | 31 | export class ErnieAI extends APIClient { 32 | protected apiKey: string; 33 | 34 | private _options: ErnieAIOptions; 35 | 36 | constructor(options: ErnieAIOptions = {}) { 37 | const { 38 | apiKey = process.env.EB_API_KEY || '', 39 | baseURL = 'https://aistudio.baidu.com/llm/lmapi/v1', 40 | timeout = 30000, 41 | fetch = globalThis.fetch, 42 | httpAgent = undefined, 43 | ...rest 44 | } = options; 45 | 46 | super({ 47 | baseURL, 48 | timeout, 49 | fetch, 50 | httpAgent, 51 | ...rest, 52 | }); 53 | 54 | this._options = options; 55 | 56 | this.apiKey = apiKey; 57 | } 58 | 59 | chat = new API.Chat(this); 60 | 61 | embeddings = new API.Embeddings(this); 62 | 63 | protected override authHeaders() { 64 | return { 65 | Authorization: `token ${this.apiKey}`, 66 | }; 67 | } 68 | 69 | protected override defaultHeaders(opts: FinalRequestOptions): Headers { 70 | return { 71 | ...super.defaultHeaders(opts), 72 | ...this._options.defaultHeaders, 73 | }; 74 | } 75 | 76 | protected override 
defaultQuery(): DefaultQuery | undefined { 77 | return this._options.defaultQuery; 78 | } 79 | } 80 | 81 | export namespace ErnieAI { 82 | export type Chat = API.Chat; 83 | export type ChatModel = API.ChatModel; 84 | export type ChatCompletionCreateParams = API.ChatCompletionCreateParams; 85 | export type ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming; 86 | export type ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming; 87 | 88 | export type EmbeddingCreateParams = API.EmbeddingCreateParams; 89 | } 90 | 91 | export default ErnieAI; 92 | -------------------------------------------------------------------------------- /src/ernie/resources/chat/chat.ts: -------------------------------------------------------------------------------- 1 | import { APIResource } from '../../../resource'; 2 | import { Completions } from './completions'; 3 | 4 | export class Chat extends APIResource { 5 | completions = new Completions(this._client); 6 | } 7 | -------------------------------------------------------------------------------- /src/ernie/resources/chat/completions.ts: -------------------------------------------------------------------------------- 1 | import OpenAI, { APIError, OpenAIError } from 'openai'; 2 | import { Stream } from 'openai/streaming'; 3 | 4 | import { APIResource } from '../../../resource'; 5 | import { ensureArray } from '../../../util'; 6 | 7 | export class Completions extends APIResource { 8 | protected endpoints: Record = { 9 | 'ernie-bot': '/chat/completions', 10 | 'ernie-bot-turbo': '/chat/eb-instant', 11 | 'ernie-bot-4': '/chat/completions_pro', 12 | 'ernie-bot-8k': '/chat/ernie_bot_8k', 13 | }; 14 | 15 | /** 16 | * Creates a model response for the given chat conversation. 
17 | * 18 | * 文心一言 由于分发在不同的平台,所以有不同的文档 19 | * 百度云的响应和 OpenAI 的比较类似,但授权没有 AI Studio 方便 20 | * 之前 AI Studio 的文档是有文档的,但现在不知道去哪了 21 | * 参考: 22 | * - https://cloud.baidu.com/doc/WENXINWORKSHOP/s/jlil56u11 23 | * - https://github.com/PaddlePaddle/ERNIE-Bot-SDK/blob/develop/erniebot/backends/aistudio.py 24 | */ 25 | create(body: ChatCompletionCreateParamsNonStreaming, options?: OpenAI.RequestOptions): Promise; 26 | create( 27 | body: ChatCompletionCreateParamsStreaming, 28 | options?: OpenAI.RequestOptions, 29 | ): Promise>; 30 | 31 | async create(params: ChatCompletionCreateParams, options?: OpenAI.RequestOptions) { 32 | const { model = 'ernie-bot', ...body } = Completions.buildCreateParams(params); 33 | 34 | const endpoint = this.endpoints[model]; 35 | 36 | if (!endpoint) { 37 | throw new OpenAIError(`Invalid model: ${model}`); 38 | } 39 | 40 | const stream = body.stream; 41 | 42 | const headers = { 43 | ...options?.headers, 44 | // Note: 如果是 stream 的话,需要设置 Accept 为 text/event-stream 45 | Accept: stream ? 
'text/event-stream' : 'application/json', 46 | }; 47 | 48 | const response: Response = await this._client.post(endpoint, { 49 | ...options, 50 | body, 51 | headers, 52 | // 文心一言的响应内容被包裹了一层,需要解构并转换为 OpenAI 的格式 53 | // 设置 __binaryResponse 为 true, 是为了让 client 返回原始的 response 54 | stream: false, 55 | __binaryResponse: true, 56 | }); 57 | 58 | if (stream) { 59 | const controller = new AbortController(); 60 | 61 | options?.signal?.addEventListener('abort', () => { 62 | controller.abort(); 63 | }); 64 | 65 | return Completions.fromOpenAIStream(model, Stream.fromSSEResponse(response, controller), controller); 66 | } 67 | 68 | return Completions.fromResponse(model, await response.json()); 69 | } 70 | 71 | static buildCreateParams(params: ChatCompletionCreateParams): ChatCompletions.ChatCompletionCreateParams { 72 | const { messages = [], presence_penalty, user, stop, ...rest } = params; 73 | 74 | const head = messages[0]; 75 | 76 | // 文心一言的 system 是独立字段 77 | //(1)长度限制1024个字符 78 | //(2)如果使用functions参数,不支持设定人设system 79 | const system = head && head.role === 'system' ? 
head.content : undefined; 80 | 81 | // 移除 system 角色的消息 82 | if (system) { 83 | messages.splice(0, 1); 84 | } 85 | 86 | const data: ChatCompletions.ChatCompletionCreateParams = { 87 | ...rest, 88 | messages, 89 | }; 90 | 91 | if (system) { 92 | data.system = system; 93 | } 94 | 95 | if (user) { 96 | data.user_id = user; 97 | } 98 | 99 | if (presence_penalty) { 100 | data.penalty_score = presence_penalty; 101 | } 102 | 103 | if (stop) { 104 | data.stop = ensureArray(stop); 105 | } 106 | 107 | return data; 108 | } 109 | 110 | static fromResponse(model: string, data: ChatCompletions.APIResponse): OpenAI.ChatCompletion { 111 | Completions.assert(data); 112 | 113 | const result = data.result; 114 | 115 | const choice: OpenAI.ChatCompletion.Choice = { 116 | index: 0, 117 | message: { 118 | role: 'assistant', 119 | content: result.result, 120 | }, 121 | logprobs: null, 122 | finish_reason: 'stop', 123 | }; 124 | 125 | // TODO 需要确认 is_truncated 是否和 is_end 互斥 126 | // TODO 需要确认 functions 是否响应式不一样 127 | if (result.is_end) { 128 | choice.finish_reason = 'stop'; 129 | } else if (result.is_truncated) { 130 | choice.finish_reason = 'length'; 131 | } else if (result.need_clear_history) { 132 | choice.finish_reason = 'content_filter'; 133 | } 134 | 135 | return { 136 | id: result.id, 137 | model: model, 138 | choices: [choice], 139 | created: parseInt(result.created, 10), 140 | object: 'chat.completion', 141 | usage: result.usage, 142 | }; 143 | } 144 | 145 | static fromOpenAIStream( 146 | model: string, 147 | stream: Stream, 148 | controller: AbortController, 149 | ): Stream { 150 | async function* iterator(): AsyncIterator { 151 | for await (const chunk of stream) { 152 | Completions.assert(chunk); 153 | 154 | // TODO 某些情况下,文心一言的 result 只有 id,需要排查情况 155 | const data = chunk.result; 156 | 157 | const choice: OpenAI.ChatCompletionChunk.Choice = { 158 | index: 0, 159 | delta: { 160 | role: 'assistant', 161 | content: data.result || '', 162 | }, 163 | finish_reason: null, 164 | }; 
165 | 166 | // TODO 需要确认 is_truncated 是否和 is_end 互斥 167 | // TODO 需要确认 functions 是否响应式不一样 168 | if (data.is_end) { 169 | choice.finish_reason = 'stop'; 170 | } else if (data.is_truncated) { 171 | choice.finish_reason = 'length'; 172 | } else if (data.need_clear_history) { 173 | choice.finish_reason = 'content_filter'; 174 | } 175 | 176 | yield { 177 | id: data.id, 178 | model, 179 | choices: [choice], 180 | object: 'chat.completion.chunk', 181 | created: parseInt(data.created, 10), 182 | // openai-node 上 已经有讨论添加 usage 的问题 183 | // 文心一言是有提供的,这里主要是为了向前兼容 184 | usage: data.usage, 185 | }; 186 | } 187 | } 188 | 189 | return new Stream(iterator, controller); 190 | } 191 | 192 | /** 193 | * 构建错误 194 | * 195 | * @param code - 196 | * @param message - 197 | * @returns 错误 198 | */ 199 | static makeAPIError(code: number, message: string) { 200 | const error = { code, message }; 201 | 202 | switch (code) { 203 | case 2: 204 | return APIError.generate(500, error, message, {}); 205 | case 6: // permission error 206 | case 111: // token expired 207 | return APIError.generate(403, error, message, {}); 208 | case 17: 209 | case 18: 210 | case 19: 211 | case 40407: 212 | return APIError.generate(429, error, message, {}); 213 | case 110: // invalid token 214 | case 40401: // invalid token 215 | return APIError.generate(401, error, message, {}); 216 | case 336003: // invalid parameter 217 | return APIError.generate(400, error, message, {}); 218 | case 336100: // try again 219 | return APIError.generate(500, error, message, {}); 220 | default: 221 | return APIError.generate(undefined, error, message, {}); 222 | } 223 | } 224 | 225 | /** 226 | * 如果 code 不为 0,抛出 APIError 227 | * 228 | * @param code - 229 | * @param message - 230 | */ 231 | static assert(resp: ChatCompletions.APIResponse) { 232 | if (resp.errorCode === 0) return; 233 | 234 | throw Completions.makeAPIError(resp.errorCode, resp.errorMsg); 235 | } 236 | } 237 | 238 | export interface ChatCompletionCreateParamsNonStreaming 
239 | extends Pick< 240 | OpenAI.ChatCompletionCreateParamsNonStreaming, 241 | 'messages' | 'functions' | 'temperature' | 'top_p' | 'presence_penalty' | 'stream' | 'stop' | 'user' 242 | > { 243 | model: ChatModel; 244 | disable_search?: boolean | null; 245 | enable_citation?: boolean | null; 246 | } 247 | 248 | export interface ChatCompletionCreateParamsStreaming 249 | extends Pick< 250 | OpenAI.ChatCompletionCreateParamsStreaming, 251 | 'messages' | 'functions' | 'temperature' | 'top_p' | 'presence_penalty' | 'stream' | 'stop' | 'user' 252 | > { 253 | model: ChatModel; 254 | disable_search?: boolean | null; 255 | enable_citation?: boolean | null; 256 | } 257 | 258 | export type ChatCompletionCreateParams = ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming; 259 | 260 | export type ChatModel = 'ernie-bot' | 'ernie-bot-turbo' | 'ernie-bot-4' | 'ernie-bot-8k'; 261 | 262 | export namespace ChatCompletions { 263 | export interface ChatCompletionCreateParams { 264 | /** 265 | * 模型名称 266 | */ 267 | model: ChatModel; 268 | 269 | /** 270 | * 是否强制关闭实时搜索功能,默认 false,表示不关闭 271 | * 272 | * @defaultValue false 273 | */ 274 | disable_search?: boolean | null; 275 | 276 | /** 277 | * 是否开启上角标返回,说明: 278 | * (1)开启后,有概率触发搜索溯源信息search_info,search_info内容见响应参数介绍 279 | * (2)默认false,不开启 280 | * 281 | * @defaultValue false 282 | */ 283 | enable_citation?: boolean | null; 284 | 285 | /** 286 | * 模型人设,主要用于人设设定,例如,你是xxx公司制作的AI助手,说明: 287 | * (1)长度限制1024个字符 288 | * (2)如果使用 functions 参数,不支持设定人设 system 289 | * 290 | * @remarks OpenAI 是通过 messages 的 role 来区分的 291 | */ 292 | system?: string | null; 293 | 294 | /** 295 | * 聊天上下文信息 296 | * 297 | * @remarks 不支持 system 角色 298 | */ 299 | messages: OpenAI.ChatCompletionCreateParams['messages']; 300 | 301 | /** 302 | * 一个可触发函数的描述列表 303 | */ 304 | functions?: OpenAI.ChatCompletionCreateParams['functions']; 305 | 306 | /** 307 | * 内容随机性 308 | * 309 | * 说明: 310 | * (1)较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定 311 | * (2)默认0.8,范围 (0, 1.0],不能为0 
312 | * (3)建议该参数和 top_p 只设置1个 313 | * (4)建议 top_p 和 temperature 不要同时更改 314 | */ 315 | temperature?: number | null; 316 | 317 | /** 318 | * 生成文本的多样性 319 | * 320 | * 说明: 321 | * (1)影响输出文本的多样性,取值越大,生成文本的多样性越强 322 | * (2)默认0.8,取值范围 [0, 1.0] 323 | * (3)建议该参数和 temperature 只设置1个 324 | * (4)建议 top_p 和 temperature 不要同时更改 325 | */ 326 | top_p?: number | null; 327 | 328 | /** 329 | * 330 | * 通过对已生成的token增加惩罚,减少重复生成的现象。说明: 331 | * (1)值越大表示惩罚越大 332 | * (2)默认1.0,取值范围:[1.0, 2.0] 333 | * 334 | * @remarks 在 OpenAI 中,参数名为 presence_penalty 335 | */ 336 | penalty_score?: number | null; 337 | 338 | /** 339 | * 是否以流式接口的形式返回数据,默认 false 340 | */ 341 | stream?: boolean | null; 342 | 343 | /** 344 | * 生成停止标识,当模型生成结果以stop中某个元素结尾时,停止文本生成。说明: 345 | * (1)每个元素长度不超过20字符 346 | * (2)最多4个元素 347 | */ 348 | stop?: string | string[] | undefined; 349 | 350 | /** 351 | * 表示最终用户的唯一标识符,可以监视和检测滥用行为,防止接口恶意调用 352 | * 353 | * @remarks OpenAI 中是通过 user 区分 354 | */ 355 | user_id?: string | undefined; 356 | } 357 | 358 | export type ChatCompletion = { 359 | id: string; 360 | result: string; 361 | created: string; 362 | is_end: boolean; 363 | is_truncated: boolean; 364 | need_clear_history: boolean; 365 | usage: OpenAI.CompletionUsage; 366 | }; 367 | 368 | export type APIResponse = { 369 | errorCode: number; 370 | errorMsg: string; 371 | result: ChatCompletion; 372 | }; 373 | } 374 | -------------------------------------------------------------------------------- /src/ernie/resources/chat/index.ts: -------------------------------------------------------------------------------- 1 | export { Chat } from './chat'; 2 | export { 3 | type ChatModel, 4 | type ChatCompletionCreateParams, 5 | type ChatCompletionCreateParamsNonStreaming, 6 | type ChatCompletionCreateParamsStreaming, 7 | Completions, 8 | } from './completions'; 9 | -------------------------------------------------------------------------------- /src/ernie/resources/embeddings.ts: 
--------------------------------------------------------------------------------
import OpenAI, { APIError, OpenAIError } from 'openai';
import { type RequestOptions } from 'openai/core';

import { APIResource } from '../../resource';

export class Embeddings extends APIResource {
  // Maps each public model name onto its wenxin workshop endpoint path.
  protected endpoints: Record<EmbeddingModel, string> = {
    'ernie-text-embedding': '/embeddings/embedding-v1',
  };

  /**
   * Creates an embedding vector representing the input text.
   *
   * See https://cloud.baidu.com/doc/WENXINWORKSHOP/s/alj562vvu
   *
   * @param params - model name, input text(s) and an optional end-user id
   * @param options - extra request options forwarded to the underlying client
   * @returns the upstream payload converted into the OpenAI embedding shape
   * @throws OpenAIError when no endpoint is registered for `params.model`
   * @throws APIError when the upstream payload carries a non-zero errorCode
   */
  async create(params: EmbeddingCreateParams, options?: RequestOptions): Promise<OpenAI.CreateEmbeddingResponse> {
    const { model, user, input } = params;
    const endpoint = this.endpoints[model];

    if (!endpoint) {
      throw new OpenAIError(`Invalid model: ${model}`);
    }

    const body = {
      input,
      user_id: user,
    };

    // Spread `options` FIRST so caller-supplied request options can never
    // clobber our `body` (previously `body` came first, letting an
    // `options.body` silently replace it); this also matches the ordering
    // used by the chat completions resource. `__binaryResponse` keeps the
    // raw Response so we can unwrap the wenxin envelope ourselves.
    const response: Response = await this._client.post(endpoint, {
      ...options,
      body,
      __binaryResponse: true,
    });

    return Embeddings.fromResponse(model, await response.json());
  }

  /**
   * Unwraps the wenxin envelope and converts it into the OpenAI
   * `CreateEmbeddingResponse` shape.
   *
   * @throws APIError when the payload carries a non-zero errorCode
   */
  static fromResponse(model: EmbeddingModel, data: CreateEmbeddingResponse): OpenAI.CreateEmbeddingResponse {
    Embeddings.assert(data);

    const { result } = data;

    return {
      data: result.data,
      model: model,
      object: 'list',
      usage: result.usage,
    };
  }

  /**
   * Throws an APIError when the response's errorCode is non-zero; otherwise
   * returns silently.
   *
   * @param resp - raw wenxin response envelope
   */
  static assert(resp: CreateEmbeddingResponse) {
    if (resp.errorCode === 0) return;

    const error = { code: resp.errorCode, message: resp.errorMsg };

    // Forward errorMsg as the APIError message so failures are diagnosable
    // (previously the message argument was left undefined).
    throw APIError.generate(undefined, error, resp.errorMsg, undefined);
  }
}

export type EmbeddingModel = 'ernie-text-embedding';

export interface EmbeddingCreateParams {
  /**
   * Input text(s) to embed.
   */
  input: string | Array<string> | Array<number> | Array<Array<number>>;

  /**
   * Model name.
   */
  model: EmbeddingModel;

  /**
   * End-user id; forwarded upstream as `user_id`.
   */
  user?: string;
}

type CreateEmbeddingResponse = {
  errorCode: number;
  errorMsg: string;
  result: OpenAI.CreateEmbeddingResponse;
};
--------------------------------------------------------------------------------
/src/ernie/resources/index.ts:
--------------------------------------------------------------------------------
export * from './chat/index';

export { Embeddings, type EmbeddingCreateParams } from './embeddings';
--------------------------------------------------------------------------------
/src/ernie/util.ts:
--------------------------------------------------------------------------------
import { APIError } from 'openai';

/**
 * Builds an APIError for a wenxin error code, mapping known codes onto
 * suitable HTTP status codes.
 *
 * @param code - wenxin error code
 * @param message - wenxin error message
 * @returns the constructed APIError
 */
export function makeAPIError(code: number, message: string) {
  const error = { code, message };

  switch (code) {
    case 2:
      return APIError.generate(500, error, message, {});
    case 6: // permission error
    case 111: // token expired
      return APIError.generate(403, error, message, {});
    case 17:
    case 18:
    case 19:
    case 40407:
      return APIError.generate(429, error, message, {});
    case 110: // invalid token
    case 40401: // invalid token
      return APIError.generate(401, error, message, {});
    case 336003: // invalid parameter
      return APIError.generate(400, error, message, {});
    case 336100: // try again
      return APIError.generate(500, error, message, {});
    default:
      return APIError.generate(undefined, error, message, {});
  }
}

/**
 * Throws an APIError when `code` is non-zero; otherwise returns silently.
 *
 * @param code - wenxin error code
 * @param message - wenxin error message
 */
export function assertNonZero(code: number, message: string) {
  if (code === 0) return;

  throw makeAPIError(code, message);
}
-------------------------------------------------------------------------------- /src/gemini/index.ts: -------------------------------------------------------------------------------- 1 | import type { Agent } from 'node:http'; 2 | 3 | import { APIClient, type DefaultQuery, type Fetch, type FinalRequestOptions, type Headers } from 'openai/core'; 4 | 5 | import * as API from './resources'; 6 | 7 | const BASE_URL = 'https://generativelanguage.googleapis.com/v1'; 8 | 9 | export interface GeminiAIOptions { 10 | baseURL?: string; 11 | apiKey?: string; 12 | timeout?: number | undefined; 13 | httpAgent?: Agent; 14 | fetch?: Fetch | undefined; 15 | defaultHeaders?: Headers; 16 | defaultQuery?: DefaultQuery; 17 | } 18 | 19 | export class GeminiAI extends APIClient { 20 | apiKey: string; 21 | 22 | private _options: GeminiAIOptions; 23 | 24 | constructor(options: GeminiAIOptions = {}) { 25 | const { 26 | apiKey = process.env.GEMINI_API_KEY || '', 27 | baseURL = process.env.GEMINI_BASE_URL || BASE_URL, 28 | timeout = 30000, 29 | fetch = globalThis.fetch, 30 | httpAgent = undefined, 31 | ...rest 32 | } = options; 33 | 34 | super({ 35 | baseURL, 36 | timeout, 37 | fetch, 38 | httpAgent, 39 | ...rest, 40 | }); 41 | 42 | this._options = options; 43 | 44 | this.apiKey = apiKey; 45 | } 46 | 47 | chat = new API.Chat(this); 48 | 49 | models = new API.Models(this); 50 | 51 | protected override defaultHeaders(opts: FinalRequestOptions): Headers { 52 | return { 53 | ...super.defaultHeaders(opts), 54 | ...this._options.defaultHeaders, 55 | }; 56 | } 57 | 58 | protected override defaultQuery(): DefaultQuery | undefined { 59 | return { 60 | ...this._options.defaultQuery, 61 | key: this.apiKey, 62 | }; 63 | } 64 | } 65 | 66 | export namespace GeminiAI { 67 | export type ChatModel = API.ChatModel; 68 | export type ChatCompletionCreateParams = API.ChatCompletionCreateParams; 69 | export type ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming; 70 | export type 
ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming; 71 | } 72 | 73 | export default GeminiAI; 74 | -------------------------------------------------------------------------------- /src/gemini/resource.ts: -------------------------------------------------------------------------------- 1 | import { GeminiAI } from './index'; 2 | 3 | export class APIResource { 4 | protected _client: GeminiAI; 5 | 6 | constructor(client: GeminiAI) { 7 | this._client = client; 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /src/gemini/resources/chat/chat.ts: -------------------------------------------------------------------------------- 1 | import { APIResource } from '../../resource'; 2 | import { Completions } from './completions'; 3 | 4 | export class Chat extends APIResource { 5 | completions = new Completions(this._client); 6 | } 7 | -------------------------------------------------------------------------------- /src/gemini/resources/chat/completions.ts: -------------------------------------------------------------------------------- 1 | import { randomUUID } from 'crypto'; 2 | import OpenAI from 'openai'; 3 | import { Stream } from 'openai/streaming'; 4 | 5 | import { ensureArray } from '../../../util'; 6 | import { APIResource } from '../../resource'; 7 | 8 | export class Completions extends APIResource { 9 | /** 10 | * Creates a model response for the given chat conversation. 
11 | */ 12 | create(body: ChatCompletionCreateParamsNonStreaming, options?: OpenAI.RequestOptions): Promise; 13 | create( 14 | body: ChatCompletionCreateParamsStreaming, 15 | options?: OpenAI.RequestOptions, 16 | ): Promise>; 17 | 18 | async create( 19 | params: ChatCompletionCreateParams, 20 | options?: OpenAI.RequestOptions, 21 | ): Promise> { 22 | const { stream, model } = params; 23 | const body = this.buildCreateParams(params); 24 | const path = `/models/${model}:generateContent`; 25 | 26 | const response: Response = await this._client.post(path, { 27 | ...options, 28 | query: stream ? { alt: 'sse' } : {}, 29 | body: body as unknown as Record, 30 | stream: false, 31 | __binaryResponse: true, 32 | }); 33 | 34 | if (stream) { 35 | const controller = new AbortController(); 36 | 37 | options?.signal?.addEventListener('abort', () => { 38 | controller.abort(); 39 | }); 40 | 41 | return this.afterSSEResponse(model, response, controller); 42 | } 43 | 44 | return this.afterResponse(model, response); 45 | } 46 | 47 | protected buildCreateParams(params: ChatCompletionCreateParams) { 48 | const { messages = [], max_tokens, top_p, top_k, stop, temperature } = params; 49 | 50 | function formatContentParts(content: string | OpenAI.ChatCompletionContentPart[]) { 51 | const parts: GeminiChat.Part[] = []; 52 | 53 | if (typeof content === 'string') { 54 | parts.push({ text: content }); 55 | return parts; 56 | } 57 | 58 | for (const part of content) { 59 | if (part.type === 'text') { 60 | parts.push({ text: part.text }); 61 | } else { 62 | // TODO: Handle images 63 | // parts.push({ 64 | // inline_data: { 65 | // "mime_type": "image/jpeg", 66 | // "data": "'$(base64 -w0 image.jpg)'" 67 | // } 68 | // }); 69 | } 70 | } 71 | 72 | return parts; 73 | } 74 | 75 | function formatRole(role: string): 'user' | 'model' { 76 | return role === 'user' ? 
'user' : 'model'; 77 | } 78 | 79 | const generationConfig: GeminiChat.GenerationConfig = {}; 80 | 81 | const data: GeminiChat.GenerateContentRequest = { 82 | contents: messages.map(item => { 83 | return { 84 | role: formatRole(item.role), 85 | parts: formatContentParts(item.content!), 86 | }; 87 | }), 88 | generationConfig, 89 | }; 90 | 91 | if (temperature != null) { 92 | generationConfig.temperature = temperature; 93 | } 94 | 95 | if (top_k != null) { 96 | generationConfig.topK = top_k; 97 | } 98 | 99 | if (top_p != null) { 100 | generationConfig.topP = top_p; 101 | } 102 | 103 | if (stop != null) { 104 | generationConfig.stopSequences = ensureArray(stop); 105 | } 106 | 107 | if (max_tokens != null) { 108 | generationConfig.maxOutputTokens = max_tokens; 109 | } 110 | 111 | return data; 112 | } 113 | 114 | protected async afterResponse(model: string, response: Response): Promise { 115 | const data: GeminiChat.GenerateContentResponse = await response.json(); 116 | const choices: OpenAI.ChatCompletion.Choice[] = data.candidates!.map(item => { 117 | const [part] = item.content.parts; 118 | 119 | const choice: OpenAI.ChatCompletion.Choice = { 120 | index: item.index, 121 | message: { 122 | role: 'assistant', 123 | content: part.text!, 124 | }, 125 | logprobs: null, 126 | finish_reason: 'stop', 127 | }; 128 | 129 | switch (item.finishReason) { 130 | case 'MAX_TOKENS': 131 | choice.finish_reason = 'length'; 132 | break; 133 | case 'SAFETY': 134 | case 'RECITATION': 135 | choice.finish_reason = 'content_filter'; 136 | break; 137 | default: 138 | choice.finish_reason = 'stop'; 139 | } 140 | 141 | return choice; 142 | }); 143 | 144 | return { 145 | id: randomUUID(), 146 | model: model, 147 | choices, 148 | object: 'chat.completion', 149 | created: Date.now() / 10, 150 | // TODO 需要支持 usage 151 | usage: { 152 | completion_tokens: 0, 153 | prompt_tokens: 0, 154 | total_tokens: 0, 155 | }, 156 | }; 157 | } 158 | 159 | protected afterSSEResponse( 160 | model: string, 161 | 
response: Response, 162 | controller: AbortController, 163 | ): Stream { 164 | const stream = Stream.fromSSEResponse(response, controller); 165 | 166 | const toChoices = (data: GeminiChat.GenerateContentResponse) => { 167 | return data.candidates!.map(item => { 168 | const [part] = item.content.parts; 169 | 170 | const choice: OpenAI.ChatCompletionChunk.Choice = { 171 | index: item.index, 172 | delta: { 173 | role: 'assistant', 174 | content: part.text || '', 175 | }, 176 | finish_reason: null, 177 | }; 178 | 179 | switch (item.finishReason) { 180 | case 'MAX_TOKENS': 181 | choice.finish_reason = 'length'; 182 | break; 183 | case 'SAFETY': 184 | case 'RECITATION': 185 | choice.finish_reason = 'content_filter'; 186 | break; 187 | default: 188 | choice.finish_reason = 'stop'; 189 | } 190 | 191 | return choice; 192 | }); 193 | }; 194 | 195 | async function* iterator(): AsyncIterator { 196 | for await (const chunk of stream) { 197 | yield { 198 | id: randomUUID(), 199 | model, 200 | choices: toChoices(chunk), 201 | object: 'chat.completion.chunk', 202 | created: Date.now() / 10, 203 | }; 204 | } 205 | } 206 | 207 | return new Stream(iterator, controller); 208 | } 209 | } 210 | 211 | export type ChatCompletionCreateParamsNonStreaming = Chat.ChatCompletionCreateParamsNonStreaming; 212 | 213 | export type ChatCompletionCreateParamsStreaming = Chat.ChatCompletionCreateParamsStreaming; 214 | 215 | export type ChatCompletionCreateParams = Chat.ChatCompletionCreateParams; 216 | 217 | export type ChatModel = Chat.ChatModel; 218 | 219 | export namespace Chat { 220 | export type ChatModel = (string & NonNullable) | 'gemini-pro'; 221 | // 支持的有点问题 222 | // | 'gemini-pro-vision'; 223 | 224 | export interface ChatCompletionCreateParamsNonStreaming extends OpenAI.ChatCompletionCreateParamsNonStreaming { 225 | model: ChatModel; 226 | top_k?: number; 227 | } 228 | 229 | export interface ChatCompletionCreateParamsStreaming extends OpenAI.ChatCompletionCreateParamsStreaming { 230 | 
model: ChatModel; 231 | top_k?: number | null; 232 | } 233 | 234 | export type ChatCompletionCreateParams = ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming; 235 | } 236 | 237 | namespace GeminiChat { 238 | export interface GenerationConfig { 239 | candidateCount?: number; 240 | stopSequences?: string[]; 241 | maxOutputTokens?: number; 242 | temperature?: number; 243 | topP?: number; 244 | topK?: number; 245 | } 246 | 247 | export interface GenerateContentCandidate { 248 | index: number; 249 | content: Content; 250 | finishReason?: 'FINISH_REASON_UNSPECIFIED' | 'STOP' | 'MAX_TOKENS' | 'SAFETY' | 'RECITATION' | 'OTHER'; 251 | finishMessage?: string; 252 | citationMetadata?: CitationMetadata; 253 | } 254 | 255 | export interface GenerateContentResponse { 256 | candidates?: GenerateContentCandidate[]; 257 | // promptFeedback?: PromptFeedback; 258 | } 259 | 260 | export interface CitationMetadata { 261 | citationSources: CitationSource[]; 262 | } 263 | 264 | export interface CitationSource { 265 | startIndex?: number; 266 | endIndex?: number; 267 | uri?: string; 268 | license?: string; 269 | } 270 | 271 | export interface InputContent { 272 | parts: string | Array; 273 | role: string; 274 | } 275 | 276 | export interface Content extends InputContent { 277 | parts: Part[]; 278 | } 279 | 280 | export type Part = TextPart | InlineDataPart; 281 | 282 | export interface TextPart { 283 | text: string; 284 | inlineData?: never; 285 | } 286 | 287 | export interface InlineDataPart { 288 | text?: never; 289 | inlineData: GeminiContentBlob; 290 | } 291 | 292 | export interface GeminiContentBlob { 293 | mimeType: string; 294 | data: string; 295 | } 296 | 297 | export interface BaseParams { 298 | generationConfig?: GenerationConfig; 299 | } 300 | 301 | export interface GenerateContentRequest extends BaseParams { 302 | contents: Content[]; 303 | } 304 | } 305 | -------------------------------------------------------------------------------- 
/src/gemini/resources/chat/index.ts: -------------------------------------------------------------------------------- 1 | export { Chat } from './chat'; 2 | export { 3 | type ChatModel, 4 | type ChatCompletionCreateParams, 5 | type ChatCompletionCreateParamsNonStreaming, 6 | type ChatCompletionCreateParamsStreaming, 7 | Completions, 8 | } from './completions'; 9 | -------------------------------------------------------------------------------- /src/gemini/resources/index.ts: -------------------------------------------------------------------------------- 1 | export * from './chat/index'; 2 | 3 | export { Models, type Model, ModelsPage } from './models'; 4 | -------------------------------------------------------------------------------- /src/gemini/resources/models.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | import { type FinalRequestOptions, type PagePromise, type RequestOptions } from 'openai/core'; 3 | import { Page } from 'openai/pagination'; 4 | 5 | import { type GeminiAI } from '../../index'; 6 | import { APIResource } from '../resource'; 7 | 8 | // TODO 输出原始对象 9 | export class Models extends APIResource { 10 | /** 11 | * Retrieves a model instance, providing basic information about the model such as 12 | * the owner and permissioning. 13 | */ 14 | async retrieve(model: string, options?: RequestOptions): Promise { 15 | const item: GeminiModel = await this._client.get(`/models/${model}`, options); 16 | 17 | return { 18 | id: item.name, 19 | created: 0, 20 | object: 'model', 21 | owned_by: 'google', 22 | }; 23 | } 24 | 25 | /** 26 | * Lists the currently available models, and provides basic information about each 27 | * one such as the owner and availability. 
28 | */ 29 | list(options?: RequestOptions): PagePromise { 30 | return this._client.getAPIList('/models', ModelsPage, options); 31 | } 32 | } 33 | 34 | export class ModelsPage extends Page { 35 | constructor(client: GeminiAI, response: Response, body: GeminiPageResponse, options: FinalRequestOptions) { 36 | const data: Model[] = body.models.map(item => { 37 | return { 38 | id: item.name, 39 | created: 0, 40 | object: 'model', 41 | owned_by: 'google', 42 | }; 43 | }); 44 | 45 | super(client, response, { data, object: 'list' }, options); 46 | } 47 | } 48 | 49 | interface GeminiModel { 50 | name: string; 51 | version: string; 52 | displayName: string; 53 | description: string; 54 | inputTokenLimit: string; 55 | outputTokenLimit: string; 56 | supportedGenerationMethods: string[]; 57 | } 58 | 59 | interface GeminiPageResponse { 60 | models: GeminiModel[]; 61 | } 62 | 63 | /** 64 | * Describes an OpenAI model offering that can be used with the API. 65 | */ 66 | export type Model = OpenAI.Models.Model; 67 | 68 | export namespace Models { 69 | export import Model = OpenAI.Models.Model; 70 | export import ModelsPage = OpenAI.Models.ModelsPage; 71 | } 72 | -------------------------------------------------------------------------------- /src/hunyuan/index.ts: -------------------------------------------------------------------------------- 1 | import { createHmac } from 'node:crypto'; 2 | import type { Agent } from 'node:http'; 3 | 4 | import { APIClient, type DefaultQuery, type Fetch, type FinalRequestOptions, type Headers } from 'openai/core'; 5 | 6 | import * as API from './resources'; 7 | 8 | export interface HunYuanAIOptions { 9 | baseURL?: string; 10 | appId?: string; 11 | secretId?: string; 12 | secretKey?: string; 13 | timeout?: number | undefined; 14 | httpAgent?: Agent; 15 | fetch?: Fetch | undefined; 16 | /** 17 | * Default headers to include with every request to the API. 
18 | * 19 | * These can be removed in individual requests by explicitly setting the 20 | * header to `undefined` or `null` in request options. 21 | */ 22 | defaultHeaders?: Headers; 23 | 24 | /** 25 | * Default query parameters to include with every request to the API. 26 | * 27 | * These can be removed in individual requests by explicitly setting the 28 | * param to `undefined` in request options. 29 | */ 30 | defaultQuery?: DefaultQuery; 31 | } 32 | 33 | export class HunYuanAI extends APIClient { 34 | appId: number; 35 | 36 | secretId: string; 37 | secretKey: string; 38 | 39 | private _options: HunYuanAIOptions; 40 | 41 | constructor(options: HunYuanAIOptions = {}) { 42 | const { 43 | appId = process.env.HUNYUAN_APP_ID || '', 44 | secretId = process.env.HUNYUAN_SECRET_ID || '', 45 | secretKey = process.env.HUNYUAN_SECRET_KEY || '', 46 | baseURL = 'https://hunyuan.cloud.tencent.com/hyllm/v1', 47 | timeout = 30000, 48 | fetch = globalThis.fetch, 49 | httpAgent = undefined, 50 | ...rest 51 | } = options; 52 | 53 | super({ 54 | baseURL, 55 | timeout, 56 | fetch, 57 | httpAgent, 58 | ...rest, 59 | }); 60 | 61 | this._options = options; 62 | 63 | this.appId = parseInt(appId, 10); 64 | this.secretKey = secretKey; 65 | this.secretId = secretId; 66 | } 67 | 68 | chat = new API.Chat(this); 69 | 70 | protected override defaultHeaders(opts: FinalRequestOptions): Headers { 71 | return { 72 | ...super.defaultHeaders(opts), 73 | ...this._options.defaultHeaders, 74 | }; 75 | } 76 | 77 | protected override defaultQuery(): DefaultQuery | undefined { 78 | return this._options.defaultQuery; 79 | } 80 | 81 | generateAuthorization(path: string, data: Record) { 82 | const rawSessionKey = this.buildURL(path, {}).replace('https://', ''); 83 | 84 | const rawSignature: string[] = []; 85 | 86 | Object.keys(data) 87 | .sort() 88 | .forEach(key => { 89 | const value = data[key]; 90 | 91 | if (value == null) return; 92 | 93 | if (typeof value === 'object') { 94 | 
rawSignature.push(`${key}=${JSON.stringify(value)}`); 95 | } else { 96 | rawSignature.push(`${key}=${value}`); 97 | } 98 | }); 99 | 100 | return this.hash(`${rawSessionKey}?${rawSignature.join('&')}`); 101 | } 102 | 103 | protected hash(data: string) { 104 | const hash = createHmac('sha1', this.secretKey); 105 | return hash.update(Buffer.from(data, 'utf8')).digest('base64'); 106 | } 107 | } 108 | 109 | export namespace HunYuanAI { 110 | export type ChatModel = API.ChatModel; 111 | export type ChatCompletionCreateParams = API.ChatCompletionCreateParams; 112 | export type ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming; 113 | export type ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming; 114 | } 115 | 116 | export default HunYuanAI; 117 | -------------------------------------------------------------------------------- /src/hunyuan/resource.ts: -------------------------------------------------------------------------------- 1 | import { HunYuanAI } from './index'; 2 | 3 | export class APIResource { 4 | protected _client: HunYuanAI; 5 | 6 | constructor(client: HunYuanAI) { 7 | this._client = client; 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /src/hunyuan/resources/chat/chat.ts: -------------------------------------------------------------------------------- 1 | import { APIResource } from '../../resource'; 2 | import { Completions } from './completions'; 3 | 4 | export class Chat extends APIResource { 5 | completions = new Completions(this._client); 6 | } 7 | -------------------------------------------------------------------------------- /src/hunyuan/resources/chat/completions.ts: -------------------------------------------------------------------------------- 1 | import { createHmac } from 'node:crypto'; 2 | 3 | import OpenAI, { APIError } from 'openai'; 4 | import { Stream } from 'openai/streaming'; 5 | 6 | import { APIResource } from 
'../../resource';

export class Completions extends APIResource {
  /**
   * Creates a model response for the given chat conversation.
   *
   * See https://cloud.tencent.com/document/product/1729/97732
   */
  create(body: ChatCompletionCreateParamsNonStreaming, options?: OpenAI.RequestOptions): Promise<OpenAI.ChatCompletion>;
  create(
    body: ChatCompletionCreateParamsStreaming,
    options?: OpenAI.RequestOptions,
  ): Promise<Stream<OpenAI.ChatCompletionChunk>>;

  async create(
    params: ChatCompletionCreateParams,
    options?: OpenAI.RequestOptions,
  ): Promise<OpenAI.ChatCompletion | Stream<OpenAI.ChatCompletionChunk>> {
    const client = this._client;
    const { model, messages, temperature = 0.8, top_p, stream } = params;

    // Hunyuan signs each request with the current UNIX time (seconds);
    // `expired` must stay within 90 days of `timestamp` (7200 s here).
    const timestamp = Math.floor(Date.now() / 1000);

    const body: ChatCompletions.ChatCompletionCreateParams = {
      app_id: client.appId,
      secret_id: client.secretId,
      timestamp: timestamp,
      expired: timestamp + 7200,
      temperature,
      top_p,
      stream: stream ? 1 : 0, // hunyuan expresses streaming as 0/1, not boolean
      messages,
    };

    const path = '/chat/completions';

    // The sorted-key HMAC signature over the body doubles as the auth header.
    const signature = client.generateAuthorization(path, body);

    const response: Response = await this._client.post(path, {
      ...options,
      body,
      headers: {
        ...options?.headers,
        Authorization: signature,
      },
      // `stream: false` + `__binaryResponse` keep the raw Response so we can
      // unwrap hunyuan's envelope (and drive SSE parsing) ourselves.
      stream: false,
      __binaryResponse: true,
    });

    if (params.stream) {
      const controller = new AbortController();

      options?.signal?.addEventListener('abort', () => {
        controller.abort();
      });

      return Completions.fromSSEResponse(model, Stream.fromSSEResponse(response, controller), controller);
    }

    return Completions.fromResponse(model, await response.json());
  }

  /**
   * Adapts a hunyuan SSE stream into a stream of OpenAI chat-completion chunks.
   *
   * @throws APIError when a chunk carries an `error` payload
   */
  static fromSSEResponse(
    model: string,
    stream: Stream<ChatCompletions.ChatCompletion>,
    controller: AbortController,
  ): Stream<OpenAI.ChatCompletionChunk> {
    async function* iterator(): AsyncIterator<OpenAI.ChatCompletionChunk> {
      for await (const chunk of stream) {
        if (chunk.error) {
          throw new APIError(undefined, chunk.error, undefined, undefined);
        }

        const message = chunk.choices[0];

        const choice: OpenAI.ChatCompletionChunk.Choice = {
          index: 0,
          delta: {
            role: 'assistant',
            content: message.delta.content || '',
          },
          // NOTE(review): the upstream finish_reason is never forwarded here —
          // confirm whether the final chunk should carry one.
          finish_reason: null,
        };

        yield {
          id: chunk.id,
          model,
          choices: [choice],
          object: 'chat.completion.chunk',
          created: parseInt(chunk.created, 10),
        };
      }
    }

    return new Stream(iterator, controller);
  }

  /**
   * Converts a synchronous hunyuan response into an OpenAI `ChatCompletion`.
   *
   * @throws APIError when the payload carries an `error` field
   */
  static fromResponse(model: string, data: ChatCompletions.ChatCompletion): OpenAI.ChatCompletion {
    if (data.error) {
      throw new APIError(undefined, data.error, undefined, undefined);
    }

    const message = data.choices[0];

    const choice: OpenAI.ChatCompletion.Choice = {
      index: 0,
      message: {
        role: 'assistant',
        content: message.messages.content,
      },
      logprobs: null,
      finish_reason: message.finish_reason,
    };

    return {
      id: data.id,
      model: model,
      choices: [choice],
      // Radix added for safety and for consistency with fromSSEResponse.
      created: parseInt(data.created, 10),
      object: 'chat.completion',
      usage: data.usage,
    };
  }

  // NOTE(review): duplicates HunYuanAI#hash and is unused within this class —
  // a candidate for removal once confirmed nothing else relies on it.
  protected hash(data: string) {
    const hash = createHmac('sha1', this._client.secretKey);
    return hash.update(Buffer.from(data, 'utf8')).digest('base64');
  }
}

export interface ChatCompletionCreateParamsNonStreaming extends OpenAI.ChatCompletionCreateParamsNonStreaming {
  model: ChatModel;
}

export interface ChatCompletionCreateParamsStreaming extends OpenAI.ChatCompletionCreateParamsStreaming {
  model: ChatModel;
}

export type ChatCompletionCreateParams = ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming;

export type ChatModel = 'hunyuan';

export namespace ChatCompletions {
  export interface ChatCompletionCreateParams {
    /**
     * Tencent Cloud account APPID.
     */
    app_id: number;

    /**
     * API secret id.
     */
    secret_id: string;

    /**
     * Current UNIX timestamp (seconds) recording when the request was made.
     */
    timestamp: number;

    /**
     * Signature expiry, a UNIX timestamp in seconds; must differ from
     * `timestamp` by less than 90 days.
     */
    expired: number;

    /**
     * Request id, used for troubleshooting.
     */
    query_id?: string;

    /**
     * Output randomness (sampling temperature).
     */
    temperature?: number | null;

    /**
     * Output diversity (nucleus sampling).
     */
    top_p?: number | null;

    /**
     * Whether to stream results: 0 = synchronous, 1 = streaming (default,
     * via SSE). Synchronous requests time out after 60 s; prefer streaming
     * for long outputs.
     */
    stream?: number | null;

    /**
     * Conversation history in chronological order — at most 40 messages and
     * up to 16k tokens of context.
     */
    messages: OpenAI.ChatCompletionMessageParam[];
  }

  export type CompletionChoicesDelta = {
    content: string;
  };

  export type CompletionChoice = {
    finish_reason: 'stop';
    /**
     * Message content in synchronous mode; null in streaming mode.
     */
    messages: OpenAI.ChatCompletionMessage;
    /**
     * Message content in streaming mode; null in synchronous mode.
     */
    delta: CompletionChoicesDelta;
  };

  export interface ChatCompletion {
    choices: CompletionChoice[];
    created: string;
    note: string;
    id: string;
    usage: OpenAI.CompletionUsage;

    error?: {
      message: string;
      code: number;
    };
  }
}
--------------------------------------------------------------------------------
/src/hunyuan/resources/chat/index.ts:
--------------------------------------------------------------------------------
export { Chat } from './chat';
export {
  type ChatModel,
  type ChatCompletionCreateParams,
  type ChatCompletionCreateParamsNonStreaming,
  type ChatCompletionCreateParamsStreaming,
  Completions,
} from './completions';
--------------------------------------------------------------------------------
/src/hunyuan/resources/index.ts:
--------------------------------------------------------------------------------

export * from './chat/index';

--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------

import OpenAI from 'openai';

import ErnieAI, { ErnieAIOptions } from './ernie';
import GeminiAI, { GeminiAIOptions } from './gemini';
import HunYuanAI, { HunYuanAIOptions } from './hunyuan';
import MinimaxAI, { MinimaxAIOptions } from './minimax';
import QWenAI, { QWenAIOptions } from './qwen';
import SparkAI, { SparkAIOptions } from './spark';
import VYroAI, { VYroAIOptions } from './vyro';

// Re-export every provider client together with its options type so callers
// can import everything from the package root.
export {
  ErnieAI,
  type ErnieAIOptions,
  GeminiAI,
  type GeminiAIOptions,
  HunYuanAI,
  type HunYuanAIOptions,
  MinimaxAI,
  type MinimaxAIOptions,
  OpenAI,
  QWenAI,
  type QWenAIOptions,
  SparkAI,
  type SparkAIOptions,
  VYroAI,
  type VYroAIOptions,
};

// Surface the OpenAI error hierarchy so downstream code does not need a
// direct dependency on the `openai` package for error handling.
export {
  OpenAIError,
  APIError,
  APIConnectionError,
  APIConnectionTimeoutError,
  APIUserAbortError,
  NotFoundError,
  ConflictError,
  RateLimitError,
  BadRequestError,
  AuthenticationError,
  InternalServerError,
  PermissionDeniedError,
  UnprocessableEntityError,
} from 'openai';

export * from './resource';
export * from './streaming';
export * from './util';

// PKG_VERSION is injected at build time — presumably by the Vite config;
// TODO confirm against vite.config.mts.
export default {
  version: process.env.PKG_VERSION,
};

--------------------------------------------------------------------------------
/src/minimax/error.ts:
--------------------------------------------------------------------------------

import { APIError } from 'openai';

// Envelope common to all Minimax API responses: the real status lives in
// `base_resp`, independent of the HTTP status code.
export type MinimaxAPIResponse = {
  base_resp: {
    status_code: number;
    status_msg: string;
  };
};

/**
 * Throws an APIError when a Minimax response reports a non-zero status code.
 * A `status_code` of 0 means success; anything else is surfaced as an error.
 */
export function assertStatusCode(data: MinimaxAPIResponse) {
  if (data.base_resp.status_code === 0) return;

  const error = {
    code: data.base_resp.status_code,
    message: data.base_resp.status_msg,
  };

  throw new APIError(undefined, error, undefined, undefined);
}

--------------------------------------------------------------------------------
/src/minimax/index.ts:
--------------------------------------------------------------------------------

import type { Agent } from 'node:http';

import { APIClient, type DefaultQuery, type Fetch, type FinalRequestOptions, type Headers } from 'openai/core';

import * as API from './resources';

export interface MinimaxAIOptions {
  baseURL?: string;
  orgId?: string;
  apiKey?: string;
  timeout?: number | undefined;
  httpAgent?: Agent;
  fetch?: Fetch | undefined;
  defaultHeaders?: Headers;
  defaultQuery?: DefaultQuery;
}

/**
 * OpenAI-compatible client for the Minimax API.
 *
 * Credentials fall back to the MINIMAX_API_ORG / MINIMAX_API_KEY environment
 * variables when not passed explicitly.
 */
export class MinimaxAI extends APIClient {
  protected orgId: string;
  protected apiKey: string;

  private _options: MinimaxAIOptions;

  constructor(options: MinimaxAIOptions = {}) {
    const {
      orgId = process.env.MINIMAX_API_ORG || '',
      apiKey = process.env.MINIMAX_API_KEY || '',
      baseURL = 'https://api.minimax.chat/v1',
      timeout = 30000,
      fetch = globalThis.fetch,
      httpAgent = undefined,
      ...rest
    } = options;

    super({
      baseURL,
      timeout,
      fetch,
      httpAgent,
      ...rest,
    });

    this._options = options;

    this.apiKey = apiKey;
    this.orgId = orgId;
  }

  audio = new API.Audio(this);

  chat = new API.Chat(this);

  embeddings = new API.Embeddings(this);

  protected authHeaders(): Headers {
    return {
      Authorization: `Bearer ${this.apiKey}`,
    };
  }

  protected override defaultHeaders(opts: FinalRequestOptions): Headers {
    return {
      ...super.defaultHeaders(opts),
      ...this._options.defaultHeaders,
    };
  }

  // Minimax scopes every request to an organization via the GroupId query
  // parameter, so it is attached here instead of per-request.
  protected override defaultQuery(): DefaultQuery | undefined {
    return {
      GroupId: this.orgId,
      ...this._options.defaultQuery,
    };
  }
}

export namespace MinimaxAI {
  export type Chat = API.Chat;
  export type ChatModel = API.ChatModel;
  export type ChatCompletionCreateParams = API.ChatCompletionCreateParams;
  export type ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming;
  export type ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming;

  export type Embeddings = API.Embeddings;
  export type EmbeddingCreateParams = API.EmbeddingCreateParams;

  export type Audio = API.Audio;
}

export default MinimaxAI;

--------------------------------------------------------------------------------
/src/minimax/resources/audio/audio.ts:
--------------------------------------------------------------------------------

// File generated from our OpenAPI spec by Stainless.
2 | import { APIResource } from '../../../resource'; 3 | import { Speech } from './speech'; 4 | 5 | export class Audio extends APIResource { 6 | speech = new Speech(this._client); 7 | } 8 | -------------------------------------------------------------------------------- /src/minimax/resources/audio/index.ts: -------------------------------------------------------------------------------- 1 | export { Audio } from './audio'; 2 | export { type SpeechCreateParams, type SpeechModel, Speech } from './speech'; 3 | -------------------------------------------------------------------------------- /src/minimax/resources/audio/speech.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIError } from 'openai'; 2 | import { type RequestOptions } from 'openai/core'; 3 | 4 | import { APIResource } from '../../../resource'; 5 | import { assertStatusCode, type MinimaxAPIResponse } from '../../error'; 6 | 7 | export class Speech extends APIResource { 8 | protected resources: Record< 9 | SpeechModel, 10 | { 11 | model: string; 12 | endpoint: string; 13 | resposne_type: 'json' | 'binary' | 'stream'; 14 | } 15 | > = { 16 | 'speech-01': { 17 | model: 'speech-01', 18 | endpoint: '/text_to_speech', 19 | resposne_type: 'binary', 20 | }, 21 | 'speech-01-pro': { 22 | model: 'speech-01', 23 | endpoint: '/t2a_pro', 24 | resposne_type: 'json', 25 | }, 26 | // Note: 返回的是 SSE 流数据 27 | // 'speech-01-stream': { 28 | // model: 'speech-01', 29 | // endpoint: '/tts/stream', 30 | // resposne_type: 'stream', 31 | // }, 32 | }; 33 | 34 | /** 35 | * Generates audio from the input text. 
36 | * 37 | * See https://api.minimax.chat/document/guides/T2A-model/tts 38 | */ 39 | create(params: Speech.SpeechCreateParams, options?: RequestOptions): Promise; 40 | 41 | create( 42 | params: Speech.SpeechCreateParams, 43 | options: RequestOptions & { 44 | __binaryResponse: false; 45 | }, 46 | ): Promise; 47 | 48 | async create( 49 | params: Speech.SpeechCreateParams, 50 | options?: RequestOptions, 51 | ): Promise { 52 | const { input, voice, ...rest } = params; 53 | 54 | const resource = this.resources[params.model]; 55 | if (!resource) { 56 | throw new OpenAIError(`Invalid model: ${params.model}`); 57 | } 58 | 59 | const body: Record = { 60 | ...rest, 61 | text: input, 62 | model: resource.model, 63 | }; 64 | 65 | if (voice) { 66 | body.voice_id = voice; 67 | } 68 | 69 | const response: Response = await this._client.post(resource.endpoint, { 70 | ...options, 71 | body, 72 | __binaryResponse: true, 73 | }); 74 | 75 | // Note: pro 模型返回 json 76 | if (options?.__binaryResponse || resource.resposne_type === 'binary' || resource.resposne_type === 'stream') 77 | return response; 78 | 79 | return response.json().then((data: Speech.AudioCreateResponse) => { 80 | assertStatusCode(data); 81 | 82 | return fetch(data.audio_file); 83 | }); 84 | } 85 | } 86 | 87 | export type SpeechModel = Speech.SpeechModel; 88 | 89 | export type SpeechCreateParams = Speech.SpeechCreateParams; 90 | 91 | export namespace Speech { 92 | export type SpeechModel = (string & NonNullable) | 'speech-01' | 'speech-01-pro'; 93 | 94 | export interface SpeechCreateParams { 95 | /** 96 | * One of the available [TTS models](https://api.minimax.chat/document/guides/T2A-model/tts) 97 | */ 98 | model: SpeechModel; 99 | 100 | /** 101 | * The text to generate audio for. 102 | */ 103 | input: string; 104 | 105 | /** 106 | * The voice to use when generating the audio. 
107 | * 108 | * - 青涩青年音色(male-qn-qingse) 109 | * - 精英青年音色(male-qn-jingying) 110 | * - 霸道青年音色(male-qn-badao) 111 | * - 青年大学生音色(male-qn-daxuesheng) 112 | * - 少女音色(female-shaonv) 113 | * - 御姐音色(female-yujie) 114 | * - 成熟女性音色(female-chengshu) 115 | * - 甜美女性音色(female-tianmei) 116 | * - 男性主持人(presenter_male) 117 | * - 女性主持人(presenter_female) 118 | * - 男性有声书1(audiobook_male_1) 119 | * - 男性有声书2(audiobook_male_2) 120 | * - 女性有声书1(audiobook_female_1) 121 | * - 女性有声书2(audiobook_female_2) 122 | * - 青涩青年音色-beta(male-qn-qingse-jingpin) 123 | * - 精英青年音色-beta(male-qn-jingying-jingpin) 124 | * - 霸道青年音色-beta(male-qn-badao-jingpin) 125 | * - 青年大学生音色-beta(male-qn-daxuesheng-jingpin) 126 | * - 少女音色-beta(female-shaonv-jingpin) 127 | * - 御姐音色-beta(female-yujie-jingpin) 128 | * - 成熟女性音色-beta(female-chengshu-jingpin) 129 | * - 甜美女性音色-beta(female-tianmei-jingpin) 130 | */ 131 | voice: 132 | | (string & NonNullable) 133 | | 'male-qn-qingse' 134 | | 'male-qn-jingying' 135 | | 'male-qn-badao' 136 | | 'male-qn-daxuesheng' 137 | | 'female-shaonv' 138 | | 'female-yujie' 139 | | 'female-chengshu' 140 | | 'female-tianmei' 141 | | 'presenter_male' 142 | | 'presenter_female' 143 | | 'audiobook_male_1' 144 | | 'audiobook_male_2' 145 | | 'audiobook_female_1' 146 | | 'audiobook_female_2' 147 | | 'male-qn-qingse-jingpin' 148 | | 'male-qn-jingying-jingpin' 149 | | 'male-qn-badao-jingpin' 150 | | 'male-qn-daxuesheng-jingpin' 151 | | 'female-shaonv-jingpin' 152 | | 'female-yujie-jingpin' 153 | | 'female-chengshu-jingpin' 154 | | 'female-tianmei-jingpin'; 155 | 156 | /** 157 | * The speed of the generated audio. 158 | * 159 | * Range: 0.5 - 2.0 160 | * 161 | * @defaultValue 1.0 162 | */ 163 | speed?: number; 164 | 165 | /** 166 | * The vol of the generated audio. 167 | * 168 | * 169 | * Range: 0~1 170 | * 171 | * @defaultValue 1.0 172 | */ 173 | vol?: number; 174 | 175 | /** 176 | * The pitch of the generated audio. 
177 | * 178 | * Range: 0~1 179 | * 180 | * @defaultValue 0 181 | */ 182 | pitch?: number; 183 | 184 | /** 185 | * 生成声音的采样率。t2a_pro 可用 186 | * 187 | * Range: [16000, 24000] 188 | * 189 | * @defaultValue 24000 190 | */ 191 | audio_sample_rate?: number; 192 | 193 | /** 194 | * 生成声音的比特率. t2a_pro 可用 195 | * 196 | * Range: [32000, 64000,128000] 197 | * 198 | * @defaultValue 128000 199 | */ 200 | bitrate?: number; 201 | 202 | /** 203 | * The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. 204 | */ 205 | response_format?: 'mp3' | 'opus' | 'aac' | 'flac'; 206 | } 207 | 208 | export interface AudioCreateResponse extends MinimaxAPIResponse { 209 | audio_file: string; 210 | subtitle_file: string; 211 | trace_id: string; 212 | extra_info: { 213 | audio_length: number; 214 | audio_sample_rate: number; 215 | audio_size: number; 216 | bitrate: number; 217 | word_count: number; 218 | invisible_character_ratio: number; 219 | }; 220 | } 221 | } 222 | -------------------------------------------------------------------------------- /src/minimax/resources/chat/chat.ts: -------------------------------------------------------------------------------- 1 | import { APIResource } from '../../../resource'; 2 | import { Completions } from './completions'; 3 | 4 | export class Chat extends APIResource { 5 | completions = new Completions(this._client); 6 | } 7 | -------------------------------------------------------------------------------- /src/minimax/resources/chat/completions.ts: -------------------------------------------------------------------------------- 1 | import OpenAI, { APIError, OpenAIError } from 'openai'; 2 | import { Stream } from 'openai/streaming'; 3 | 4 | import { APIResource } from '../../../resource'; 5 | import { iterMessages, SSEDecoder } from '../../../streaming'; 6 | import { assertStatusCode } from '../../error'; 7 | 8 | export class Completions extends APIResource { 9 | protected resources: Record< 10 | ChatModel, 11 | { 12 | model: 
ChatModel; 13 | endpoint: string; 14 | } 15 | > = { 16 | 'abab5-chat': { 17 | model: 'abab5-chat', 18 | endpoint: '/text/chatcompletion', 19 | }, 20 | 'abab5.5-chat': { 21 | model: 'abab5.5-chat', 22 | endpoint: '/text/chatcompletion', 23 | }, 24 | 'abab5.5-chat-pro': { 25 | model: 'abab5.5-chat', 26 | endpoint: '/text/chatcompletion_pro', 27 | }, 28 | }; 29 | 30 | protected system = 31 | 'MM智能助理是一款由MiniMax自研的,没有调用其他产品的接口的大型语言模型。MiniMax是一家中国科技公司,一直致力于进行大模型相关的研究。'; 32 | 33 | /** 34 | * Creates a model response for the given chat conversation. 35 | * 36 | * See https://api.minimax.chat/document/guides/chat-model/chat/api 37 | */ 38 | create(body: ChatCompletionCreateParamsNonStreaming, options?: OpenAI.RequestOptions): Promise; 39 | create( 40 | body: ChatCompletionCreateParamsStreaming, 41 | options?: OpenAI.RequestOptions, 42 | ): Promise>; 43 | 44 | async create(params: ChatCompletionCreateParams, options?: OpenAI.RequestOptions) { 45 | const resource = this.resources[params.model]; 46 | 47 | if (!resource) { 48 | throw new OpenAIError(`Invalid model: ${params.model}`); 49 | } 50 | 51 | const body = this.buildCreateParams(params); 52 | 53 | const response: Response = await this._client.post(resource.endpoint, { 54 | ...options, 55 | body: { ...body, model: resource.model }, 56 | stream: false, 57 | __binaryResponse: true, 58 | }); 59 | 60 | if (body.stream) { 61 | const controller = new AbortController(); 62 | 63 | options?.signal?.addEventListener('abort', () => { 64 | controller.abort(); 65 | }); 66 | 67 | return Completions.fromSSEResponse(params.model, response, controller); 68 | } 69 | 70 | return Completions.fromResponse(params.model, await response.json()); 71 | } 72 | 73 | protected buildCreateParams(params: ChatCompletionCreateParams): ChatCompletions.ChatCompletionCreateParams { 74 | const { model, messages = [], max_tokens, ...rest } = params; 75 | 76 | const data: ChatCompletions.ChatCompletionCreateParams = { 77 | model, 78 | messages: [], 79 | 
...rest, 80 | }; 81 | 82 | if (max_tokens) { 83 | data.tokens_to_generate = max_tokens; 84 | } 85 | 86 | const head = messages[0]; 87 | 88 | // minimax 的 system 是独立字段 89 | const system = head && head.role === 'system' ? head.content : null; 90 | 91 | // 移除 system 角色的消息 92 | if (system) { 93 | messages.splice(0, 1); 94 | } 95 | 96 | if (model === 'abab5.5-chat-pro') { 97 | data.bot_setting = [ 98 | { 99 | bot_name: 'MM智能助理', 100 | content: system || this.system, 101 | }, 102 | ]; 103 | data.reply_constraints = { 104 | sender_type: 'BOT', 105 | sender_name: 'MM智能助理', 106 | }; 107 | } else { 108 | data.role_meta = { 109 | bot_name: 'MM智能助理', 110 | user_name: '用户', 111 | }; 112 | data.prompt = system || this.system; 113 | } 114 | 115 | data.messages = messages.map(item => { 116 | switch (item.role) { 117 | case 'assistant': 118 | return { 119 | sender_type: 'BOT', 120 | text: item.content as string, 121 | }; 122 | default: { 123 | const message: ChatCompletions.ChatMessage = { 124 | sender_type: 'USER', 125 | text: item.content as string, 126 | }; 127 | 128 | if (model == 'abab5.5-chat-pro') { 129 | message.sender_name = '用户'; 130 | } 131 | 132 | return message; 133 | } 134 | } 135 | }); 136 | 137 | if (params.stream) { 138 | data['use_standard_sse'] = true; 139 | } 140 | 141 | return data; 142 | } 143 | 144 | static fromResponse(model: ChatModel, data: ChatCompletions.ChatCompletion): OpenAI.ChatCompletion { 145 | assertStatusCode(data); 146 | 147 | return { 148 | id: data.id, 149 | model: data.model, 150 | choices: data.choices.map((choice, index) => { 151 | const { finish_reason } = choice; 152 | 153 | if (model === 'abab5.5-chat-pro') { 154 | return { 155 | index: index, 156 | message: { 157 | role: 'assistant', 158 | content: choice.messages[0].text, 159 | }, 160 | logprobs: null, 161 | finish_reason, 162 | }; 163 | } 164 | 165 | return { 166 | index: index, 167 | message: { 168 | role: 'assistant', 169 | content: choice.text, 170 | }, 171 | logprobs: null, 172 | 
finish_reason, 173 | }; 174 | }), 175 | created: data.created, 176 | object: 'chat.completion', 177 | usage: data.usage, 178 | }; 179 | } 180 | 181 | static fromSSEResponse( 182 | model: ChatModel, 183 | response: Response, 184 | controller: AbortController, 185 | ): Stream { 186 | let consumed = false; 187 | const decoder = new SSEDecoder(); 188 | 189 | function transform(data: ChatCompletions.ChatCompletionChunk): OpenAI.ChatCompletionChunk { 190 | return { 191 | id: data.request_id, 192 | model: model, 193 | choices: data.choices.map((choice, index) => { 194 | const { finish_reason = null } = choice; 195 | 196 | if (model === 'abab5.5-chat-pro') { 197 | const content = choice.messages[0].text; 198 | 199 | return { 200 | index: index, 201 | delta: { 202 | role: 'assistant', 203 | content: finish_reason === 'stop' ? '' : content, 204 | }, 205 | finish_reason: finish_reason, 206 | }; 207 | } 208 | 209 | return { 210 | index: index, 211 | delta: { 212 | role: 'assistant', 213 | content: choice.delta, 214 | }, 215 | finish_reason: finish_reason, 216 | }; 217 | }), 218 | object: 'chat.completion.chunk', 219 | created: data.created, 220 | }; 221 | } 222 | 223 | async function* iterator(): AsyncIterator { 224 | if (consumed) { 225 | throw new Error('Cannot iterate over a consumed stream, use `.tee()` to split the stream.'); 226 | } 227 | consumed = true; 228 | let done = false; 229 | try { 230 | for await (const sse of iterMessages(response, decoder, controller)) { 231 | if (done) continue; 232 | 233 | if (sse.data.startsWith('[DONE]')) { 234 | done = true; 235 | continue; 236 | } 237 | 238 | if (sse.event === null) { 239 | let data; 240 | 241 | try { 242 | data = JSON.parse(sse.data); 243 | } catch (e) { 244 | console.error(`Could not parse message into JSON:`, sse.data); 245 | console.error(`From chunk:`, sse.raw); 246 | throw e; 247 | } 248 | 249 | if (data && data.code) { 250 | throw new APIError(undefined, data, undefined, undefined); 251 | } 252 | 253 | yield 
transform(data); 254 | } 255 | } 256 | done = true; 257 | } catch (e) { 258 | // If the user calls `stream.controller.abort()`, we should exit without throwing. 259 | if (e instanceof Error && e.name === 'AbortError') return; 260 | throw e; 261 | } finally { 262 | // If the user `break`s, abort the ongoing request. 263 | if (!done) controller.abort(); 264 | } 265 | } 266 | 267 | return new Stream(iterator, controller); 268 | } 269 | } 270 | 271 | export interface ChatCompletionCreateParamsNonStreaming extends OpenAI.ChatCompletionCreateParamsNonStreaming { 272 | model: ChatModel; 273 | } 274 | 275 | export interface ChatCompletionCreateParamsStreaming extends OpenAI.ChatCompletionCreateParamsStreaming { 276 | model: ChatModel; 277 | } 278 | 279 | export type ChatCompletionCreateParams = ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming; 280 | 281 | export type ChatModel = 'abab5-chat' | 'abab5.5-chat' | 'abab5.5-chat-pro'; 282 | 283 | export namespace ChatCompletions { 284 | export type ChatMessage = { 285 | sender_type: 'USER' | 'BOT' | 'FUNCTION'; 286 | sender_name?: string; 287 | text: string; 288 | }; 289 | 290 | export interface ChatCompletionCreateParams { 291 | /** 292 | * 模型名称 293 | */ 294 | model: ChatModel; 295 | 296 | /** 297 | * 对话背景、人物或功能设定 298 | * 299 | * 和 bot_setting 互斥 300 | */ 301 | prompt?: string | null; 302 | 303 | /** 304 | * 对话 meta 信息 305 | * 306 | * 和 bot_setting 互斥 307 | */ 308 | role_meta?: { 309 | /** 310 | * 用户代称 311 | */ 312 | user_name: string; 313 | /** 314 | * AI 代称 315 | */ 316 | bot_name: string; 317 | }; 318 | 319 | /** 320 | * pro 模式下,可以设置 bot 的名称和内容 321 | * 322 | * 和 prompt 互斥 323 | */ 324 | bot_setting?: { 325 | bot_name: string; 326 | content: string; 327 | }[]; 328 | 329 | /** 330 | * pro 模式下,设置模型回复要求 331 | */ 332 | reply_constraints?: { 333 | sender_type: string; 334 | sender_name: string; 335 | }; 336 | 337 | /** 338 | * 对话内容 339 | */ 340 | messages: ChatMessage[]; 341 | 342 | /** 343 | * 如果为 
true,则表明设置当前请求为续写模式,回复内容为传入 messages 的最后一句话的续写; 344 | * 345 | * 此时最后一句发送者不限制 USER,也可以为 BOT。 346 | */ 347 | continue_last_message?: boolean | null; 348 | 349 | /** 350 | * 内容随机性 351 | */ 352 | temperature?: number | null; 353 | 354 | /** 355 | * 生成文本的多样性 356 | */ 357 | top_p?: number | null; 358 | 359 | /** 360 | * 最大生成token数,需要注意的是,这个参数并不会影响模型本身的生成效果, 361 | * 362 | * 而是仅仅通过以截断超出的 token 的方式来实现功能需要保证输入上文的 token 个数和这个值加一起小于 6144 或者 16384,否则请求会失败 363 | */ 364 | tokens_to_generate?: number | null; 365 | 366 | /** 367 | * 对输出中易涉及隐私问题的文本信息进行脱敏, 368 | * 369 | * 目前包括但不限于邮箱、域名、链接、证件号、家庭住址等,默认 false,即开启脱敏 370 | */ 371 | skip_info_mask?: boolean | null; 372 | 373 | /** 374 | * 对输出中易涉及隐私问题的文本信息进行打码, 375 | * 376 | * 目前包括但不限于邮箱、域名、链接、证件号、家庭住址等,默认true,即开启打码 377 | */ 378 | mask_sensitive_info?: boolean | null; 379 | 380 | /** 381 | * 生成多少个结果;不设置默认为1,最大不超过4。 382 | * 383 | * 由于 beam_width 生成多个结果,会消耗更多 token。 384 | */ 385 | beam_width?: number | null; 386 | 387 | /** 388 | * 是否以流式接口的形式返回数据,默认 false 389 | */ 390 | stream?: boolean | null; 391 | 392 | /** 393 | * 是否使用标准 SSE 格式,设置为 true 时, 394 | * 流式返回的结果将以两个换行为分隔符。 395 | * 396 | * 只有在 stream=true 时,此参数才会生效。 397 | */ 398 | use_standard_sse?: boolean | null; 399 | } 400 | 401 | export type ChatCompletionChoice = { 402 | index?: number; 403 | text: string; 404 | messages: { 405 | sender_type: 'BOT'; 406 | sender_name: string; 407 | text: string; 408 | }[]; 409 | finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call'; 410 | }; 411 | 412 | export interface ChatCompletion { 413 | id: string; 414 | created: number; 415 | model: ChatModel; 416 | reply: string; 417 | choices: ChatCompletionChoice[]; 418 | usage: { 419 | /** 420 | * Number of tokens in the generated completion. 421 | */ 422 | completion_tokens: number; 423 | 424 | /** 425 | * Number of tokens in the prompt. 426 | */ 427 | prompt_tokens: number; 428 | 429 | /** 430 | * Total number of tokens used in the request (prompt + completion). 
431 | */ 432 | total_tokens: number; 433 | }; 434 | input_sensitive: boolean; 435 | output_sensitive: boolean; 436 | base_resp: { 437 | status_code: number; 438 | status_msg: string; 439 | }; 440 | } 441 | 442 | export type ChatCompletionChunkChoice = { 443 | index: number; 444 | delta: string; 445 | messages: { 446 | sender_type: 'BOT'; 447 | sender_name: string; 448 | text: string; 449 | }[]; 450 | finish_reason: 'stop' | 'length' | 'content_filter' | 'function_call' | null; 451 | }; 452 | 453 | export interface ChatCompletionChunk { 454 | request_id: string; 455 | created: number; 456 | model: ChatModel; 457 | reply: string; 458 | choices: ChatCompletionChunkChoice[]; 459 | usage: { 460 | total_tokens: number; 461 | }; 462 | input_sensitive: false; 463 | output_sensitive: false; 464 | base_resp: { 465 | status_code: number; 466 | status_msg: string; 467 | }; 468 | } 469 | } 470 | -------------------------------------------------------------------------------- /src/minimax/resources/chat/index.ts: -------------------------------------------------------------------------------- 1 | export { Chat } from './chat'; 2 | export { 3 | type ChatModel, 4 | type ChatCompletionCreateParams, 5 | type ChatCompletionCreateParamsNonStreaming, 6 | type ChatCompletionCreateParamsStreaming, 7 | Completions, 8 | } from './completions'; 9 | -------------------------------------------------------------------------------- /src/minimax/resources/embeddings.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | import { type RequestOptions } from 'openai/core'; 3 | 4 | import { APIResource } from '../../resource'; 5 | import { assertStatusCode } from '../error'; 6 | 7 | export class Embeddings extends APIResource { 8 | /** 9 | * Creates an embedding vector representing the input text. 
10 | * 11 | * See https://api.minimax.chat/document/guides/Embeddings 12 | */ 13 | async create(params: EmbeddingCreateParams, options?: RequestOptions): Promise { 14 | const { model, input, type = 'query' } = params; 15 | 16 | const response: Response = await this._client.post('/embeddings', { 17 | body: { 18 | model, 19 | texts: input, 20 | type, 21 | }, 22 | ...options, 23 | __binaryResponse: true, 24 | }); 25 | 26 | const data: CreateEmbeddingResponse = await response.json(); 27 | 28 | assertStatusCode(data); 29 | 30 | return { 31 | data: data.vectors.map((embedding, index) => { 32 | return { 33 | embedding, 34 | index: index, 35 | object: 'embedding', 36 | }; 37 | }), 38 | model: model, 39 | object: 'list', 40 | usage: { 41 | prompt_tokens: data.total_tokens, 42 | total_tokens: data.total_tokens, 43 | }, 44 | }; 45 | } 46 | } 47 | 48 | export interface EmbeddingCreateParams extends OpenAI.EmbeddingCreateParams { 49 | /** 50 | * 模型 51 | */ 52 | model: 'embo-01'; 53 | 54 | /** 55 | * 首先通过db生成目标内容的向量并存储到向量数据库中,之后通过query生成检索文本的向量。 56 | */ 57 | type?: 'db' | 'query'; 58 | } 59 | 60 | type CreateEmbeddingResponse = { 61 | vectors: number[][]; 62 | total_tokens: number; 63 | base_resp: { 64 | status_code: number; 65 | status_msg: string; 66 | }; 67 | }; 68 | -------------------------------------------------------------------------------- /src/minimax/resources/index.ts: -------------------------------------------------------------------------------- 1 | export * from './chat/index'; 2 | export * from './audio/audio'; 3 | 4 | export { Embeddings, type EmbeddingCreateParams } from './embeddings'; 5 | -------------------------------------------------------------------------------- /src/qwen/dashscope/index.ts: -------------------------------------------------------------------------------- 1 | export * from './resolvers'; 2 | export * from './types'; 3 | -------------------------------------------------------------------------------- 
/src/qwen/dashscope/resolvers/chat.ts:
--------------------------------------------------------------------------------

import OpenAI, { APIError } from 'openai';
import { _iterSSEMessages, Stream } from 'openai/streaming';

import { DashscopeChat, OpenAIChatCompatibility } from '../types';
import { isMultiModal, toCompletionUsage } from './completions';

/**
 * Converts OpenAI-style messages into Dashscope's multi-modal message format.
 *
 * OpenAI parts look like `{ type: 'image_url', image_url: { url } }` /
 * `{ type: 'text', text }`; Dashscope expects `{ image }` / `{ text }` parts
 * without the `type` discriminator. Plain string content becomes one text part.
 *
 * Fix: builds new message/part objects instead of mutating the caller's input
 * (the previous version rewrote `params.messages` in place).
 */
export function fromChatCompletionMessages(
  messages: OpenAI.ChatCompletionMessageParam[],
): OpenAI.ChatCompletionMessageParam[] {
  return messages.map(message => {
    if (Array.isArray(message.content)) {
      const content = message.content.map(part => {
        if (part.type === 'image_url') {
          return { image: part.image_url.url };
        }

        const { type: _type, ...rest } = part;
        return rest;
      });

      // @ts-expect-error Dashscope parts intentionally diverge from OpenAI's schema.
      return { ...message, content };
    }

    return {
      ...message,
      content: [
        // @ts-expect-error plain string content becomes a single text part.
        { text: message.content! },
      ],
    };
  });
}

/**
 * Keeps only the text content of each message (used for text-only models).
 */
export function fromChatCompletionTextMessages(
  messages: OpenAI.ChatCompletionMessageParam[],
): OpenAI.ChatCompletionMessageParam[] {
  return messages.map(message => {
    if (Array.isArray(message.content)) {
      const part = message.content.find(c => c.type === 'text') as OpenAI.ChatCompletionContentPartText;
      return { role: message.role, content: part.text } as OpenAI.ChatCompletionMessageParam;
    }

    return message;
  });
}

/**
 * Translates OpenAI-compatible create params into Dashscope's
 * `{ model, input, parameters }` request shape.
 */
export function fromChatCompletionCreateParams(
  params: OpenAIChatCompatibility.ChatCompletionCreateParams,
): DashscopeChat.ChatCompletionCreateParams {
  const { model, messages, raw, response_format, stream_options = {}, ...parameters } = params;

  const result: DashscopeChat.ChatCompletionCreateParams = {
    model,
    input: {
      messages: [],
    },
    parameters,
  };

  if (raw === true) {
    // `raw` bypasses message conversion entirely.
    result.input.messages = messages;
  } else if (isMultiModal(model)) {
    result.input.messages = fromChatCompletionMessages(messages);
  } else {
    result.input.messages = fromChatCompletionTextMessages(messages);
  }

  if (params.tools) {
    // Tool calling requires Dashscope's `message` result format.
    result.parameters!.result_format = 'message';
  } else {
    if (response_format && response_format.type) {
      result.parameters!.result_format = response_format.type;
    }

    if (params.stream) {
      const incremental_output = stream_options?.incremental_output ?? true;
      result.parameters!.incremental_output = incremental_output;
    }
  }

  return result;
}

/**
 * Normalizes Dashscope's finish reason.
 *
 * Dashscope emits the literal string 'null' while still generating: streaming
 * chunks map that to `null`, final responses map it to 'stop'.
 */
export function toChatCompletionFinishReason(reason?: DashscopeChat.ResponseFinish | null, stream?: boolean) {
  if (reason === 'null' || !reason) {
    return (stream ? null : 'stop') as 'stop';
  }

  return reason;
}

/**
 * Converts a non-streaming Dashscope response into an OpenAI ChatCompletion.
 */
export function toChatCompletion(
  params: DashscopeChat.ChatCompletionCreateParams,
  response: DashscopeChat.ChatCompletion,
): OpenAI.ChatCompletion {
  const { model } = params;
  const { output, usage } = response;

  const choice: OpenAI.ChatCompletion.Choice = {
    index: 0,
    message: {
      role: 'assistant',
      content: '',
    },
    logprobs: null,
    finish_reason: 'stop',
  };

  // Note: `params.parameters.result_format=message`
  if (output.choices) {
    const { message, finish_reason } = output.choices[0];

    choice.message = {
      role: message.role,
      content: message.content,
    };

    if (finish_reason === 'tool_calls') {
      choice.finish_reason = 'tool_calls';

      choice.message.tool_calls = message.tool_calls;
    } else {
      // Fix: this is a final (non-streaming) completion, so a 'null'/absent
      // reason must map to 'stop', never to null. The previous code passed
      // `stream=true` here, mirroring the chunk path by mistake.
      choice.finish_reason = toChatCompletionFinishReason(finish_reason);
    }
  } else {
    choice.message.content = output.text;
    choice.finish_reason = toChatCompletionFinishReason(output.finish_reason);
  }

  return {
    id: response.request_id,
    model: model,
    choices: [choice],
    created: Math.floor(Date.now() / 1000),
    object: 'chat.completion',
    usage: toCompletionUsage(usage),
  };
}

/**
 * Converts a single Dashscope stream payload into an OpenAI chunk.
 */
function toCompletionChunk(
  params: DashscopeChat.ChatCompletionCreateParams,
  chunk: DashscopeChat.ChatCompletion,
): OpenAI.ChatCompletionChunk {
  const output = chunk.output;

  const choice: OpenAI.ChatCompletionChunk.Choice = {
    index: 0,
    delta: {
      role: 'assistant',
      content: '',
    },
    finish_reason: null,
  };

  // Note: work in `params.parameters.result_format=message`
  if (output.choices) {
    const { message, finish_reason } = output.choices[0];

    choice.delta = {
      role: message.role,
      content: message.content,
    };

    if (finish_reason === 'tool_calls') {
      choice.finish_reason = 'tool_calls';
      choice.delta.tool_calls = message.tool_calls as OpenAI.ChatCompletionChunk.Choice.Delta.ToolCall[];
    } else {
      choice.finish_reason = toChatCompletionFinishReason(finish_reason, true);
    }
  } else {
    choice.delta.content = output.text;
    choice.finish_reason = toChatCompletionFinishReason(output.finish_reason, true);
  }

  return {
    id: chunk.request_id,
    model: params.model,
    choices: [choice],
    object: 'chat.completion.chunk',
    created: Math.floor(Date.now() / 1000),
  };
}

/**
 * Adapts the Dashscope SSE response into an OpenAI-compatible chunk stream.
 */
export function toChatCompletionStream(
  params: DashscopeChat.ChatCompletionCreateParams,
  response: Response,
  controller: AbortController,
): Stream<OpenAI.ChatCompletionChunk> {
  let consumed = false;
  async function* iterator(): AsyncIterator<OpenAI.ChatCompletionChunk> {
    if (consumed) {
      throw new Error('Cannot iterate over a consumed stream, use `.tee()` to split the stream.');
    }
    consumed = true;
    let done = false;
    try {
      for await (const sse of _iterSSEMessages(response, controller)) {
        if (done) continue;

        if (sse.data.startsWith('[DONE]')) {
          done = true;
          continue;
        }

        if (sse.event === 'result') {
          let message;

          try {
            message = JSON.parse(sse.data);
          } catch (e) {
            console.error(`Could not parse message into JSON:`, sse.data);
            console.error(`From chunk:`, sse.raw);
            throw e;
          }

          if (message && message.code) {
            throw new APIError(undefined, message, undefined, undefined);
          }

          yield toCompletionChunk(params, message);
        }
      }
      done = true;
    } catch (e) {
      // If the user calls `stream.controller.abort()`, we should exit without throwing.
      if (e instanceof Error && e.name === 'AbortError') return;
      throw e;
    } finally {
      // If the user `break`s, abort the ongoing request.
      if (!done) controller.abort();
    }
  }

  return new Stream(iterator, controller);
}

--------------------------------------------------------------------------------
/src/qwen/dashscope/resolvers/completions.ts:
--------------------------------------------------------------------------------

import OpenAI, { APIError } from 'openai';
import { _iterSSEMessages, Stream } from 'openai/streaming';

import type { DashscopeCompletions, OpenAICompletionsCompatibility } from '../types';

export function isMultiModal(model: string): boolean {
  return model.startsWith('qwen-vl');
}

export function getCompletionCreateEndpoint(model: string) {
  return isMultiModal(model)
    ?
'/services/aigc/multimodal-generation/generation' 13 | : '/services/aigc/text-generation/generation'; 14 | } 15 | 16 | export function fromCompletionCreateParams( 17 | params: OpenAICompletionsCompatibility.CompletionCreateParams, 18 | ): DashscopeCompletions.CompletionCreateParams { 19 | const { model, prompt, response_format, stream_options, ...parameters } = params; 20 | 21 | const result: DashscopeCompletions.CompletionCreateParams = { 22 | model, 23 | input: { prompt }, 24 | parameters, 25 | }; 26 | 27 | if (response_format && response_format.type) { 28 | result.parameters!.result_format = response_format.type; 29 | } 30 | 31 | if (params.stream) { 32 | const { incremental_output } = stream_options || {}; 33 | result.parameters!.incremental_output = incremental_output ?? true; 34 | } 35 | 36 | return result; 37 | } 38 | 39 | export function toCompletionFinishReason(reason?: DashscopeCompletions.ResponseFinish | null, stream?: boolean) { 40 | if (reason === 'null' || !reason) { 41 | return (stream ? null : 'stop') as 'stop'; 42 | } 43 | 44 | return reason; 45 | } 46 | 47 | export function toCompletionUsage(usage: DashscopeCompletions.CompletionUsage): OpenAI.CompletionUsage { 48 | // hack: 部分模型不存在 total tokens? 
49 | // 如:llama2-7b-chat-v2 50 | const { output_tokens, input_tokens, total_tokens = output_tokens + input_tokens } = usage; 51 | 52 | return { 53 | completion_tokens: output_tokens, 54 | prompt_tokens: input_tokens, 55 | total_tokens: total_tokens, 56 | }; 57 | } 58 | 59 | export function toCompletion( 60 | params: DashscopeCompletions.CompletionCreateParams, 61 | response: DashscopeCompletions.Completion, 62 | stream?: boolean, 63 | ): OpenAI.Completion { 64 | const { model } = params; 65 | const { output, usage } = response; 66 | 67 | const choice: OpenAI.CompletionChoice = { 68 | index: 0, 69 | text: output.text, 70 | logprobs: null, 71 | finish_reason: toCompletionFinishReason(output.finish_reason, stream), 72 | }; 73 | 74 | return { 75 | id: response.request_id, 76 | model: model, 77 | choices: [choice], 78 | created: Math.floor(Date.now() / 1000), 79 | object: 'text_completion', 80 | usage: toCompletionUsage(usage), 81 | }; 82 | } 83 | 84 | export function toCompletionStream( 85 | params: DashscopeCompletions.CompletionCreateParams, 86 | response: Response, 87 | controller: AbortController, 88 | ): Stream { 89 | let consumed = false; 90 | async function* iterator(): AsyncIterator { 91 | if (consumed) { 92 | throw new Error('Cannot iterate over a consumed stream, use `.tee()` to split the stream.'); 93 | } 94 | 95 | consumed = true; 96 | let done = false; 97 | try { 98 | for await (const sse of _iterSSEMessages(response, controller)) { 99 | if (done) continue; 100 | 101 | if (sse.data.startsWith('[DONE]')) { 102 | done = true; 103 | continue; 104 | } 105 | 106 | if (sse.event === 'result') { 107 | let message; 108 | 109 | try { 110 | message = JSON.parse(sse.data); 111 | } catch (e) { 112 | console.error(`Could not parse message into JSON:`, sse.data); 113 | console.error(`From chunk:`, sse.raw); 114 | throw e; 115 | } 116 | 117 | if (message && message.code) { 118 | throw new APIError(undefined, message, undefined, undefined); 119 | } 120 | 121 | yield 
toCompletion(params, message, true); 122 | } 123 | } 124 | done = true; 125 | } catch (e) { 126 | // If the user calls `stream.controller.abort()`, we should exit without throwing. 127 | if (e instanceof Error && e.name === 'AbortError') return; 128 | throw e; 129 | } finally { 130 | // If the user `break`s, abort the ongoing request. 131 | if (!done) controller.abort(); 132 | } 133 | } 134 | 135 | return new Stream(iterator, controller); 136 | } 137 | -------------------------------------------------------------------------------- /src/qwen/dashscope/resolvers/embeddings.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from 'openai'; 2 | 3 | import { DashscopeEmbeddings, OpenAIEmbeddingsCompatibility } from '../types'; 4 | 5 | export function fromEmbeddingCreatePrams( 6 | params: OpenAIEmbeddingsCompatibility.EmbeddingCreateParams, 7 | ): DashscopeEmbeddings.EmbeddingCreateParams { 8 | return { 9 | model: params.model, 10 | input: { 11 | texts: params.input, 12 | }, 13 | parameters: { 14 | text_type: params.type || 'query', 15 | }, 16 | }; 17 | } 18 | 19 | export function toEmbedding( 20 | params: OpenAIEmbeddingsCompatibility.EmbeddingCreateParams, 21 | response: DashscopeEmbeddings.CreateEmbeddingResponse, 22 | ): OpenAI.CreateEmbeddingResponse { 23 | const { output, usage } = response; 24 | 25 | return { 26 | object: 'list', 27 | model: params.model, 28 | data: output.embeddings.map(({ text_index, embedding }) => ({ 29 | index: text_index, 30 | embedding: embedding, 31 | object: 'embedding', 32 | })), 33 | usage: { 34 | prompt_tokens: usage.total_tokens, 35 | total_tokens: usage.total_tokens, 36 | }, 37 | }; 38 | } 39 | -------------------------------------------------------------------------------- /src/qwen/dashscope/resolvers/index.ts: -------------------------------------------------------------------------------- 1 | export * from './chat'; 2 | export * from './completions'; 3 | 
--------------------------------------------------------------------------------
/src/qwen/dashscope/types/chat.ts:
--------------------------------------------------------------------------------
import type OpenAI from 'openai';

import type { DashscopeCompletions } from './completions';

export namespace DashscopeChat {
  /**
   * https://help.aliyun.com/zh/dashscope/developer-reference/model-square
   */
  export type ChatModel = DashscopeCompletions.CompletionModel;

  /** DashScope reports the literal string 'null' while still generating. */
  export type ResponseFinish = 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | 'null';

  export interface ChatCompletionParametersParam extends DashscopeCompletions.CompletionParametersParam {
    /**
     * The list of tools the model may call.
     *
     * When several tools are supplied, the model picks one to produce the
     * result.
     *
     * Caveats (per DashScope docs):
     *
     * - `tools` cannot currently be combined with `incremental_output`.
     * - Using `tools` requires `result_format: 'message'`.
     */
    tools?: OpenAI.ChatCompletionTool[];
  }

  export interface ChatCompletionCreateParams {
    model: ({} & string) | ChatModel;
    input: {
      messages: OpenAI.ChatCompletionMessageParam[];
    };
    parameters?: ChatCompletionParametersParam;
  }

  export namespace ChatCompletion {
    export interface Output {
      text: string;
      finish_reason?: ResponseFinish;
      // Present when the request used `parameters.result_format: 'message'`.
      choices: OpenAI.ChatCompletion.Choice[];
    }
  }

  /**
   * See the request/response reference:
   * https://help.aliyun.com/zh/dashscope/developer-reference/api-details
   */
  export interface ChatCompletion {
    request_id: string;
    usage: DashscopeCompletions.CompletionUsage;
    output: ChatCompletion.Output;
  }
}

--------------------------------------------------------------------------------
/src/qwen/dashscope/types/completions.ts:
--------------------------------------------------------------------------------
import type { OpenAI } from 'openai';

export namespace DashscopeCompletions {
  /**
   * https://help.aliyun.com/zh/dashscope/developer-reference/model-square
   */
  export type CompletionModel =
    // Tongyi Qianwen (Qwen)
    | 'qwen-long'
    | 'qwen-turbo'
    | 'qwen-plus'
    | 'qwen-max'
    | 'qwen-max-0428'
    | 'qwen-max-0403'
    | 'qwen-max-0107'
    | 'qwen-max-1201'
    | 'qwen-max-longcontext'
    // Qwen open-source series
    | 'qwen-7b-chat'
    | 'qwen-14b-chat'
    | 'qwen-72b-chat'
    // Multi-modal
    | 'qwen-vl-v1'
    | 'qwen-vl-chat-v1'
    | 'qwen-vl-plus'
    // LLAMA2
    | 'llama2-7b-chat-v2'
    | 'llama2-13b-chat-v2'
    // Baichuan
    | 'baichuan-7b-v1'
    | 'baichuan2-13b-chat-v1'
    | 'baichuan2-7b-chat-v1'
    // ChatGLM
    | 'chatglm3-6b'
    | 'chatglm-6b-v2';

  export type ResponseFinish = 'stop' | 'length' | 'null';

  /**
   * - text    legacy plain-text output
   * - message OpenAI-compatible message output
   *
   * @defaultValue "text"
   */
  export type ResponseFormat = 'text' | 'message';

  export type CompletionParametersParam = {
    /**
     * Enable streamed output.
     *
     * By default each event carries the whole sequence generated so far, and
     * the last event carries the complete result; switch to delta output via
     * {@link CompletionParametersParam.incremental_output incremental_output}.
     */
    stream?: boolean | null;

    /**
     * Enable incremental (delta) output.
     *
     * When {@link CompletionParametersParam.stream stream} is enabled,
     * controls whether each event repeats previously emitted content.
     *
     * Warning: function-call information does not support incremental output
     * yet; take care when enabling this with tools.
     *
     * @defaultValue false
     */
    incremental_output?: boolean | null;

    /**
     * Output format of the result.
     *
     * @defaultValue "text"
     */
    result_format?: ResponseFormat;

    /**
     * Random seed controlling generation randomness.
     *
     * With the same seed, each run produces the same result — useful for
     * reproducing model output. Unsigned 64-bit integers are supported.
     *
     * @defaultValue 1234
     */
    seed?: number | null;

    /**
     * Upper bound on the number of generated tokens (a cap, not a target).
     * Both the maximum and the default are 1500.
     *
     * @defaultValue 1500
     */
    max_tokens?: number | null;

    /**
     * Nucleus-sampling probability mass, controlling output diversity.
     *
     * @defaultValue 0.8
     */
    top_p?: number | null;

    /**
     * Size of the sampling candidate set.
     *
     * E.g. 50 means only the 50 highest-scoring tokens of each step form the
     * candidate set. Larger values increase randomness; smaller values make
     * output more deterministic. If empty or greater than 100, top_k is
     * disabled and only top_p applies; the default is empty.
     *
     * @defaultValue 80
     */
    top_k?: number | null;

    /**
     * Repetition penalty. Raising it reduces repetition; 1.0 applies no
     * penalty. Defaults to 1.1.
     */
    repetition_penalty?: number | null;

    /**
     * Presence penalty over the whole generated sequence.
     *
     * Raising it reduces repetition; range [-2.0, 2.0].
     */
    presence_penalty?: number | null;

    /**
     * Sampling temperature (output randomness).
     *
     * @defaultValue 1.0
     */
    temperature?: number | null;

    /**
     * Stop sequence(s) that terminate generation.
     */
    stop?: string | string[] | null;

    /**
     * Whether generation may consult web search results.
     *
     * Note: enabling search does not guarantee the results are used — the
     * model receives them as prompt context and decides by itself whether to
     * incorporate them. Defaults to false.
     */
    enable_search?: boolean | null;
  };

  export interface CompletionCreateParams {
    model: ({} & string) | CompletionModel;
    input: {
      prompt: string;
    };
    parameters?: DashscopeCompletions.CompletionParametersParam;
  }

  export interface CompletionUsage {
    output_tokens: number;
    input_tokens: number;
    // Some models omit this at runtime; resolvers derive it when missing.
    total_tokens: number;
  }

  export namespace Completion {
    export interface Output {
      text: string;
      finish_reason: DashscopeCompletions.ResponseFinish;
    }
  }

  export interface Completion {
    request_id: string;
    usage: CompletionUsage;
    output: Completion.Output;
  }
}

--------------------------------------------------------------------------------
/src/qwen/dashscope/types/embeddings.ts:
--------------------------------------------------------------------------------
export namespace DashscopeEmbeddings {
  export type EmbeddingModel =
    | 'text-embedding-v1'
    | 'text-embedding-async-v1'
    | 'text-embedding-v2'
    | 'text-embedding-async-v2';

  export interface EmbeddingCreateParams {
    /**
     * The embedding model to use.
     */
    model: ({} & string) | EmbeddingModel;
    input: {
      /**
       * Input text(s).
       *
       * Fix: the generic arguments of `Array` had been stripped; restored to
       * mirror OpenAI's `EmbeddingCreateParams['input']`.
       */
      texts: string | Array<string> | Array<number> | Array<Array<number>>;
    };
    parameters: {
      /**
       * Downstream task hint. For asymmetric retrieval tasks, better results
       * are achieved by distinguishing query text ('query') from corpus text
       * ('document'); symmetric tasks (clustering, classification) can keep
       * the system default 'document'.
       *
       * @defaultValue 'query'
       */
      text_type?: 'query' | 'document';
    };
  }

  export type Embedding = {
    text_index: number;
    embedding: number[];
  };

  export type CreateEmbeddingResponse = {
    request_id: string;
    code: string;
    message: string;
    output: {
      embeddings: Embedding[];
    };
    usage: {
      total_tokens: number;
    };
  };
}

--------------------------------------------------------------------------------
/src/qwen/dashscope/types/index.ts:
--------------------------------------------------------------------------------
export * from './chat';
export * from './completions';
export * from './embeddings';
export * from './openai';

--------------------------------------------------------------------------------
/src/qwen/dashscope/types/openai.ts:
--------------------------------------------------------------------------------
import type OpenAI from 'openai';

import type { DashscopeChat } from './chat';
import type { DashscopeCompletions } from './completions';
import { DashscopeEmbeddings } from './embeddings';

export namespace OpenAICompletionsCompatibility {
  export type CompletionModel = DashscopeCompletions.CompletionModel;

  export interface StreamOptions {
    /**
     * Enable incremental (delta) output.
     *
     * When streaming is enabled, controls whether each event repeats
     * previously emitted content.
     *
     * @defaultValue true
     */
    incremental_output?: boolean | null;
  }

  export interface CompletionCreateParamsBase
    extends Pick<
      DashscopeCompletions.CompletionParametersParam,
      | 'enable_search'
      | 'temperature'
      | 'presence_penalty'
      | 'repetition_penalty'
      | 'top_k'
      | 'top_p'
      | 'seed'
      | 'stop'
      | 'max_tokens'
      | 'stream'
    > {
    /**
     * The generation model.
     *
     * The built-in {@link CompletionModel} values are tested, but other
     * supported models from the
     * [model list](https://help.aliyun.com/zh/dashscope/developer-reference/model-square)
     * may also be used.
     */
    model: ({} & string) | CompletionModel;

    /**
     * The instruction/prompt guiding the model's reply.
     */
    prompt: string;

    /**
     * Extra streaming parameters.
     */
    stream_options?: StreamOptions | null;

    /**
     * Response format.
     */
    response_format?: {
      type?: 'text';
    };
  }

  export interface CompletionCreateParamsNonStreaming extends CompletionCreateParamsBase {
    /**
     * Enable streamed output.
     *
     * By default each event carries the whole sequence generated so far;
     * switch to deltas via {@link StreamOptions stream_options}.
     */
    stream?: false | null;
  }

  export interface CompletionCreateParamsStreaming extends CompletionCreateParamsBase {
    /**
     * Enable streamed output.
     *
     * By default each event carries the whole sequence generated so far;
     * switch to deltas via {@link StreamOptions stream_options}.
     */
    stream: true;
  }

  export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;
}

export namespace OpenAIChatCompatibility {
  export type ChatModel = DashscopeChat.ChatModel;

  export interface ChatCompletionCreateParamsBase
    extends Pick<
      DashscopeCompletions.CompletionParametersParam,
      | 'enable_search'
      | 'temperature'
      | 'presence_penalty'
      | 'repetition_penalty'
      | 'top_k'
      | 'top_p'
      | 'seed'
      | 'stop'
      | 'max_tokens'
      | 'stream'
    > {
    /**
     * The chat model.
     *
     * The built-in {@link ChatModel} values are tested, but other supported
     * models from the
     * [model list](https://help.aliyun.com/zh/dashscope/developer-reference/model-square)
     * may also be used.
     */
    model: ({} & string) | ChatModel;

    /**
     * The conversation so far.
     */
    messages: OpenAI.ChatCompletionMessageParam[];

    /**
     * The list of tools the model may call.
     *
     * When several tools are supplied, the model picks one to produce the
     * result.
     */
    tools?: OpenAI.ChatCompletionTool[];

    /**
     * The SDK normally adapts messages for multi-modal models.
     *
     * Set to true to pass messages through exactly as provided.
     */
    raw?: boolean | null;

    /**
     * Extra streaming parameters.
     */
    stream_options?: OpenAICompletionsCompatibility.StreamOptions | null;

    /**
     * Response format.
     */
    response_format?: {
      type?: 'text';
    };
  }

  export interface ChatCompletionCreateParamsNonStreaming extends ChatCompletionCreateParamsBase {
    /**
     * Enable streamed output.
     *
     * By default each event carries the whole sequence generated so far;
     * switch to deltas via
     * {@link ChatCompletionCreateParamsBase.stream_options stream_options}.
     */
    stream?: false | null;
  }

  export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreateParamsBase {
    /**
     * Enable streamed output.
     *
     * By default each event carries the whole sequence generated so far;
     * switch to deltas via
     * {@link ChatCompletionCreateParamsBase.stream_options stream_options}.
     */
    stream: true;
  }

  export type ChatCompletionCreateParams = ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming;
}

export namespace OpenAIEmbeddingsCompatibility {
  export interface EmbeddingCreateParams {
    /**
     * The embedding model to use.
     */
    model: ({} & string) | DashscopeEmbeddings.EmbeddingModel;

    /**
     * Input text(s). (Stripped generic arguments restored to mirror OpenAI's
     * `EmbeddingCreateParams['input']`.)
     */
    input: string | Array<string> | Array<number> | Array<Array<number>>;

    /**
     * Downstream task hint: 'query' for retrieval queries, 'document' for
     * corpus text and symmetric tasks (clustering, classification).
     *
     * @defaultValue 'query'
     */
    type?: 'query' | 'document';
  }
}

--------------------------------------------------------------------------------
/src/qwen/index.ts:
--------------------------------------------------------------------------------
import type { Agent } from 'node:http';

import { APIError } from 'openai';
import { APIClient, type DefaultQuery, type Fetch, type FinalRequestOptions, type Headers } from 'openai/core';

import * as API from './resources';

export interface QWenAIOptions {
  baseURL?: string;
  apiKey?: string;
  timeout?: number | undefined;
  httpAgent?: Agent;
  fetch?: Fetch | undefined;
  /**
   * Default headers to include with every request to the API.
   *
   * These can be removed in individual requests by explicitly setting the
   * header to `undefined` or `null` in request options.
   */
  defaultHeaders?: Headers;

  /**
   * Default query parameters to include with every request to the API.
   *
   * These can be removed in individual requests by explicitly setting the
   * param to `undefined` in request options.
   */
  defaultQuery?: DefaultQuery;
}

/**
 * Client wrapping Alibaba Cloud's
 * [DashScope model service](https://help.aliyun.com/zh/dashscope/product-overview/product-introduction).
 *
 * @deprecated Prefer Alibaba Cloud's native
 * [OpenAI-compatible endpoint](https://help.aliyun.com/zh/dashscope/developer-reference/compatibility-of-openai-with-dashscope/).
 */
export class QWenAI extends APIClient {
  protected apiKey: string;

  private _options: QWenAIOptions;

  constructor(options: QWenAIOptions = {}) {
    const {
      apiKey = process.env.QWEN_API_KEY || '',
      baseURL = 'https://dashscope.aliyuncs.com/api/v1/',
      timeout = 30000,
      fetch = globalThis.fetch,
      httpAgent = undefined,
      ...rest
    } = options;

    super({
      baseURL,
      timeout,
      fetch,
      httpAgent,
      ...rest,
    });

    this._options = options;

    this.apiKey = apiKey;
  }

  chat = new API.Chat(this);
  completions = new API.Completions(this);

  embeddings = new API.Embeddings(this);

  images = new API.Images(this);

  protected override authHeaders() {
    return {
      Authorization: `Bearer ${this.apiKey}`,
    };
  }

  protected override defaultHeaders(opts: FinalRequestOptions): Headers {
    return {
      ...super.defaultHeaders(opts),
      ...this._options.defaultHeaders,
    };
  }

  protected override defaultQuery(): DefaultQuery | undefined {
    return this._options.defaultQuery;
  }

  /**
   * Intercepts non-OK responses so that DashScope's SSE-framed error payloads
   * are unwrapped into plain JSON bodies before `makeStatusError` sees them.
   */
  async fetchWithTimeout(
    url: RequestInfo,
    init: RequestInit | undefined,
    ms: number,
    controller: AbortController,
  ): Promise<Response> {
    const response = await super.fetchWithTimeout(url, init, ms, controller);

    if (response.ok) {
      return response;
    }

    const contentType = response.headers.get('content-type') || '';
    const processedMessage = await response.text().then(text => {
      // Error responses may arrive as an SSE frame, e.g.:
      //   id:1
      //   event:error
      //   :HTTP_STATUS/429 TOO_MANY_REQUESTS
      //   data:{"code":"Throttling.AllocationQuota","message":"Free allocated quota exceeded.","request_id":"..."}
      if (contentType.includes('text/event-stream')) {
        const [_, message] = text.split('data:');
        return message;
      }

      return text;
    });

    return new Response(processedMessage, {
      status: response.status,
      statusText: response.statusText,
      headers: response.headers,
    });
  }

  protected override makeStatusError(
    status: number | undefined,
    error: Record<string, any> | undefined,
    message: string | undefined,
    headers: Headers | undefined,
  ) {
    return APIError.generate(status, { error }, message, headers);
  }
}

export namespace QWenAI {
  export import Chat = API.Chat;
  export import ChatModel = API.ChatModel;
  export import ChatCompletionCreateParams = API.ChatCompletionCreateParams;
  export import ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming;
  export import ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming;

  export import Completions = API.Completions;
  export import CompletionModel = API.CompletionModel;
  export type CompletionCreateParams = API.CompletionCreateParams;
  export type CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming;
  export type CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming;

  export import Embeddings = API.Embeddings;
  export type EmbeddingModel = API.EmbeddingModel;
  export type EmbeddingCreateParams = API.EmbeddingCreateParams;

  export import Images = API.Images;
  export type ImageModel = API.ImageModel;
  export type ImageGenerateParams = API.ImageGenerateParams;
}

export default QWenAI;
--------------------------------------------------------------------------------
/src/qwen/resources/chat/chat.ts:
--------------------------------------------------------------------------------
import { APIResource } from '../../../resource';
import { OpenAIChatCompatibility } from '../../dashscope';
import { Completions } from './completions';

export class Chat extends APIResource {
  completions = new Completions(this._client);
}

export type ChatModel = OpenAIChatCompatibility.ChatModel;
export type ChatCompletionCreateParams = OpenAIChatCompatibility.ChatCompletionCreateParams;
// Fix: previously aliased the whole ChatCompletionCreateParams union instead
// of the NonStreaming variant.
export type ChatCompletionCreateParamsNonStreaming = OpenAIChatCompatibility.ChatCompletionCreateParamsNonStreaming;
export type ChatCompletionCreateParamsStreaming = OpenAIChatCompatibility.ChatCompletionCreateParamsStreaming;

--------------------------------------------------------------------------------
/src/qwen/resources/chat/completions.ts:
--------------------------------------------------------------------------------
import OpenAI from 'openai';
import { type Headers } from 'openai/core';
import { Stream } from 'openai/streaming';

import { APIResource } from '../../../resource';
import {
  fromChatCompletionCreateParams,
  getCompletionCreateEndpoint,
  type OpenAIChatCompatibility,
  toChatCompletion,
  toChatCompletionStream,
} from '../../dashscope';

export class Completions extends APIResource {
  /**
   * Creates a model response for the given chat conversation.
   *
   * See https://help.aliyun.com/zh/dashscope/developer-reference/api-details
   */
  create(
    body: OpenAIChatCompatibility.ChatCompletionCreateParamsNonStreaming,
    options?: OpenAI.RequestOptions,
  ): Promise<OpenAI.ChatCompletion>;

  create(
    body: OpenAIChatCompatibility.ChatCompletionCreateParamsStreaming,
    options?: OpenAI.RequestOptions,
  ): Promise<Stream<OpenAI.ChatCompletionChunk>>;

  async create(body: OpenAIChatCompatibility.ChatCompletionCreateParams, options?: OpenAI.RequestOptions) {
    const headers: Headers = {
      ...options?.headers,
    };

    if (body.stream) {
      headers['Accept'] = 'text/event-stream';
    }

    const path = getCompletionCreateEndpoint(body.model);
    const params = fromChatCompletionCreateParams(body);

    const response: Response = await this._client.post(path, {
      ...options,
      body: params,
      headers: headers,
      // DashScope wraps the payload; we unwrap and convert it to the OpenAI
      // shape ourselves, so ask the client for the raw response
      // (stream: false + __binaryResponse: true).
      stream: false,
      __binaryResponse: true,
    });

    if (body.stream) {
      const controller = new AbortController();

      options?.signal?.addEventListener('abort', () => {
        controller.abort();
      });

      return toChatCompletionStream(params, response, controller);
    }

    return toChatCompletion(params, await response.json());
  }
}

--------------------------------------------------------------------------------
/src/qwen/resources/chat/index.ts:
--------------------------------------------------------------------------------
export * from './chat';

--------------------------------------------------------------------------------
/src/qwen/resources/completions.ts:
--------------------------------------------------------------------------------
import OpenAI from 'openai';
import { type Headers } from 'openai/core';
import { Stream } from 'openai/streaming';

import { APIResource } from '../../resource';
import {
  fromCompletionCreateParams,
  getCompletionCreateEndpoint,
  type OpenAICompletionsCompatibility,
  toCompletion,
  toCompletionStream,
} from '../dashscope';

export class Completions extends APIResource {
  /**
   * Creates a completion for the provided prompt and parameters.
   * (Stripped overload generics restored.)
   */
  create(
    body: OpenAICompletionsCompatibility.CompletionCreateParamsNonStreaming,
    options?: OpenAI.RequestOptions,
  ): Promise<OpenAI.Completion>;
  create(
    body: OpenAICompletionsCompatibility.CompletionCreateParamsStreaming,
    options?: OpenAI.RequestOptions,
  ): Promise<Stream<OpenAI.Completion>>;
  create(
    body: OpenAICompletionsCompatibility.CompletionCreateParamsBase,
    options?: OpenAI.RequestOptions,
  ): Promise<Stream<OpenAI.Completion> | OpenAI.Completion>;
  async create(
    body: OpenAICompletionsCompatibility.CompletionCreateParams,
    options?: OpenAI.RequestOptions,
  ): Promise<OpenAI.Completion | Stream<OpenAI.Completion>> {
    const headers: Headers = {
      ...options?.headers,
    };

    if (body.stream) {
      headers['Accept'] = 'text/event-stream';
    }

    const path = getCompletionCreateEndpoint(body.model);
    const params = fromCompletionCreateParams(body);

    const response: Response = await this._client.post(path, {
      ...options,
      body: params,
      headers: headers,
      // DashScope wraps the payload; we unwrap and convert it to the OpenAI
      // shape ourselves, so ask the client for the raw response
      // (stream: false + __binaryResponse: true).
      stream: false,
      __binaryResponse: true,
    });

    if (body.stream) {
      const controller = new AbortController();

      options?.signal?.addEventListener('abort', () => {
        controller.abort();
      });

      return toCompletionStream(params, response, controller);
    }

    return toCompletion(params, await response.json());
  }
}

export type CompletionModel = OpenAICompletionsCompatibility.CompletionModel;
export type CompletionCreateParams = OpenAICompletionsCompatibility.CompletionCreateParams;
export type CompletionCreateParamsStreaming = OpenAICompletionsCompatibility.CompletionCreateParamsStreaming;
export type CompletionCreateParamsNonStreaming = OpenAICompletionsCompatibility.CompletionCreateParamsNonStreaming;

--------------------------------------------------------------------------------
/src/qwen/resources/embeddings.ts:
--------------------------------------------------------------------------------
import OpenAI from 'openai';
import { type RequestOptions } from 'openai/core';

import { APIResource } from '../../resource';
import { DashscopeEmbeddings } from '../dashscope';
import { fromEmbeddingCreatePrams, toEmbedding } from '../dashscope/resolvers/embeddings';

export class Embeddings extends APIResource {
  /**
   * Creates an embedding vector representing the input text.
   *
   * See https://help.aliyun.com/zh/dashscope/developer-reference/generic-text-vector
   */
  async create(
    params: OpenAI.EmbeddingCreateParams,
    options?: RequestOptions,
  ): Promise<OpenAI.CreateEmbeddingResponse> {
    const body = fromEmbeddingCreatePrams(params);

    const response: Response = await this._client.post('/services/embeddings/text-embedding/text-embedding', {
      ...options,
      body: body,
      __binaryResponse: true,
    });

    return toEmbedding(params, await response.json());
  }
}

export type EmbeddingModel = Embeddings.EmbeddingModel;

export type EmbeddingCreateParams = Embeddings.EmbeddingCreateParams;

export namespace Embeddings {
  export type EmbeddingModel = DashscopeEmbeddings.EmbeddingModel;
  export type EmbeddingCreateParams = DashscopeEmbeddings.EmbeddingCreateParams;
}

--------------------------------------------------------------------------------
/src/qwen/resources/images.ts:
--------------------------------------------------------------------------------
import OpenAI, { OpenAIError } from 'openai';
import { type RequestOptions } from 'openai/core';

import { APIResource } from '../../resource';

export class Images extends APIResource {
  /**
   * Creates an image given a prompt.
   *
   * DashScope image synthesis is asynchronous: this submits a task
   * (X-DashScope-Async) and then polls it until it settles.
   */
  async generate(params: ImageGenerateParams, options: RequestOptions = {}): Promise<OpenAI.ImagesResponse> {
    const client = this._client;

    const { headers, ...config } = options;
    const { model = 'wanx-v1', prompt, n = 1, cfg, ...rest } = params;

    const taskId = await client
      .post('/services/aigc/text2image/image-synthesis', {
        ...config,
        headers: { 'X-DashScope-Async': 'enable', ...headers },
        body: {
          model,
          input: {
            prompt,
          },
          parameters: {
            ...rest,
            // OpenAI's `cfg` maps to DashScope's guidance `scale`.
            scale: cfg,
            n,
          },
        },
        __binaryResponse: true,
      })
      .then(res => res.json())
      .then(res => res.output.task_id);

    return this.waitTask(taskId, options).then(images => {
      return {
        // Fix: OpenAI's `created` is an integer Unix timestamp; this was the
        // only place in the SDK missing Math.floor.
        created: Math.floor(Date.now() / 1000),
        data: images,
      };
    });
  }

  /**
   * Polls the task every 5 seconds until it succeeds or fails.
   *
   * @throws OpenAIError when the task fails or reports an unknown status.
   */
  protected async waitTask(taskId: string, options?: RequestOptions): Promise<ImageTask.Image[]> {
    const response = await this._client
      .get(`/tasks/${taskId}`, {
        ...options,
        __binaryResponse: true,
      })
      .then(response => response.json());

    const { task_status, message } = response.output;

    if (task_status === 'PENDING' || task_status === 'RUNNING') {
      return new Promise(resolve => {
        setTimeout(() => resolve(this.waitTask(taskId, options)), 5000);
      });
    }

    if (task_status === 'SUCCEEDED') {
      // Failed sub-results carry no `url`; keep only the successful images.
      return response.output.results.filter((result: any) => 'url' in result) as ImageTask.Image[];
    }

    if (task_status === 'FAILED') {
      throw new OpenAIError(message);
    }

    throw new OpenAIError('Unknown task status');
  }
}

type ImageCreateTaskResponse = {
  request_id: string;
  output: {
    task_id: string;
    task_status:
ImageTask.Status; 77 | code: string; 78 | message: string; 79 | }; 80 | }; 81 | 82 | type ImageTaskQueryResponse = 83 | | ImageTaskPendingResponse 84 | | ImageTaskRunningResponse 85 | | ImageTaskFinishedResponse 86 | | ImageTaskFailedResponse 87 | | ImageTaskUnknownResponse; 88 | 89 | type ImageTaskPendingResponse = { 90 | request_id: string; 91 | output: { 92 | task_id: string; 93 | task_status: 'PENDING'; 94 | task_metrics: ImageTask.Metrics; 95 | submit_time: string; 96 | scheduled_time: string; 97 | code: string; 98 | message: string; 99 | }; 100 | }; 101 | 102 | type ImageTaskRunningResponse = { 103 | request_id: string; 104 | output: { 105 | task_id: string; 106 | task_status: 'RUNNING'; 107 | task_metrics: ImageTask.Metrics; 108 | submit_time: string; 109 | scheduled_time: string; 110 | code: string; 111 | message: string; 112 | }; 113 | }; 114 | 115 | type ImageTaskFinishedResponse = { 116 | request_id: string; 117 | output: { 118 | task_id: string; 119 | task_status: 'SUCCEEDED'; 120 | task_metrics: ImageTask.Metrics; 121 | results: (ImageTask.Image | ImageTask.FailedError)[]; 122 | submit_time: string; 123 | scheduled_time: string; 124 | end_time: string; 125 | code: string; 126 | message: string; 127 | }; 128 | usage: { 129 | image_count: number; 130 | }; 131 | }; 132 | 133 | type ImageTaskFailedResponse = { 134 | request_id: string; 135 | code: string; 136 | message: string; 137 | output: { 138 | task_status: 'FAILED'; 139 | task_metrics: ImageTask.Metrics; 140 | submit_time: string; 141 | scheduled_time: string; 142 | code: string; 143 | message: string; 144 | }; 145 | }; 146 | 147 | type ImageTaskUnknownResponse = { 148 | request_id: string; 149 | output: { 150 | task_status: 'UNKNOWN'; 151 | task_metrics: ImageTask.Metrics; 152 | code: string; 153 | message: string; 154 | }; 155 | }; 156 | 157 | namespace ImageTask { 158 | export type Image = { 159 | url: string; 160 | }; 161 | 162 | export type FailedError = { 163 | code: string; 164 | message: 
string; 165 | }; 166 | 167 | export type Status = 'PENDING' | 'RUNNING' | 'SUCCEEDED' | 'FAILED' | 'UNKNOWN'; 168 | 169 | export type Metrics = { 170 | TOTAL: number; 171 | SUCCEEDED: number; 172 | FAILED: number; 173 | }; 174 | } 175 | 176 | export type ImageModel = Images.ImageModel; 177 | 178 | export type ImageGenerateParams = Images.ImageGenerateParams; 179 | 180 | export namespace Images { 181 | export type ImageModel = 182 | | (string & NonNullable) 183 | // 通义万相 184 | | 'wanx-v1' 185 | // Stable Diffusion 186 | | 'stable-diffusion-v1.5' 187 | | 'stable-diffusion-xl'; 188 | 189 | export interface ImageGenerateParams { 190 | /** 191 | * The model to use for image generation. 192 | * 193 | * @defaultValue wanx-v1 194 | */ 195 | model?: ImageModel | null; 196 | 197 | /** 198 | * A prompt is the text input that guides the AI in generating visual content. 199 | * It defines the textual description or concept for the image you wish to generate. 200 | * Think of it as the creative vision you want the AI to bring to life. 201 | * Crafting clear and creative prompts is crucial for achieving the desired results with Imagine's API. 202 | * For example, A serene forest with a river under the moonlight, can be a prompt. 203 | */ 204 | prompt: string; 205 | 206 | /** 207 | * The negative_prompt parameter empowers you to provide additional 208 | * guidance to the AI by specifying what you don't want in the image. 209 | * It helps refine the creative direction, ensuring that the generated 210 | * content aligns with your intentions. 211 | */ 212 | negative_prompt?: string | null; 213 | 214 | /** 215 | * The size of the generated images. 216 | * 217 | * @defaultValue 1024*1024 218 | */ 219 | size?: (string & NonNullable) | '1024*1024' | null; 220 | 221 | /** 222 | * The style of the generated images. 
223 | * 224 | * - \ 摄影 225 | * - \ 人像写真 226 | * - \<3d cartoon\> 3D卡通 227 | * - \ 动画 228 | * - \ 油画 229 | * - \水彩 230 | * - \ 素描 231 | * - \ 中国画 232 | * - \ 扁平插画 233 | * - \ 默认 234 | * 235 | * 仅 wanx-v1 模型支持 236 | * 237 | * @defaultValue 238 | */ 239 | style?: 240 | | '' 241 | | '' 242 | | '<3d cartoon>' 243 | | '' 244 | | '' 245 | | '' 246 | | '' 247 | | '' 248 | | '' 249 | | '' 250 | | null; 251 | 252 | /** 253 | * The number of images to generate. Must be between 1 and 4. 254 | * 255 | * @defaultValue 1 256 | */ 257 | n?: number | null; 258 | 259 | /** 260 | * The steps parameter defines the number of operations or iterations that the 261 | * generator will perform during image creation. It can impact the complexity 262 | * and detail of the generated image. 263 | * 264 | * Range: 30-50 265 | * 266 | * 仅 StableDiffusion 模型支持 267 | * 268 | * @defaultValue 40 269 | */ 270 | steps?: number | null; 271 | 272 | /** 273 | * The cfg parameter acts as a creative control knob. 274 | * You can adjust it to fine-tune the level of artistic innovation in the image. 275 | * Lower values encourage faithful execution of the prompt, 276 | * while higher values introduce more creative and imaginative variations. 277 | * 278 | * Range: 1 - 15 279 | * 280 | * @defaultValue 10 281 | */ 282 | cfg?: number | null; 283 | 284 | /** 285 | * The seed parameter serves as the initial value for the random number generator. 286 | * By setting a specific seed value, you can ensure that the AI generates the same 287 | * image or outcome each time you use that exact seed. 288 | * 289 | * range: 1-Infinity 290 | */ 291 | seed?: number | null; 292 | 293 | /** 294 | * The format in which the generated images are returned. 
295 | */ 296 | response_format?: 'url' | null; 297 | } 298 | } 299 | -------------------------------------------------------------------------------- /src/qwen/resources/index.ts: -------------------------------------------------------------------------------- 1 | export * from './chat/index'; 2 | export * from './completions'; 3 | export { Images, type ImageModel, type ImageGenerateParams } from './images'; 4 | export { Embeddings, type EmbeddingModel, type EmbeddingCreateParams } from './embeddings'; 5 | -------------------------------------------------------------------------------- /src/resource.ts: -------------------------------------------------------------------------------- 1 | import { APIClient } from 'openai/core'; 2 | 3 | export class APIResource { 4 | protected _client: Client; 5 | 6 | constructor(client: Client) { 7 | this._client = client; 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /src/shims/node.ts: -------------------------------------------------------------------------------- 1 | import { WebSocket } from 'ws'; 2 | 3 | if (!globalThis.WebSocket) { 4 | // @ts-expect-error 5 | globalThis.WebSocket = WebSocket; 6 | } 7 | -------------------------------------------------------------------------------- /src/spark/index.ts: -------------------------------------------------------------------------------- 1 | import { createHmac } from 'node:crypto'; 2 | 3 | import { APIClient, type DefaultQuery, type Fetch, type Headers } from 'openai/core'; 4 | 5 | import * as API from './resources'; 6 | 7 | export interface SparkAIOptions { 8 | baseURL?: string; 9 | appId?: string; 10 | apiKey?: string; 11 | apiSecret?: string; 12 | timeout?: number | undefined; 13 | httpAgent?: unknown; 14 | fetch?: Fetch | undefined; 15 | /** 16 | * Default headers to include with every request to the API. 
17 | * 18 | * These can be removed in individual requests by explicitly setting the 19 | * header to `undefined` or `null` in request options. 20 | */ 21 | defaultHeaders?: Headers; 22 | 23 | /** 24 | * Default query parameters to include with every request to the API. 25 | * 26 | * These can be removed in individual requests by explicitly setting the 27 | * param to `undefined` in request options. 28 | */ 29 | defaultQuery?: DefaultQuery; 30 | } 31 | 32 | export class SparkAI extends APIClient { 33 | appId: string; 34 | protected apiKey: string; 35 | protected apiSecret: string; 36 | 37 | private _options: SparkAIOptions; 38 | 39 | constructor(options: SparkAIOptions = {}) { 40 | const { 41 | appId = process.env.SPARK_APP_ID || '', 42 | apiKey = process.env.SPARK_API_KEY || '', 43 | apiSecret = process.env.SPARK_API_SECRET || '', 44 | baseURL = 'https://spark-api.xf-yun.com', 45 | timeout = 30000, 46 | fetch = globalThis.fetch, 47 | httpAgent = undefined, 48 | ...rest 49 | } = options; 50 | 51 | super({ 52 | baseURL, 53 | timeout, 54 | fetch, 55 | httpAgent, 56 | ...rest, 57 | }); 58 | 59 | this._options = options; 60 | 61 | this.appId = appId; 62 | this.apiKey = apiKey; 63 | this.apiSecret = apiSecret; 64 | } 65 | 66 | chat = new API.Chat(this); 67 | 68 | images = new API.Images(this); 69 | 70 | protected override defaultQuery(): DefaultQuery | undefined { 71 | return this._options.defaultQuery; 72 | } 73 | 74 | /** 75 | * @param url - 需要签名的 URL 76 | * @param method - HTTP method 77 | * @returns 签名后的 URL 78 | */ 79 | generateAuthorizationURL(url: string | URL, method: string = 'GET'): string { 80 | const target = new URL(url, this.baseURL); 81 | 82 | const date = new Date().toUTCString(); 83 | 84 | const authorization = this.generateAuthorization({ 85 | method, 86 | path: target.pathname, 87 | host: target.host, 88 | date, 89 | }); 90 | 91 | target.searchParams.set('authorization', authorization); 92 | target.searchParams.set('host', target.host); 93 | 
target.searchParams.set('date', date); 94 | 95 | return target.toString(); 96 | } 97 | 98 | /** 99 | * 生成鉴权信息 100 | * 101 | * See https://www.xfyun.cn/doc/spark/general_url_authentication.html 102 | */ 103 | generateAuthorization({ method, host, path, date }: { method: string; host: string; path: string; date: string }) { 104 | // 生成签名原文 105 | const rawSignature = `host: ${host}\ndate: ${date}\n${method} ${path} HTTP/1.1`; 106 | 107 | // 生成签名,需要转为 base64 编码 108 | const signature = this.hash(rawSignature); 109 | 110 | return globalThis.btoa( 111 | `api_key="${this.apiKey}", algorithm="hmac-sha256", headers="host date request-line", signature="${signature}"`, 112 | ); 113 | } 114 | 115 | protected hash(data: string) { 116 | const sha256Hmac = createHmac('sha256', this.apiSecret); 117 | sha256Hmac.update(data); 118 | return sha256Hmac.digest('base64'); 119 | } 120 | } 121 | 122 | export namespace SparkAI { 123 | export type Chat = API.Chat; 124 | export type ChatModel = API.ChatModel; 125 | export type ChatCompletionCreateParams = API.ChatCompletionCreateParams; 126 | export type ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming; 127 | export type ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming; 128 | } 129 | 130 | export default SparkAI; 131 | -------------------------------------------------------------------------------- /src/spark/resource.ts: -------------------------------------------------------------------------------- 1 | import { SparkAI } from './index'; 2 | 3 | export class APIResource { 4 | protected _client: SparkAI; 5 | 6 | constructor(client: SparkAI) { 7 | this._client = client; 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /src/spark/resources/chat/chat.ts: -------------------------------------------------------------------------------- 1 | import { APIResource } from '../../resource'; 2 | import { Completions } from './completions'; 3 
| 4 | export class Chat extends APIResource { 5 | completions = new Completions(this._client); 6 | } 7 | -------------------------------------------------------------------------------- /src/spark/resources/chat/completions.ts: -------------------------------------------------------------------------------- 1 | import OpenAI, { APIError } from 'openai'; 2 | import { RequestOptions } from 'openai/core'; 3 | import { Stream } from 'openai/streaming'; 4 | 5 | import { APIResource } from '../../resource'; 6 | 7 | export class Completions extends APIResource { 8 | protected resources: Record< 9 | ChatModel, 10 | { 11 | domain: string; 12 | url: string; 13 | } 14 | > = { 15 | 'spark-1.5': { 16 | domain: 'general', 17 | url: 'wss://spark-api.xf-yun.com/v1.1/chat', 18 | }, 19 | 'spark-2': { 20 | domain: 'generalv2', 21 | url: 'wss://spark-api.xf-yun.com/v2.1/chat', 22 | }, 23 | 'spark-3': { 24 | domain: 'generalv3', 25 | url: 'wss://spark-api.xf-yun.com/v3.1/chat', 26 | }, 27 | }; 28 | 29 | /** 30 | * Creates a model response for the given chat conversation. 
31 | * 32 | * See https://help.aliyun.com/zh/dashscope/developer-reference/api-details 33 | */ 34 | create(body: ChatCompletionCreateParamsNonStreaming, options?: RequestOptions): Promise; 35 | create( 36 | body: ChatCompletionCreateParamsStreaming, 37 | options?: RequestOptions, 38 | ): Promise>; 39 | 40 | async create( 41 | params: ChatCompletionCreateParams, 42 | options?: RequestOptions, 43 | ): Promise | OpenAI.ChatCompletion> { 44 | const { model, messages, functions, user, ...rest } = params; 45 | 46 | const resource = this.resources[model]; 47 | 48 | const url = this._client.generateAuthorizationURL(resource.url, 'GET'); 49 | 50 | const body: ChatCompletions.ChatCompletionParameters = { 51 | header: { 52 | app_id: this._client.appId, 53 | }, 54 | parameter: { 55 | chat: { 56 | ...rest, 57 | domain: resource.domain, 58 | }, 59 | }, 60 | payload: { 61 | message: { 62 | text: messages, 63 | }, 64 | }, 65 | }; 66 | 67 | if (functions) { 68 | body.payload.functions = { text: functions }; 69 | } 70 | 71 | if (user) { 72 | body.header.uid = user; 73 | } 74 | 75 | const controller = new AbortController(); 76 | 77 | if (options?.signal) { 78 | options.signal.addEventListener('abort', () => { 79 | controller.abort(); 80 | }); 81 | } 82 | 83 | const ws: WebSocket = new globalThis.WebSocket(url); 84 | 85 | ws.onopen = () => { 86 | ws.send(JSON.stringify(body)); 87 | }; 88 | 89 | if (params.stream) { 90 | const readableStream = new ReadableStream({ 91 | pull(ctrl) { 92 | const encoder = new TextEncoder(); 93 | 94 | ws.onmessage = event => { 95 | const data: ChatCompletions.ChatCompletionResponse = JSON.parse(event.data); 96 | 97 | const { header, payload } = data; 98 | 99 | if (header.code !== 0) { 100 | ctrl.error(new APIError(undefined, data.header, undefined, undefined)); 101 | return; 102 | } 103 | 104 | const choices = payload.choices.text; 105 | 106 | const [message] = choices; 107 | 108 | const choice: OpenAI.ChatCompletionChunk.Choice = { 109 | index: 0, 110 | 
delta: { 111 | role: message.role, 112 | content: message.content, 113 | }, 114 | finish_reason: null, 115 | }; 116 | 117 | if (header.status === 2) { 118 | choice.finish_reason = 'stop'; 119 | } 120 | 121 | if (message.function_call) { 122 | choice.delta.function_call = message.function_call; 123 | } 124 | 125 | const completion: OpenAI.ChatCompletionChunk = { 126 | id: header.sid, 127 | model, 128 | choices: [choice], 129 | object: 'chat.completion.chunk', 130 | created: Date.now() / 1000, 131 | }; 132 | 133 | ctrl.enqueue(encoder.encode(JSON.stringify(completion) + '\n')); 134 | }; 135 | ws.onerror = error => { 136 | ctrl.error(error); 137 | }; 138 | }, 139 | cancel() { 140 | ws.close(); 141 | }, 142 | }); 143 | 144 | controller.signal.addEventListener('abort', () => { 145 | ws.close(); 146 | }); 147 | 148 | return Stream.fromReadableStream(readableStream, controller); 149 | } 150 | 151 | return new Promise((resolve, reject) => { 152 | ws.onmessage = event => { 153 | const data: ChatCompletions.ChatCompletionResponse = JSON.parse(event.data); 154 | 155 | const { header, payload } = data; 156 | 157 | // 2 代表完成 158 | if (header.status !== 2) return; 159 | 160 | const usage = payload.usage.text; 161 | const choices = payload.choices.text; 162 | 163 | const [message] = choices; 164 | 165 | const choice: OpenAI.ChatCompletion.Choice = { 166 | index: 0, 167 | message: { 168 | role: 'assistant', 169 | content: message.content, 170 | }, 171 | logprobs: null, 172 | finish_reason: 'stop', 173 | }; 174 | 175 | const completion: OpenAI.ChatCompletion = { 176 | id: header.sid, 177 | object: 'chat.completion', 178 | created: Date.now() / 1000, 179 | model, 180 | choices: [choice], 181 | usage: { 182 | completion_tokens: usage.completion_tokens, 183 | total_tokens: usage.total_tokens, 184 | prompt_tokens: usage.prompt_tokens, 185 | }, 186 | }; 187 | 188 | resolve(completion); 189 | }; 190 | 191 | ws.onerror = error => reject(error); 192 | }); 193 | } 194 | } 195 | 196 | export 
interface ChatCompletionCreateParamsNonStreaming extends OpenAI.ChatCompletionCreateParamsNonStreaming { 197 | model: ChatModel; 198 | top_k?: number | null; 199 | chat_id?: string | null; 200 | } 201 | 202 | export interface ChatCompletionCreateParamsStreaming extends OpenAI.ChatCompletionCreateParamsStreaming { 203 | model: ChatModel; 204 | top_k?: number | null; 205 | chat_id?: string | null; 206 | } 207 | 208 | export type ChatCompletionCreateParams = ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming; 209 | 210 | export type ChatModel = 'spark-1.5' | 'spark-2' | 'spark-3'; 211 | 212 | export namespace ChatCompletions { 213 | export type ChatCompletionParameters = { 214 | header: { 215 | app_id: string; 216 | uid?: string; 217 | }; 218 | 219 | parameter: { 220 | chat: { 221 | domain: string; 222 | temperature?: number | null; 223 | max_tokens?: number | null; 224 | top_k?: number | null; 225 | chat_id?: string | null; 226 | }; 227 | }; 228 | 229 | payload: { 230 | message: { 231 | text: OpenAI.ChatCompletionMessageParam[]; 232 | }; 233 | 234 | functions?: { 235 | text: OpenAI.ChatCompletionCreateParams.Function[]; 236 | }; 237 | }; 238 | }; 239 | 240 | export type ChatCompletionResponse = { 241 | header: { 242 | code: number; 243 | message: string; 244 | sid: string; 245 | status: number; 246 | }; 247 | payload: { 248 | choices: { 249 | status: number; 250 | seq: number; 251 | text: OpenAI.ChatCompletionMessage[]; 252 | }; 253 | usage: { 254 | text: { 255 | question_tokens: number; 256 | prompt_tokens: number; 257 | completion_tokens: number; 258 | total_tokens: number; 259 | }; 260 | }; 261 | }; 262 | }; 263 | } 264 | -------------------------------------------------------------------------------- /src/spark/resources/chat/index.ts: -------------------------------------------------------------------------------- 1 | export { Chat } from './chat'; 2 | export { 3 | type ChatModel, 4 | type ChatCompletionCreateParams, 5 | type 
ChatCompletionCreateParamsNonStreaming, 6 | type ChatCompletionCreateParamsStreaming, 7 | Completions, 8 | } from './completions'; 9 | -------------------------------------------------------------------------------- /src/spark/resources/images.ts: -------------------------------------------------------------------------------- 1 | import OpenAI, { APIError } from 'openai'; 2 | import { type RequestOptions } from 'openai/core'; 3 | 4 | import { APIResource } from '../resource'; 5 | 6 | // TODO: 没有权限,暂未测试 7 | export class Images extends APIResource { 8 | /** 9 | * See https://www.xfyun.cn/doc/spark/ImageGeneration.html 10 | */ 11 | async generate(params: OpenAI.ImageGenerateParams, options?: RequestOptions): Promise { 12 | const { prompt, user } = params; 13 | 14 | const body: ImagesAPI.ImageGenerateParams = { 15 | header: { 16 | app_id: this._client.appId, 17 | uid: user, 18 | }, 19 | parameter: { 20 | chat: { 21 | max_tokens: 4096, 22 | domain: 'general', 23 | temperature: 0.5, 24 | }, 25 | }, 26 | payload: { 27 | message: { 28 | text: [{ role: 'user', content: prompt }], 29 | }, 30 | }, 31 | }; 32 | 33 | const url = this._client.generateAuthorizationURL('https://spark-api.cn-huabei-1.xf-yun.com/v2.1/tti', 'POST'); 34 | 35 | const response: Response = await this._client.post(url, { 36 | ...options, 37 | body, 38 | __binaryResponse: true, 39 | }); 40 | 41 | const resp: ImagesAPI.ImageGenerateResponse = await response.json(); 42 | 43 | if (resp.header.code > 0) { 44 | throw new APIError(undefined, resp.header, undefined, undefined); 45 | } 46 | 47 | return { 48 | created: Date.now() / 1000, 49 | data: [ 50 | { 51 | // base64 encoded image 52 | url: resp.payload.choices.text[0].content, 53 | }, 54 | ], 55 | }; 56 | } 57 | } 58 | 59 | namespace ImagesAPI { 60 | export type ImageGenerateMessageParam = { 61 | role: 'user'; 62 | content: string; 63 | }; 64 | 65 | export type ImageGenerateParams = { 66 | header: { 67 | /** 68 | * 应用ID 69 | */ 70 | app_id: string; 71 | /** 
72 | * 用户唯一标识 73 | */ 74 | uid?: string; 75 | }; 76 | parameter: { 77 | chat: { 78 | max_tokens: number; 79 | domain: string; 80 | temperature: number; 81 | }; 82 | }; 83 | payload: { 84 | message: { 85 | text: ImageGenerateMessageParam[]; 86 | }; 87 | }; 88 | }; 89 | 90 | type ImageGenerateAssistantMessage = { 91 | index: 0; 92 | role: 'assistant'; 93 | content: string; 94 | }; 95 | 96 | export type ImageGenerateResponse = { 97 | header: { 98 | code: number; 99 | message: string; 100 | sid: string; 101 | status: number; 102 | }; 103 | payload: { 104 | choices: { 105 | status: number; 106 | seq: number; 107 | text: ImageGenerateAssistantMessage[]; 108 | }; 109 | }; 110 | }; 111 | } 112 | -------------------------------------------------------------------------------- /src/spark/resources/index.ts: -------------------------------------------------------------------------------- 1 | export * from './chat/index'; 2 | 3 | export { Images } from './images'; 4 | -------------------------------------------------------------------------------- /src/streaming.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIError } from 'openai'; 2 | 3 | export type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; 4 | 5 | export type ServerSentEvent = { 6 | event: string | null; 7 | data: string; 8 | raw: string[]; 9 | }; 10 | 11 | export async function* iterMessages( 12 | response: Response, 13 | decoder: SSEDecoder, 14 | controller: AbortController, 15 | ): AsyncGenerator { 16 | if (!response.body) { 17 | controller.abort(); 18 | throw new OpenAIError(`Attempted to iterate over a response with no body`); 19 | } 20 | 21 | const lineDecoder = new LineDecoder(); 22 | 23 | const iter = readableStreamAsyncIterable(response.body); 24 | for await (const chunk of iter) { 25 | for (const line of lineDecoder.decode(chunk)) { 26 | const sse = decoder.decode(line); 27 | if (sse) yield sse; 28 | } 29 | } 30 | 31 | for (const 
line of lineDecoder.flush()) { 32 | const sse = decoder.decode(line); 33 | if (sse) yield sse; 34 | } 35 | } 36 | 37 | export class SSEDecoder { 38 | private data: string[]; 39 | private event: string | null; 40 | private chunks: string[]; 41 | 42 | constructor() { 43 | this.event = null; 44 | this.data = []; 45 | this.chunks = []; 46 | } 47 | 48 | decode(line: string) { 49 | if (line.endsWith('\r')) { 50 | line = line.substring(0, line.length - 1); 51 | } 52 | 53 | if (!line) { 54 | // empty line and we didn't previously encounter any messages 55 | if (!this.event && !this.data.length) return null; 56 | 57 | const sse: ServerSentEvent = { 58 | event: this.event, 59 | data: this.data.join('\n'), 60 | raw: this.chunks, 61 | }; 62 | 63 | this.event = null; 64 | this.data = []; 65 | this.chunks = []; 66 | 67 | return sse; 68 | } 69 | 70 | this.chunks.push(line); 71 | 72 | if (line.startsWith(':')) { 73 | return null; 74 | } 75 | 76 | // eslint-disable-next-line @typescript-eslint/no-unused-vars, prefer-const 77 | let [fieldname, _, value] = partition(line, ':'); 78 | 79 | if (value.startsWith(' ')) { 80 | value = value.substring(1); 81 | } 82 | 83 | if (fieldname === 'event') { 84 | this.event = value; 85 | } else if (fieldname === 'data') { 86 | this.data.push(value); 87 | } 88 | 89 | return null; 90 | } 91 | } 92 | 93 | /** 94 | * A re-implementation of httpx's `LineDecoder` in Python that handles incrementally 95 | * reading lines from text. 
96 | * 97 | * https://github.com/encode/httpx/blob/920333ea98118e9cf617f246905d7b202510941c/httpx/_decoders.py#L258 98 | */ 99 | export class LineDecoder { 100 | // prettier-ignore 101 | static NEWLINE_CHARS = new Set(['\n', '\r', '\x0b', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029']); 102 | static NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g; 103 | 104 | buffer: string[]; 105 | trailingCR: boolean; 106 | textDecoder: any; // TextDecoder found in browsers; not typed to avoid pulling in either "dom" or "node" types. 107 | 108 | constructor() { 109 | this.buffer = []; 110 | this.trailingCR = false; 111 | } 112 | 113 | decode(chunk: Bytes): string[] { 114 | let text = this.decodeText(chunk); 115 | 116 | if (this.trailingCR) { 117 | text = '\r' + text; 118 | this.trailingCR = false; 119 | } 120 | if (text.endsWith('\r')) { 121 | this.trailingCR = true; 122 | text = text.slice(0, -1); 123 | } 124 | 125 | if (!text) { 126 | return []; 127 | } 128 | 129 | const trailingNewline = LineDecoder.NEWLINE_CHARS.has(text[text.length - 1] || ''); 130 | let lines = text.split(LineDecoder.NEWLINE_REGEXP); 131 | 132 | if (lines.length === 1 && !trailingNewline) { 133 | this.buffer.push(lines[0]!); 134 | return []; 135 | } 136 | 137 | if (this.buffer.length > 0) { 138 | lines = [this.buffer.join('') + lines[0], ...lines.slice(1)]; 139 | this.buffer = []; 140 | } 141 | 142 | if (!trailingNewline) { 143 | this.buffer = [lines.pop() || '']; 144 | } 145 | 146 | return lines; 147 | } 148 | 149 | decodeText(bytes: Bytes): string { 150 | if (bytes == null) return ''; 151 | if (typeof bytes === 'string') return bytes; 152 | 153 | // Node: 154 | if (typeof Buffer !== 'undefined') { 155 | if (bytes instanceof Buffer) { 156 | return bytes.toString(); 157 | } 158 | if (bytes instanceof Uint8Array) { 159 | return Buffer.from(bytes).toString(); 160 | } 161 | 162 | throw new OpenAIError( 163 | `Unexpected: received non-Uint8Array (${bytes.constructor.name}) 
stream chunk in an environment with a global "Buffer" defined, which this library assumes to be Node. Please report this error.`, 164 | ); 165 | } 166 | 167 | // Browser 168 | if (typeof TextDecoder !== 'undefined') { 169 | if (bytes instanceof Uint8Array || bytes instanceof ArrayBuffer) { 170 | this.textDecoder ??= new TextDecoder('utf8'); 171 | return this.textDecoder.decode(bytes); 172 | } 173 | 174 | throw new OpenAIError( 175 | `Unexpected: received non-Uint8Array/ArrayBuffer (${ 176 | (bytes as any).constructor.name 177 | }) in a web platform. Please report this error.`, 178 | ); 179 | } 180 | 181 | throw new OpenAIError( 182 | `Unexpected: neither Buffer nor TextDecoder are available as globals. Please report this error.`, 183 | ); 184 | } 185 | 186 | flush(): string[] { 187 | if (!this.buffer.length && !this.trailingCR) { 188 | return []; 189 | } 190 | 191 | const lines = [this.buffer.join('')]; 192 | this.buffer = []; 193 | this.trailingCR = false; 194 | return lines; 195 | } 196 | } 197 | 198 | function partition(str: string, delimiter: string): [string, string, string] { 199 | const index = str.indexOf(delimiter); 200 | if (index !== -1) { 201 | return [str.substring(0, index), delimiter, str.substring(index + delimiter.length)]; 202 | } 203 | 204 | return [str, '', '']; 205 | } 206 | 207 | /** 208 | * Most browsers don't yet have async iterable support for ReadableStream, 209 | * and Node has a very different way of reading bytes from its "ReadableStream". 
210 | * 211 | * This polyfill was pulled from https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490 212 | */ 213 | export function readableStreamAsyncIterable(stream: any): AsyncIterableIterator { 214 | if (stream[Symbol.asyncIterator]) return stream; 215 | 216 | const reader = stream.getReader(); 217 | return { 218 | async next() { 219 | try { 220 | const result = await reader.read(); 221 | if (result?.done) reader.releaseLock(); // release lock when stream becomes closed 222 | return result; 223 | } catch (e) { 224 | reader.releaseLock(); // release lock when stream becomes errored 225 | throw e; 226 | } 227 | }, 228 | async return() { 229 | const cancelPromise = reader.cancel(); 230 | reader.releaseLock(); 231 | await cancelPromise; 232 | return { done: true, value: undefined }; 233 | }, 234 | [Symbol.asyncIterator]() { 235 | return this; 236 | }, 237 | }; 238 | } 239 | -------------------------------------------------------------------------------- /src/util.ts: -------------------------------------------------------------------------------- 1 | export const castToError = (err: any): Error => { 2 | if (err instanceof Error) return err; 3 | return new Error(err); 4 | }; 5 | 6 | export function ensureArray(value: T | T[]): T[] { 7 | return Array.isArray(value) ? 
value : [value]; 8 | } 9 | 10 | export function random(min: number, max: number): number { 11 | return Math.floor(Math.random() * (max - min + 1)) + min; 12 | } 13 | -------------------------------------------------------------------------------- /src/vyro/index.ts: -------------------------------------------------------------------------------- 1 | import type { Agent } from 'node:http'; 2 | 3 | import { APIClient, type DefaultQuery, type Fetch, type Headers } from 'openai/core'; 4 | 5 | import * as API from './resources'; 6 | 7 | export interface VYroAIOptions { 8 | baseURL?: string; 9 | apiKey?: string; 10 | timeout?: number | undefined; 11 | httpAgent?: Agent; 12 | apiType?: (string & NonNullable) | 'api'; 13 | fetch?: Fetch | undefined; 14 | defaultHeaders?: Headers; 15 | defaultQuery?: DefaultQuery; 16 | } 17 | 18 | export class VYroAI extends APIClient { 19 | public apiType: (string & NonNullable) | 'api'; 20 | 21 | protected apiKey: string; 22 | private _options: VYroAIOptions; 23 | 24 | constructor(options: VYroAIOptions = {}) { 25 | const { 26 | apiKey = process.env.VYRO_API_KEY || '', 27 | apiType = process.env.VYRO_API_TYPE || 'api', 28 | baseURL = 'https://api.vyro.ai/v1', 29 | timeout = 30000, 30 | fetch = globalThis.fetch, 31 | httpAgent = undefined, 32 | ...rest 33 | } = options; 34 | 35 | super({ 36 | baseURL, 37 | timeout, 38 | fetch, 39 | httpAgent, 40 | ...rest, 41 | }); 42 | 43 | this._options = options; 44 | 45 | this.apiKey = apiKey; 46 | this.apiType = apiType; 47 | } 48 | 49 | images = new API.Images(this); 50 | 51 | protected override authHeaders() { 52 | return { 53 | Authorization: `Bearer ${this.apiKey}`, 54 | }; 55 | } 56 | 57 | protected override defaultHeaders(): Headers { 58 | return { 59 | ...this.authHeaders(), 60 | ...this._options.defaultHeaders, 61 | }; 62 | } 63 | 64 | protected override defaultQuery(): DefaultQuery | undefined { 65 | return this._options.defaultQuery; 66 | } 67 | } 68 | 69 | export namespace VYroAI { 70 | 
// Members of `export namespace VYroAI { ... }` (the namespace opens just above
// this span): convenience re-exports of the resource types on the client.
export type Images = API.Images;
export type ImageGenerateParams = API.ImageGenerateParams;
}

export default VYroAI;

// ---- src/vyro/resource.ts ----
import { VYroAI } from './index';

/**
 * Base class for VYro API resources; holds a back-reference to the owning
 * client so subclasses can issue requests through it.
 */
export class APIResource {
  protected _client: VYroAI;

  constructor(client: VYroAI) {
    this._client = client;
  }
}

// ---- src/vyro/resources/images.ts ----
import { ReadableStream } from 'node:stream/web';

import { type RequestOptions, type Uploadable } from 'openai/core';
import { toFile } from 'openai/uploads';

import { random } from '../../util';
import { APIResource } from '../resource';

/**
 * Image generation/editing endpoints of the VYro (Imagine) API.
 *
 * Every method builds a multipart form, POSTs it through the client, and
 * returns the raw binary response body wrapped in an OpenAI-style
 * `ImagesResponse`.
 */
export class Images extends APIResource {
  // Maps public model names to the vendor's numeric style ids.
  // NOTE(review): type argument lost in transcription — presumably
  // Record<ImageModel, number>; verify against the repo.
  protected models: Record = {
    'imagine-v5': 33,
    'anime-v5': 34,
    'imagine-v4.1': 32,
    'imagine-v4': 31,
    'imagine-v3': 30,
    'imagine-v1': 28,
    realistic: 29,
    anime: 21,
    portrait: 26,
    'sdxl-1.0': 122,
  };

  /**
   * Creates a variation of a given image.
   *
   * @param params - source image, prompt and tuning knobs.
   * @param options - per-request overrides forwarded to the client.
   * @returns the generated image as a binary stream plus a Unix timestamp.
   */
  async createVariation(params: ImageCreateVariationParams, options?: RequestOptions): Promise {
    const client = this._client;

    const formData = new FormData();

    // An explicit `style` wins; otherwise look up the model's style id,
    // defaulting the model to 'realistic' (style id 29).
    const { model, style = this.models[model ?? 'realistic'] } = params;

    // @ts-expect-error
    formData.append('image', await toFile(params.image));
    formData.append('style_id', (style || 29).toString());
    formData.append('prompt', params.prompt);
    formData.append('negative_prompt', params.negative_prompt || '');
    formData.append('strength', (params.strength || 0).toString());
    formData.append('steps', (params.steps || 30).toString());
    formData.append('cfg', (params.cfg || 7.5).toString());
    // A fixed seed makes generation reproducible; otherwise pick one at random.
    formData.append('seed', (params.seed || random(1, 1000000)).toString());

    // The 'MultipartBody' toStringTag tells openai/core to send the FormData
    // as a prebuilt multipart payload; __binaryResponse skips JSON parsing.
    const response: Response = await client.post(`/imagine/${client.apiType}/generations/variations`, {
      ...options,
      body: {
        body: formData,
        [Symbol.toStringTag]: 'MultipartBody',
      },
      __binaryResponse: true,
    });

    return {
      data: [
        {
          binary: response.body as unknown as ReadableStream,
        },
      ],
      created: Math.floor(Date.now() / 1000),
    };
  }

  /**
   * Experience the magic of Imagine's Image Remix feature, designed to breathe
   * new life into your existing images.
   *
   * @param params - source image, prompt, control mode and tuning knobs.
   * @param options - per-request overrides forwarded to the client.
   * @returns the remixed image as a binary stream plus a Unix timestamp.
   */
  async edit(params: ImageEditParams, options?: RequestOptions): Promise {
    const client = this._client;

    const formData = new FormData();

    // Same style resolution as createVariation: explicit style, else the
    // model's id, else 'realistic' (29).
    const { model, style = this.models[model ?? 'realistic'] } = params;

    // @ts-expect-error
    formData.append('image', await toFile(params.image));
    formData.append('style_id', (style || 29).toString());
    formData.append('prompt', params.prompt);
    formData.append('negative_prompt', params.negative_prompt || '');
    formData.append('strength', (params.strength || 0).toString());
    formData.append('control', params.control || 'openpose');
    formData.append('steps', (params.steps || 30).toString());
    formData.append('cfg', (params.cfg || 7.5).toString());
    formData.append('seed', (params.seed || random(1, 1000000)).toString());

    const response: Response = await client.post(`/imagine/${client.apiType}/edits/remix`, {
      ...options,
      body: {
        body: formData,
        [Symbol.toStringTag]: 'MultipartBody',
      },
      __binaryResponse: true,
    });

    return {
      data: [
        {
          binary: response.body as unknown as ReadableStream,
        },
      ],
      created: Math.floor(Date.now() / 1000),
    };
  }

  /**
   * Creates an image given a prompt.
   *
   * @param params - prompt and tuning knobs; see ImageGenerateParams.
   * @param options - per-request overrides forwarded to the client.
   * @returns the generated image as a binary stream plus a Unix timestamp.
   */
  async generate(params: ImageGenerateParams, options?: RequestOptions): Promise {
    const client = this._client;

    const formData = new FormData();

    // Default model here is 'imagine-v4' (style id 31); the final `|| 30`
    // fallback only applies if the lookup itself yields a falsy value.
    const { model, style = this.models[model ?? 'imagine-v4'] } = params;

    formData.append('style_id', (style || 30).toString());
    formData.append('prompt', params.prompt);
    formData.append('negative_prompt', params.negative_prompt || '');
    formData.append('aspect_ratio', params.aspect_ratio || '1:1');
    formData.append('steps', (params.steps || 30).toString());
    formData.append('cfg', (params.cfg || 7.5).toString());
    formData.append('seed', (params.seed || random(1, 1000000)).toString());
    // 'hd' quality maps onto the vendor's high_res_results flag ('1'/'0').
    formData.append('high_res_results', params.quality === 'hd' ? '1' : '0');

    const response: Response = await client.post(`/imagine/${client.apiType}/generations`, {
      ...options,
      body: {
        body: formData,
        [Symbol.toStringTag]: 'MultipartBody',
      },
      __binaryResponse: true,
    });

    return {
      created: Math.floor(Date.now() / 1000),
      data: [
        {
          binary: response.body as unknown as ReadableStream,
        },
      ],
    };
  }

  /**
   * The image upscale feature provides a better image to the user by
   * increasing its resolution.
   *
   * @param params - the image to upscale.
   * @param options - per-request overrides forwarded to the client.
   * @returns the upscaled image as a binary stream plus a Unix timestamp.
   */
  async upscale(params: ImageUpscaleParams, options?: RequestOptions): Promise {
    const client = this._client;

    const formData = new FormData();

    // @ts-expect-error
    formData.append('image', await toFile(params.image));

    const response: Response = await client.post(`/imagine/${client.apiType}/upscale`, {
      ...options,
      body: {
        body: formData,
        [Symbol.toStringTag]: 'MultipartBody',
      },
      __binaryResponse: true,
    });

    return {
      created: Math.floor(Date.now() / 1000),
      data: [
        {
          binary: response.body as unknown as ReadableStream,
        },
      ],
    };
  }

  /**
   * Inpaint is an advanced feature of the Text-to-Image Stable Diffusion pipeline.
   * It allows users to remove unwanted objects or elements from an image by
   * intelligently filling in the missing areas.
   *
   * @param params - image, mask, prompt and tuning knobs.
   * @param options - per-request overrides forwarded to the client.
   * @returns the inpainted image as a binary stream plus a Unix timestamp.
   */
  async restoration(params: ImageRestorationParams, options?: RequestOptions): Promise {
    const client = this._client;

    const formData = new FormData();

    // @ts-expect-error
    formData.append('image', await toFile(params.image));
    // @ts-expect-error
    formData.append('mask', await toFile(params.mask));
    // Inpainting currently supports only style id 1 (see ImageRestorationParams.style).
    formData.append('style_id', '1');
    formData.append('prompt', params.prompt);
    // NOTE: this endpoint uses 'neg_prompt' / 'inpaint_strength' field names,
    // unlike the 'negative_prompt' / 'strength' fields used elsewhere.
    formData.append('neg_prompt', params.negative_prompt || '');
    formData.append('inpaint_strength', (params.strength || 0).toString());
    formData.append('cfg', (params.cfg || 7.5).toString());

    // NOTE(review): this posts to the same /generations/variations path as
    // createVariation — confirm this is the intended inpaint endpoint.
    const response: Response = await client.post(`/imagine/${client.apiType}/generations/variations`, {
      ...options,
      body: {
        body: formData,
        [Symbol.toStringTag]: 'MultipartBody',
      },
      __binaryResponse: true,
    });

    return {
      data: [
        {
          binary: response.body as unknown as ReadableStream,
        },
      ],
      created: Math.floor(Date.now() / 1000),
    };
  }
}

/** Public model names accepted by the Images resource (see Images#models). */
export type ImageModel =
  | 'imagine-v5'
  | 'anime-v5'
  | 'imagine-v4.1'
  | 'imagine-v4'
  | 'imagine-v3'
  | 'imagine-v1'
  | 'realistic'
  | 'anime'
  | 'portrait'
  | 'sdxl-1.0';
export interface ImageCreateVariationParams {
  /**
   * The image to use as the basis for the variation(s). Must be a valid PNG file,
   * less than 4MB, and square.
   */
  image: Uploadable;

  /**
   * The text guides the image generation.
   */
  prompt: string;

  /**
   * The model to use for image generation.
   */
  model?: ImageModel | null;

  /**
   * The negative_prompt parameter empowers you to provide additional
   * guidance to the AI by specifying what you don't want in the image.
   * It helps refine the creative direction, ensuring that the generated
   * content aligns with your intentions.
   */
  negative_prompt?: string | null;

  /**
   * The style_id parameter is like choosing an artistic palette for your image.
   * By selecting a style id, you guide the AI in crafting the image with a particular visual aesthetic.
   * Style IDs range from 1 to N, each representing a unique artistic style.
   *
   * @defaultValue 29
   */
  style?: number | null;

  /**
   * The steps parameter defines the number of operations or iterations that the
   * generator will perform during image creation. It can impact the complexity
   * and detail of the generated image.
   *
   * Range: 30-50
   *
   * @defaultValue 30
   */
  steps?: number | null;

  /**
   * The cfg parameter acts as a creative control knob.
   * You can adjust it to fine-tune the level of artistic innovation in the image.
   * Lower values encourage faithful execution of the prompt,
   * while higher values introduce more creative and imaginative variations.
   *
   * Range: 3 - 15
   *
   * @defaultValue 7.5
   */
  cfg?: number | null;

  /**
   * The seed parameter serves as the initial value for the random number generator.
   * By setting a specific seed value, you can ensure that the AI generates the same
   * image or outcome each time you use that exact seed.
   *
   * range: 1-Infinity
   */
  seed?: number | null;

  /**
   * Influences the impact of the control image on output.
   *
   * Range: 0 - 1
   *
   * @defaultValue 0
   */
  strength?: number | null;

  /**
   * Currently only the `binary` response format is supported.
   */
  response_format?: 'binary' | null;
}

export interface ImageEditParams {
  /**
   * The image to use as the basis for the variation(s). Must be a valid PNG file,
   * less than 4MB, and square.
   */
  image: Uploadable;

  /**
   * The text guides the image generation.
   */
  prompt: string;

  /**
   * The model to use for image generation.
   */
  model?: ImageModel | null;

  /**
   * The negative_prompt parameter empowers you to provide additional
   * guidance to the AI by specifying what you don't want in the image.
   * It helps refine the creative direction, ensuring that the generated
   * content aligns with your intentions.
   */
  negative_prompt?: string | null;

  /**
   * The style_id parameter is like choosing an artistic palette for your image.
   * By selecting a style id, you guide the AI in crafting the image with a particular visual aesthetic.
   * Style IDs range from 1 to N, each representing a unique artistic style.
   *
   * @defaultValue 29
   */
  style?: number | null;

  /**
   * The steps parameter defines the number of operations or iterations that the
   * generator will perform during image creation. It can impact the complexity
   * and detail of the generated image.
   *
   * Range: 30-50
   *
   * @defaultValue 30
   */
  steps?: number | null;

  /**
   * The cfg parameter acts as a creative control knob.
   * You can adjust it to fine-tune the level of artistic innovation in the image.
   * Lower values encourage faithful execution of the prompt,
   * while higher values introduce more creative and imaginative variations.
   *
   * Range: 3 - 15
   *
   * @defaultValue 7.5
   */
  cfg?: number | null;

  /**
   * The seed parameter serves as the initial value for the random number generator.
   * By setting a specific seed value, you can ensure that the AI generates the same
   * image or outcome each time you use that exact seed.
   *
   * range: 1-Infinity
   */
  seed?: number | null;

  /**
   * Influences the impact of the control image on output.
   *
   * Range: 0 - 1
   *
   * @defaultValue 0
   */
  strength?: number | null;

  /**
   * The method/control used to guide image generation.
   *
   * @defaultValue openpose
   */
  control?: 'openpose' | 'scribble' | 'canny' | 'lineart' | 'depth' | null;

  /**
   * Currently only the `binary` response format is supported.
   */
  response_format?: 'binary' | null;
}

export interface ImageGenerateParams {
  /**
   * A prompt is the text input that guides the AI in generating visual content.
   * It defines the textual description or concept for the image you wish to generate.
   * Think of it as the creative vision you want the AI to bring to life.
   * Crafting clear and creative prompts is crucial for achieving the desired results with Imagine's API.
   * For example, A serene forest with a river under the moonlight, can be a prompt.
   */
  prompt: string;

  /**
   * The model to use for image generation.
   */
  model?: ImageModel | null;

  /**
   * The negative_prompt parameter empowers you to provide additional
   * guidance to the AI by specifying what you don't want in the image.
   * It helps refine the creative direction, ensuring that the generated
   * content aligns with your intentions.
   */
  negative_prompt?: string | null;

  /**
   * The aspect_ratio parameter allows you to specify the proportions and dimensions of the generated image.
   * You can set it to different ratios like 1:1 for square images, 16:9 for widescreen, or 3:4 for vertical formats,
   * shaping the visual composition to your liking.
   *
   * @defaultValue 1:1
   */
  aspect_ratio?: '1:1' | '3:2' | '4:3' | '3:4' | '16:9' | '9:16' | null;

  /**
   * The quality parameter is a flag that, when set to hd,
   * requests high-resolution results from the AI.
   *
   * @defaultValue standard
   */
  quality?: 'standard' | 'hd';

  /**
   * The style_id parameter is like choosing an artistic palette for your image.
   * By selecting a style id, you guide the AI in crafting the image with a particular visual aesthetic.
   * Style IDs range from 1 to N, each representing a unique artistic style.
   *
   * @defaultValue 30
   */
  style?: number | null;

  /**
   * The steps parameter defines the number of operations or iterations that the
   * generator will perform during image creation. It can impact the complexity
   * and detail of the generated image.
   *
   * Range: 30-50
   *
   * @defaultValue 30
   */
  steps?: number | null;

  /**
   * The cfg parameter acts as a creative control knob.
   * You can adjust it to fine-tune the level of artistic innovation in the image.
   * Lower values encourage faithful execution of the prompt,
   * while higher values introduce more creative and imaginative variations.
   *
   * Range: 3 - 15
   *
   * @defaultValue 7.5
   */
  cfg?: number | null;

  /**
   * The seed parameter serves as the initial value for the random number generator.
   * By setting a specific seed value, you can ensure that the AI generates the same
   * image or outcome each time you use that exact seed.
   *
   * range: 1-Infinity
   */
  seed?: number | null;

  /**
   * Currently only the `binary` response format is supported.
   */
  response_format?: 'binary' | null;
}

export interface ImageUpscaleParams {
  /**
   * The image to use as the basis for the variation(s). Must be a valid PNG file,
   * less than 4MB, and square.
   */
  image: Uploadable;
}

export interface Image {
  /**
   * The binary of the generated image.
   */
  binary?: ReadableStream;

  /**
   * The base64-encoded JSON of the generated image, if `response_format` is
   * `b64_json`.
   */
  b64_json?: string;

  /**
   * The prompt that was used to generate the image, if there was any revision to the
   * prompt.
   */
  revised_prompt?: string;

  /**
   * The URL of the generated image, if `response_format` is `url` (default).
   */
  url?: string;
}

export interface ImagesResponse {
  /**
   * When the request was made.
   */
  created: number;

  /**
   * The generated images.
   */
  data: Image[];
}

// ---- src/vyro/resources/index.ts ----
export { Images, type ImageGenerateParams } from './images';

// ---- tsconfig.json ----
{
  "$schema": "http://json.schemastore.org/tsconfig",
  "compilerOptions": {
    "composite": true,
    "target": "ES2022",
    "module": "ESNext",
    "removeComments": false,
    "strict": true,
    "forceConsistentCasingInFileNames": true,
    "useDefineForClassFields": false,
    "moduleResolution": "node",
    "resolveJsonModule": true,
    "allowSyntheticDefaultImports": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "rootDir": "src"
  },
  "include": ["src"],
  "exclude": ["node_modules"],
  "references": [
    {
      "path": "./tsconfig.node.json"
    }
  ]
}
"http://json.schemastore.org/tsconfig", 3 | "compilerOptions": { 4 | "composite": true, 5 | "target": "ES2017", 6 | "module": "ESNext", 7 | "strict": true, 8 | "forceConsistentCasingInFileNames": true, 9 | "moduleResolution": "node", 10 | "resolveJsonModule": true, 11 | "allowSyntheticDefaultImports": true, 12 | "esModuleInterop": true 13 | }, 14 | "include": ["vite.config.mts", "package.json"], 15 | "exclude": ["node_modules"] 16 | } 17 | -------------------------------------------------------------------------------- /vite.config.mts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from 'vite'; 2 | import checker from 'vite-plugin-checker'; 3 | import dts from 'vite-plugin-dts'; 4 | import { externalizeDeps } from 'vite-plugin-externalize-deps'; 5 | 6 | import pkg from './package.json'; 7 | 8 | /** 9 | * vite config 10 | * @see https://vitejs.dev/ 11 | */ 12 | export default defineConfig({ 13 | plugins: [ 14 | checker({ 15 | typescript: true, 16 | }), 17 | externalizeDeps(), 18 | dts({ 19 | outDir: './dist-types', 20 | }), 21 | ], 22 | define: { 23 | 'process.env.PKG_NAME': JSON.stringify(pkg.name), 24 | 'process.env.PKG_VERSION': JSON.stringify(pkg.version), 25 | }, 26 | build: { 27 | sourcemap: false, 28 | copyPublicDir: false, 29 | reportCompressedSize: false, 30 | lib: { 31 | entry: { 32 | index: './src/index.ts', 33 | 'shims/node': './src/shims/node.ts', 34 | }, 35 | }, 36 | rollupOptions: { 37 | output: [ 38 | { 39 | format: 'esm', 40 | dir: 'dist', 41 | exports: 'named', 42 | entryFileNames: '[name].mjs', 43 | chunkFileNames: '[name].mjs', 44 | }, 45 | { 46 | format: 'cjs', 47 | dir: 'dist', 48 | exports: 'named', 49 | entryFileNames: '[name].cjs', 50 | chunkFileNames: '[name].cjs', 51 | }, 52 | ], 53 | }, 54 | }, 55 | }); 56 | --------------------------------------------------------------------------------