├── .gitignore
├── Dockerfile
├── README.md
├── docker-compose.yml
├── package.json
└── src
├── lib
├── caches.js
├── manager.js
├── model-map.js
├── tools.js
└── upload.js
├── routes
├── chat.js
├── models.js
└── verify.js
└── server.js
/.gitignore:
--------------------------------------------------------------------------------
1 | # Dependency directories
2 | node_modules/
3 |
4 | # Optional npm cache directory
5 | .npm
6 |
7 | # dotenv environment variable files
8 | .env
9 | .env.local
10 | .env.development.local
11 | .env.test.local
12 | .env.production.local
13 |
14 | # Config directory
15 | config/
16 |
17 | # Log files
18 | logs/
19 | *.log
20 | npm-debug.log*
21 |
22 | package-lock.json
23 |
24 | # Playwright
25 | /test-results/
26 | /playwright-report/
27 | /blob-report/
28 | /playwright/.cache/
29 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM node:lts-alpine

# Set the working directory
WORKDIR /app

# Copy package.json and package-lock.json first so the dependency layer
# is cached independently of source-code changes
COPY package*.json ./

# Install dependencies
RUN npm install

# Copy the application source
COPY . .

# Expose the HTTP port (matches the default in docker-compose.yml)
EXPOSE 3000

# Start the server ("start" runs node src/server.js per package.json)
CMD ["npm", "start"]
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # 🚀 PromptLayer API 代理服务
4 |
5 | [](https://github.com/Rfym21/PromptlayerProxy)
6 | [](https://hub.docker.com/r/rfym21/promptlayer-proxy)
7 |
8 | *一个强大的 PromptLayer API 代理服务,支持多种主流 AI 模型*
9 |
10 | **🔗 [交流群](https://t.me/nodejs_project) | 🐳 [Docker Hub](https://hub.docker.com/r/rfym21/promptlayer-proxy)**
11 |
12 |
13 |
14 | ## ✨ 功能特点
15 |
16 |
17 |
18 | | 功能 | 状态 | 描述 |
19 | |------|------|------|
20 | | 🔄 **OpenAI API 兼容** | ✅ | 完全兼容 OpenAI API 格式 |
21 | | 🌊 **流式输出** | ✅ | 支持实时流式响应 |
22 | | 🖼️ **图像处理** | ✅ | 支持图像上传和识别 |
23 | | ⚖️ **负载均衡** | ✅ | 多账户轮询负载均衡 |
24 | | 🐳 **容器化部署** | ✅ | Docker 一键部署 |
25 | | 🔄 **自动刷新** | ✅ | 智能 Token 自动刷新 |
26 | | 🛠️ **Tools 支持** | ✅ | 支持Tools参数 |
27 | | 🔌 **其他参数(温度,Max_Tokens)** | ✅ | 支持配置其他参数,设置的参数将覆盖默认参数 |
28 |
29 |
30 |
31 | ---
32 |
33 | ## 🤖 支持的模型
34 |
35 |
36 |
37 | | 🏷️ 模型名称 | 📊 最大输出长度 | 🧠 思考长度 | 📈 类型 |
38 | |-----------|-------------|---------|-------|
39 | | 🔮 `claude-3-7-sonnet-20250219` | `64,000` | `-` | Anthropic |
40 | | 🧠 `claude-3-7-sonnet-20250219-thinking` | `64,000` | `32,000` | Anthropic |
41 | | 🔮 `claude-sonnet-4-20250514` | `64,000` | `-` | Anthropic |
42 | | 🧠 `claude-sonnet-4-20250514-thinking` | `64,000` | `32,000` | Anthropic |
43 | | 🔮 `claude-opus-4-20250514` | `32,000` | `-` | Anthropic |
44 | | 🧠 `claude-opus-4-20250514-thinking` | `32,000` | `16,000` | Anthropic |
45 | | 🤖 `o4-mini` | `100,000` | `-` | OpenAI |
46 | | 🤖 `o3` | `100,000` | `-` | OpenAI |
47 | | 🤖 `o3-mini` | `100,000` | `-` | OpenAI |
48 | | 🤖 `chatgpt-4o-latest` | `-` | `-` | OpenAI |
49 | | 🤖 `gpt-4o` | `-` | `-` | OpenAI |
50 | | 🤖 `gpt-4o-mini` | `-` | `-` | OpenAI |
51 | | 🤖 `gpt-4o-search-preview` | `-` | `-` | OpenAI |
52 | | 🤖 `gpt-4o-mini-search-preview` | `-` | `-` | OpenAI |
53 | | 🤖 `gpt-4.1` | `-` | `-` | OpenAI |
54 | | 🤖 `gpt-4.1-mini` | `-` | `-` | OpenAI |
55 | | 🤖 `gpt-4.1-nano` | `-` | `-` | OpenAI |
56 | | 🤖 `gpt-4.5-preview` | `-` | `-` | OpenAI |
57 |
58 |
59 |
60 | ---
61 |
62 | ## 🚀 快速开始
63 |
64 | ### 方式一:🐳 Docker Compose(推荐)
65 |
66 | #### 📥 **Step 1**: 下载配置文件
67 |
68 | ```bash
69 | curl -o docker-compose.yml https://raw.githubusercontent.com/Rfym21/PromptlayerProxy/refs/heads/main/docker-compose.yml
70 | ```
71 |
72 | #### ⚙️ **Step 2**: 配置环境变量
73 |
74 | 在 `docker-compose.yml` 文件中设置以下参数:
75 |
76 | ```yaml
77 | services:
78 | promptlayer-proxy:
79 | image: rfym21/promptlayer-proxy:latest
80 | container_name: promptlayer-proxy
81 | restart: always
82 | ports:
83 | - "3000:3000"
84 | environment:
85 | # 🔐 PromptLayer 账号密码
86 | - ACCOUNTS=your_account1:your_password1,your_account2:your_password2...
87 | # 🔑 API 认证密钥
88 | - AUTH_TOKEN=your_auth_token_here
89 | ```
90 |
91 | #### 🚀 **Step 3**: 启动服务
92 |
93 | ```bash
94 | docker-compose up -d
95 | ```
96 |
97 | ---
98 |
99 | ### 方式二:🐳 Docker CLI
100 |
101 | ```bash
102 | docker run -d \
103 | --name promptlayer-proxy \
104 | -p 3000:3000 \
105 | -e ACCOUNTS=your_account:your_password \
106 | -e AUTH_TOKEN=your_auth_token_here \
107 | rfym21/promptlayer-proxy:latest
108 | ```
109 |
110 | ---
111 |
112 | ### 方式三:💻 本地开发
113 |
114 | #### 📦 **Step 1**: 安装依赖
115 |
116 | ```bash
117 | npm install
118 | ```
119 |
120 | #### 📝 **Step 2**: 环境配置
121 |
122 | 创建 `.env` 文件:
123 |
124 | ```env
125 | ACCOUNTS=your_account:your_password
126 | AUTH_TOKEN=your_auth_token_here
127 | ```
128 |
129 | #### 🏃 **Step 3**: 启动开发模式
130 |
131 | ```bash
132 | npm run dev
133 | ```
134 |
135 | ---
136 |
137 |
138 |
139 | ## 💬 交流与支持
140 |
141 | [](https://t.me/nodejs_project)
142 |
143 |
144 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | promptlayer-proxy:
3 | image: rfym21/promptlayer-proxy:latest
4 | container_name: promptlayer-proxy
5 | restart: always
6 | ports:
7 | - "3000:3000"
8 | environment:
9 | - ACCOUNTS=your_account:your_password # 填入promptlayer账号密码,账号密码之间用:隔开,多个账号用逗号分隔
10 | - AUTH_TOKEN=your_auth_token_here # 设置API认证密钥
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "promptlayer-proxy",
3 | "version": "3.3.0",
4 | "description": "",
5 | "main": "src/server.js",
6 | "scripts": {
7 | "start": "node src/server.js",
8 | "dev": "nodemon src/server.js"
9 | },
10 | "keywords": [
11 | "api",
12 | "promptlayer",
13 | "express"
14 | ],
15 | "author": "",
16 | "license": "ISC",
17 | "dependencies": {
18 | "axios": "^1.6.5",
19 | "dotenv": "^16.3.1",
20 | "express": "^4.18.2",
21 | "form-data": "^4.0.2",
22 | "uuid": "^11.1.0",
23 | "ws": "^8.16.0"
24 | },
25 | "devDependencies": {
26 | "@types/node": "^22.15.23",
27 | "nodemon": "^3.0.3"
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/src/lib/caches.js:
--------------------------------------------------------------------------------
1 | const crypto = require('crypto')
2 |
3 | const imageCache = new Map()
4 |
// Derive a stable cache key from an image's base64 payload.
// A data-URI prefix ("data:image/png;base64,....") is stripped first so the
// same image hashes identically with or without the prefix.
function computeHash(base64Data) {
  const marker = 'base64,'
  const payload = base64Data.includes(marker)
    ? base64Data.split(marker)[1]
    : base64Data

  const hasher = crypto.createHash('sha256')
  hasher.update(payload)
  return hasher.digest('hex')
}
14 |
// True when an upload URL for this image is already cached.
function hasImage(base64Data) {
  return imageCache.has(computeHash(base64Data))
}
19 |
// Return the cached upload URL for this image, or null when absent.
function getImageUrl(base64Data) {
  const key = computeHash(base64Data)
  return imageCache.has(key) ? imageCache.get(key) : null
}
27 |
// Record the upload URL for this image; an existing entry is never overwritten.
function addImage(base64Data, imageUrl) {
  const key = computeHash(base64Data)
  if (imageCache.has(key)) {
    return
  }
  imageCache.set(key, imageUrl)
}
34 |
35 |
36 |
37 | module.exports = {
38 | hasImage,
39 | getImageUrl,
40 | addImage
41 | }
--------------------------------------------------------------------------------
/src/lib/manager.js:
--------------------------------------------------------------------------------
1 | const axios = require("axios")
2 | require('dotenv').config()
3 |
/**
 * Round-robin pool of PromptLayer accounts.
 *
 * NOTE(review): although login() exists, init() stores each comma-separated
 * ACCOUNTS entry directly as `token` and never calls login — so ACCOUNTS
 * entries are treated as bearer tokens, not "user:password" pairs as the
 * README suggests. Confirm which format is intended.
 */
class Manager {
  constructor(accounts) {
    this.accounts = []
    // init() contains no awaits, so the pool is populated synchronously here.
    this.init(accounts)
    this.current_account = 0
    // Rebuild the whole pool every 5 days so stale tokens are re-initialized.
    this.interval = setInterval(() => {
      this.refreshToken()
    }, 1000 * 60 * 60 * 24 * 5)
  }

  /**
   * Parse the comma-separated ACCOUNTS value into account slots.
   * The remaining fields are filled lazily by initAccount().
   */
  async init(accounts) {
    const entries = accounts.split(",").filter(entry => entry.trim() !== "")
    for (const token of entries) {
      this.accounts.push({
        username: null,
        password: null,
        token,
        clientId: null,
        workspaceId: null,
        access_token: null,
        refresh: null // per-account token-refresh interval handle
      })
    }
  }

  /**
   * Exchange email/password for an access token.
   * Retries after 1s on HTTP 429; returns false on any other failure.
   */
  async login(username, password) {
    try {
      const response = await axios.post("https://api.promptlayer.com/login", {
        email: username,
        password: password
      }, {
        headers: {
          "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36 Edg/136.0.0.0"
        }
      })

      if (response.data) {
        return response.data.access_token
      }
      return false
    } catch (error) {
      // Fix: axios puts the HTTP status on error.response.status; only newer
      // axios versions mirror it onto error.status.
      if ((error.response?.status ?? error.status) === 429) {
        await new Promise(resolve => setTimeout(resolve, 1000))
        return this.login(username, password)
      }
      return false
    }
  }

  /**
   * Request a realtime (Ably) token + clientId for the websocket channel.
   * @returns {{access_token: string, clientId: string}|false} false on failure.
   */
  async getClientId(token) {
    try {
      const response = await axios.post("https://api.promptlayer.com/ws-token-request", null, {
        headers: {
          Authorization: "Bearer " + token
        }
      })
      if (response.data.success) {
        const access_token = response.data.token_details.token
        const clientId = response.data.token_details.clientId
        return { access_token, clientId }
      }
      // Fix: previously fell through and returned undefined, which made
      // callers that destructure the result throw a TypeError.
      return false
    } catch (error) {
      return false
    }
  }

  /**
   * Fetch the first workspace id for this token, or false on failure.
   */
  async getWorkspaceId(token) {
    try {
      const response = await axios.get("https://api.promptlayer.com/workspaces", {
        headers: {
          Authorization: "Bearer " + token
        }
      })
      if (response.data.success && response.data.workspaces.length > 0) {
        return response.data.workspaces[0].id
      }
      // Fix: explicit failure instead of an implicit undefined return.
      return false
    } catch (error) {
      return false
    }
  }

  /**
   * Populate clientId / workspaceId / access_token for one account and start
   * its 30-minute refresh timer. Returns the account, or false on failure.
   */
  async initAccount(account) {
    // Fix: guard before destructuring — getClientId may return false.
    const clientInfo = await this.getClientId(account.token)
    if (!clientInfo || !clientInfo.clientId || !clientInfo.access_token) {
      return false
    }
    const workspaceId = await this.getWorkspaceId(account.token)
    if (!workspaceId) {
      return false
    }
    account.clientId = clientInfo.clientId
    account.workspaceId = workspaceId
    account.access_token = clientInfo.access_token
    // Fix: clear any previous timer so re-initialization does not stack
    // intervals (and leak them) on the same account.
    if (account.refresh) {
      clearInterval(account.refresh)
    }
    account.refresh = setInterval(async () => {
      const refreshed = await this.getClientId(account.token)
      // Fix: keep the old credentials instead of clobbering them with
      // undefined when a refresh attempt fails.
      if (refreshed) {
        account.access_token = refreshed.access_token
        account.clientId = refreshed.clientId
        console.log(`${account.username} 刷新token成功`)
      }
    }, 1000 * 60 * 30)
    return account
  }

  /**
   * Return the next usable account (round-robin), lazily initializing it.
   * Returns null when every account in the pool fails to initialize.
   * @param {number} attempts internal recursion guard — do not pass.
   */
  async getAccount(attempts = 0) {
    // Fix: the previous version recursed forever when all accounts were
    // broken; stop after one full pass over the pool.
    if (attempts >= this.accounts.length) {
      console.error('没有可用的账户')
      return null
    }

    const account = this.accounts[this.current_account]
    if (!account) {
      console.error('没有可用的账户')
      return null
    }

    if (!account.token || !account.access_token || !account.clientId || !account.workspaceId) {
      console.log(`初始化账户: ${account.username}`)
      const initialized = await this.initAccount(account)
      if (!initialized) {
        console.error(`账户初始化失败: ${account.username}`)
        // Advance to the next slot and try again (bounded by `attempts`).
        this.current_account = (this.current_account + 1) % this.accounts.length
        return await this.getAccount(attempts + 1)
      }
    }

    // NOTE(review): this logs the raw bearer token — consider redacting.
    console.log(`当前账户: ${account.username}, Token: ${account.token}`)
    this.current_account = (this.current_account + 1) % this.accounts.length
    return account
  }

  /**
   * Drop and rebuild the whole pool from the ACCOUNTS env variable.
   */
  async refreshToken() {
    // Fix: stop each account's refresh timer before discarding it,
    // otherwise the old intervals keep firing forever (timer leak).
    for (const account of this.accounts) {
      if (account.refresh) {
        clearInterval(account.refresh)
      }
    }
    this.accounts = []
    this.init(process.env.ACCOUNTS)
  }

}
152 |
153 |
// Fail fast when required configuration is missing. An empty AUTH_TOKEN is
// rejected just like an unset one (previously only `undefined` was caught,
// so AUTH_TOKEN="" slipped through and would effectively disable auth).
if (!process.env.ACCOUNTS || !process.env.AUTH_TOKEN) {
  console.error("ACCOUNTS 或 AUTH_TOKEN 未设置")
  process.exit(1)
}

// Single shared pool instance for the whole process.
const manager = new Manager(process.env.ACCOUNTS)

module.exports = manager
162 |
--------------------------------------------------------------------------------
/src/lib/model-map.js:
--------------------------------------------------------------------------------
// Maps the public model names exposed by this proxy to the PromptLayer
// playground model configuration: upstream provider, upstream model name,
// and the default request parameters sent for that model.
//
// NOTE(review): these entries are plain, shared, mutable objects —
// routes/chat.js writes request overrides directly into `parameters`,
// so callers should clone an entry before mutating it.
const modelMap = {
  // ---- Anthropic Claude models ----
  // "-thinking" variants enable extended thinking with a budget_tokens cap.
  "claude-3-7-sonnet-20250219": {
    "provider": "anthropic",
    "name": "claude-3-7-sonnet-latest",
    "model_config_display_name": null,
    "parameters": {
      "max_tokens": 64000,
      "temperature": 1,
      "top_k": 0,
      "top_p": 0
    }
  },
  "claude-3-7-sonnet-20250219-thinking": {
    "provider": "anthropic",
    "name": "claude-3-7-sonnet-latest",
    "model_config_display_name": null,
    "parameters": {
      "max_tokens": 64000,
      "thinking": {
        "type": "enabled",
        "budget_tokens": 32000
      }
    }
  },
  "claude-sonnet-4-20250514": {
    "provider": "anthropic",
    "name": "claude-sonnet-4-20250514",
    "model_config_display_name": null,
    "parameters": {
      "max_tokens": 64000,
      "temperature": 1,
      "top_k": 0,
      "top_p": 0
    }
  },
  "claude-sonnet-4-20250514-thinking": {
    "provider": "anthropic",
    "name": "claude-sonnet-4-20250514",
    "model_config_display_name": null,
    "parameters": {
      "max_tokens": 64000,
      "thinking": {
        "type": "enabled",
        "budget_tokens": 32000
      }
    }
  },
  "claude-opus-4-20250514": {
    "provider": "anthropic",
    "name": "claude-opus-4-20250514",
    "model_config_display_name": null,
    "parameters": {
      "max_tokens": 32000,
      "temperature": 1,
      "top_k": 0,
      "top_p": 0
    }
  },
  "claude-opus-4-20250514-thinking": {
    "provider": "anthropic",
    "name": "claude-opus-4-20250514",
    "model_config_display_name": null,
    "parameters": {
      "max_tokens": 32000,
      "thinking": {
        "type": "enabled",
        "budget_tokens": 16000
      }
    }
  },
  // ---- OpenAI models ----
  // Reasoning models (o3 / o3-mini / o4-mini) use reasoning_effort and
  // max_completion_tokens instead of temperature/top_p.
  "o4-mini": {
    "provider": "openai",
    "name": "o4-mini",
    "model_config_display_name": null,
    "parameters": {
      "response_format": {
        "type": "text"
      },
      "reasoning_effort": "high",
      "max_completion_tokens": 100000
    }
  },
  "chatgpt-4o-latest": {
    "provider": "openai",
    "name": "chatgpt-4o-latest",
    "model_config_display_name": null,
    "parameters": {
      "temperature": 1,
      "seed": 0,
      "response_format": null,
      "top_p": 1,
      "frequency_penalty": 0,
      "presence_penalty": 0
    }
  },
  "gpt-4.1": {
    "provider": "openai",
    "name": "gpt-4.1",
    "model_config_display_name": null,
    "parameters": {
      "temperature": 1,
      "seed": 0,
      "response_format": null,
      "top_p": 1
    }
  },
  "gpt-4.1-mini": {
    "provider": "openai",
    "name": "gpt-4.1-mini",
    "model_config_display_name": null,
    "parameters": {
      "temperature": 1,
      "seed": 0,
      "response_format": null,
      "top_p": 1
    }
  },
  "gpt-4.1-nano": {
    "provider": "openai",
    "name": "gpt-4.1-nano",
    "model_config_display_name": null,
    "parameters": {
      "temperature": 1,
      "seed": 0,
      "response_format": null,
      "top_p": 1
    }
  },
  "gpt-4.5-preview": {
    "provider": "openai",
    "name": "gpt-4.5-preview",
    "model_config_display_name": null,
    "parameters": {
      "temperature": 1,
      "seed": 0,
      "response_format": null,
      "top_p": 1,
      "frequency_penalty": 0,
      "presence_penalty": 0
    }
  },
  "o3": {
    "provider": "openai",
    "name": "o3",
    "model_config_display_name": null,
    "parameters": {
      "response_format": {
        "type": "text"
      },
      "reasoning_effort": "high",
      "max_completion_tokens": 100000
    }
  },
  // Search-preview models carry web_search_options with a fixed approximate
  // New York location.
  "gpt-4o-search-preview": {
    "provider": "openai",
    "name": "gpt-4o-search-preview",
    "model_config_display_name": null,
    "parameters": {
      "response_format": null,
      "web_search_options": {
        "search_context_size": "high",
        "user_location": {
          "approximate": {
            "city": "New York",
            "country": "US",
            "region": "New York",
            "timezone": "America/New_York"
          },
          "type": "approximate"
        }
      }
    }
  },
  "gpt-4o-mini-search-preview": {
    "provider": "openai",
    "name": "gpt-4o-mini-search-preview",
    "model_config_display_name": null,
    "parameters": {
      "response_format": null,
      "web_search_options": {
        "search_context_size": "medium",
        "user_location": {
          "approximate": {
            "city": "New York",
            "country": "US",
            "region": "New York",
            "timezone": "America/New_York"
          },
          "type": "approximate"
        }
      }
    }
  },
  // "gpt-4o" is pinned to a dated upstream snapshot.
  "gpt-4o": {
    "provider": "openai",
    "name": "gpt-4o-2024-11-20",
    "model_config_display_name": null,
    "parameters": {
      "temperature": 1,
      "seed": 0,
      "response_format": null,
      "top_p": 1,
      "frequency_penalty": 0,
      "presence_penalty": 0
    }
  },
  "gpt-4o-mini": {
    "provider": "openai",
    "name": "gpt-4o-mini",
    "model_config_display_name": null,
    "parameters": {
      "temperature": 1,
      "seed": 0,
      "response_format": null,
      "top_p": 1,
      "frequency_penalty": 0,
      "presence_penalty": 0
    }
  },
  "o3-mini": {
    "provider": "openai",
    "name": "o3-mini",
    "model_config_display_name": null,
    "parameters": {
      "response_format": {
        "type": "text"
      },
      "reasoning_effort": "high",
      "max_completion_tokens": 100000
    }
  }

}

module.exports = modelMap
236 |
--------------------------------------------------------------------------------
/src/lib/tools.js:
--------------------------------------------------------------------------------
// Awaitable pause: resolves after roughly `ms` milliseconds.
const sleep = (ms) => {
  return new Promise((resolve) => {
    setTimeout(resolve, ms)
  })
}
2 |
// Report whether `str` is parseable as JSON (without throwing).
function isJsonString(str) {
  let parseable = true
  try {
    JSON.parse(str)
  } catch (err) {
    parseable = false
  }
  return parseable
}
11 |
12 |
13 | module.exports = {
14 | sleep,
15 | isJsonString
16 | }
--------------------------------------------------------------------------------
/src/lib/upload.js:
--------------------------------------------------------------------------------
1 | const axios = require('axios')
2 | const FormData = require('form-data')
3 | const imageCache = require('./caches')
4 | const manager = require('./manager')
5 |
6 |
/**
 * Upload an image buffer to PromptLayer's file store.
 *
 * Consults the SHA-256-keyed cache in lib/caches first so the same image is
 * never uploaded twice. Returns the upstream response object (expected
 * shape: { success, file_url }) or { success: false, error } on failure.
 *
 * @param {Buffer} fileBuffer raw image bytes (sent as image/png)
 */
async function uploadFileBuffer(fileBuffer) {
  try {
    // Fix: check the cache BEFORE reserving an account — previously every
    // cache hit still consumed a slot from the account rotation.
    const base64Data = fileBuffer.toString('base64')
    const cachedUrl = imageCache.getImageUrl(base64Data)
    if (cachedUrl) {
      console.log('使用缓存的图片URL:', cachedUrl)
      return { success: true, file_url: cachedUrl }
    }

    const account = await manager.getAccount()
    if (!account) {
      console.error('无法获取有效的账户')
      return { success: false, error: '账户获取失败' }
    }

    const authToken = account.token
    if (!authToken) {
      console.error('无法获取有效的认证token')
      return { success: false, error: '认证失败' }
    }

    // Build the multipart form; upstream expects a named PNG part.
    const form = new FormData()
    form.append('file', fileBuffer, {
      filename: `image_${Date.now()}.png`,
      contentType: 'image/png'
    })

    // Browser-like headers so the request matches dashboard traffic.
    const headers = {
      ...form.getHeaders(),
      'Authorization': `Bearer ${authToken}`,
      'Accept': '*/*',
      'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
      'Origin': 'https://dashboard.promptlayer.com',
      'Referer': 'https://dashboard.promptlayer.com/',
      'Sec-Ch-Ua': '"Chromium";v="136", "Microsoft Edge";v="136", "Not.A/Brand";v="99"',
      'Sec-Ch-Ua-Mobile': '?0',
      'Sec-Ch-Ua-Platform': '"Windows"',
      'Sec-Fetch-Dest': 'empty',
      'Sec-Fetch-Mode': 'cors',
      'Sec-Fetch-Site': 'same-site',
      'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36 Edg/136.0.0.0',
    }

    console.log('开始上传图片,大小:', fileBuffer.length, 'bytes')

    const response = await axios.post('https://api.promptlayer.com/upload', form, {
      headers,
      timeout: 30000 // 30-second timeout
    })

    // On success, remember the URL so future uploads of this image are free.
    if (response.data && response.data.success && response.data.file_url) {
      imageCache.addImage(base64Data, response.data.file_url)
      console.log('图片上传成功:', response.data.file_url)
    }

    return response.data
  } catch (error) {
    console.error('图片上传失败:', {
      status: error.response?.status,
      statusText: error.response?.statusText,
      data: error.response?.data,
      message: error.message
    })
    return { success: false, error: error.message }
  }
}
87 |
88 | module.exports = {
89 | uploadFileBuffer
90 | }
91 |
--------------------------------------------------------------------------------
/src/routes/chat.js:
--------------------------------------------------------------------------------
1 | const express = require('express')
2 | const axios = require('axios')
3 | const WebSocket = require('ws')
4 | const router = express.Router()
5 | const { v4: uuidv4 } = require('uuid')
6 | const { uploadFileBuffer } = require('../lib/upload')
7 | const verify = require('./verify')
8 | const modelMap = require('../lib/model-map')
9 |
10 |
// Convert one OpenAI content-array item into PromptLayer's template format.
// Text passes through; data-URI images are uploaded and replaced by their
// hosted URL; remote image URLs pass through; unknown items are serialized
// to text so nothing is silently dropped.
async function transformContentItem(item) {
  if (item.type === "text") {
    return {
      type: "text",
      text: item.text
    }
  }

  if (item.type === "image_url") {
    try {
      const base64Match = item.image_url.url.match(/^data:image\/\w+;base64,(.+)$/)
      if (!base64Match) {
        // Remote image: forward the URL untouched.
        return {
          type: "media",
          media: {
            "type": "image",
            "url": item.image_url.url,
            "title": "external_image"
          }
        }
      }

      const data = Buffer.from(base64Match[1], 'base64')
      const uploadResult = await uploadFileBuffer(data)
      // Fix: a failed upload previously produced a media item whose url was
      // undefined; degrade to the text placeholder instead.
      if (!uploadResult?.file_url) {
        return {
          type: "text",
          text: "[图像处理失败]"
        }
      }

      return {
        type: "media",
        media: {
          "type": "image",
          "url": uploadResult.file_url,
          "title": `image_${Date.now()}.png`
        }
      }
    } catch (error) {
      console.error("处理图像时出错:", error)
      return {
        type: "text",
        text: "[图像处理失败]"
      }
    }
  }

  // Unknown content type: keep a textual trace of it.
  return {
    type: "text",
    text: JSON.stringify(item)
  }
}

/**
 * Express middleware: rewrite req.body.messages from the OpenAI chat format
 * into PromptLayer's template format (role + content[] + f-string template).
 * Non-array `messages` short-circuits with req.processedMessages = [].
 * Errors are forwarded to next(error) with messages reset to [].
 */
async function parseMessages(req, res, next) {
  const messages = req.body.messages
  if (!Array.isArray(messages)) {
    req.processedMessages = []
    return next()
  }

  try {
    const transformedMessages = await Promise.all(messages.map(async (msg) => {
      const message = {
        role: msg.role,
        tool_calls: [],
        template_format: "f-string"
      }

      if (Array.isArray(msg.content)) {
        message.content = await Promise.all(msg.content.map(transformContentItem))
      } else {
        // Plain-string (or missing) content becomes a single text part.
        message.content = [
          {
            type: "text",
            text: msg.content || ""
          }
        ]
      }

      return message
    }))

    req.body.messages = transformedMessages
    return next()
  } catch (error) {
    console.error("处理消息时出错:", error.status)
    req.body.messages = []
    return next(error)
  }
}
96 |
/**
 * Create a PromptLayer playground session for this request.
 * Stores the id on req.chatID and returns it; on failure sends a 500
 * (OpenAI-style error body) and returns false.
 */
async function getChatID(req, res) {
  try {
    const url = 'https://api.promptlayer.com/api/dashboard/v2/workspaces/' + req.account.workspaceId + '/playground_sessions'
    const headers = { Authorization: "Bearer " + req.account.token }
    // Fix: clone the map entry — modelMap entries are shared module-level
    // objects, so writing request overrides into them leaked one request's
    // parameters into every subsequent request.
    const model_data = structuredClone(
      modelMap[req.body.model] ? modelMap[req.body.model] : modelMap["claude-3-7-sonnet-20250219"]
    )
    let data = {
      "id": uuidv4(),
      "name": "Not implemented",
      "prompt_blueprint": {
        "inference_client_name": null,
        "metadata": {
          "model": model_data
        },
        "prompt_template": {
          "type": "chat",
          "messages": req.body.messages,
          "tools": req.body.tools || null,
          "tool_choice": req.body.tool_choice || null,
          "input_variables": [],
          "functions": [],
          "function_call": null
        },
        "provider_base_url_name": null
      },
      "input_variables": []
    }

    // Copy caller-supplied parameters over the model defaults.
    for (const item in req.body) {
      if (item === "messages" || item === "model" || item === "stream") {
        continue
      } else if (Object.hasOwn(model_data.parameters, item)) {
        // Fix: the old truthiness test skipped parameters whose default is
        // 0 or null (e.g. top_k, response_format), making them impossible
        // to override.
        if (item === "thinking" && req.body[item].type === "disabled") { continue }
        model_data.parameters[item] = req.body[item]
      }
    }

    data.prompt_blueprint.metadata.model = model_data
    // Anthropic requires budget_tokens <= max_tokens; clamp when the caller
    // lowered max_tokens below the thinking budget.
    if (model_data.parameters.max_tokens && model_data.parameters.thinking?.budget_tokens && model_data.parameters.max_tokens < model_data.parameters.thinking.budget_tokens) {
      model_data.parameters.thinking.budget_tokens = model_data.parameters.max_tokens
    }
    console.log("模型参数 => ", data.prompt_blueprint.metadata.model)

    const response = await axios.put(url, data, { headers })
    if (response.data.success) {
      console.log(`生成会话ID成功: ${response.data.playground_session.id}`)
      req.chatID = response.data.playground_session.id
      return response.data.playground_session.id
    } else {
      return false
    }
  } catch (error) {
    res.status(500).json({
      "error": {
        "message": error.message || "服务器内部错误",
        "type": "server_error",
        "param": null,
        "code": "server_error"
      }
    })
    return false
  }
}
160 |
/**
 * Dispatch the run for an existing playground session (req.chatID).
 * Returns the individual run request id, false when upstream reports
 * failure, or undefined after sending a 500 on error.
 */
async function sentRequest(req, res) {
  try {
    const url = 'https://api.promptlayer.com/api/dashboard/v2/workspaces/' + req.account.workspaceId + '/run_groups'
    const headers = { Authorization: "Bearer " + req.account.token }
    // Fix: clone the shared modelMap entry instead of mutating it in place
    // (overrides were leaking across requests).
    const model_data = structuredClone(
      modelMap[req.body.model] ? modelMap[req.body.model] : modelMap["claude-3-7-sonnet-20250219"]
    )
    let data = {
      "id": uuidv4(),
      "playground_session_id": req.chatID,
      "shared_prompt_blueprint": {
        "inference_client_name": null,
        "metadata": {
          "model": model_data
        },
        "prompt_template": {
          "type": "chat",
          "messages": req.body.messages,
          "tools": req.body.tools || null,
          "tool_choice": req.body.tool_choice || null,
          "input_variables": [],
          "functions": [],
          "function_call": null
        },
        "provider_base_url_name": null
      },
      "individual_run_requests": [
        {
          "input_variables": {},
          "run_group_position": 1
        }
      ]
    }

    // Copy caller-supplied parameters over the model defaults.
    for (const item in req.body) {
      if (item === "messages" || item === "model" || item === "stream") {
        continue
      } else if (Object.hasOwn(model_data.parameters, item)) {
        // Fix: truthiness check skipped 0/null defaults (top_k, response_format).
        if (item === "thinking" && req.body[item].type === "disabled") continue
        model_data.parameters[item] = req.body[item]
      }
    }
    data.shared_prompt_blueprint.metadata.model = model_data
    if (model_data.parameters.max_tokens && model_data.parameters.thinking?.budget_tokens && model_data.parameters.max_tokens < model_data.parameters.thinking.budget_tokens) {
      // Fix: this previously wrote to data.prompt_blueprint, which does not
      // exist on this payload (it is shared_prompt_blueprint here), throwing
      // a TypeError whenever the clamp was needed.
      model_data.parameters.thinking.budget_tokens = model_data.parameters.max_tokens
    }

    const response = await axios.post(url, data, { headers })
    if (response.data.success) {
      return response.data.run_group.individual_run_requests[0].id
    } else {
      return false
    }
  } catch (error) {
    res.status(500).json({
      "error": {
        "message": error.message || "服务器内部错误",
        "type": "server_error",
        "param": null,
        "code": "server_error"
      }
    })
  }
}
224 |
// Chat completions endpoint (OpenAI-compatible).
// Flow: create a playground session, open an Ably realtime WebSocket,
// dispatch the run, then translate incremental UPDATE_LAST_MESSAGE frames
// into OpenAI stream chunks (or buffer until INDIVIDUAL_RUN_COMPLETE for
// non-streaming responses).
router.post('/v1/chat/completions', verify, parseMessages, async (req, res) => {
  try {

    // Choose response headers: SSE for streaming, JSON otherwise. Wrapped
    // in try/catch so a call after headers were already sent is harmless.
    const setHeader = () => {
      try {
        if (req.body.stream === true) {
          res.setHeader('Content-Type', 'text/event-stream')
          res.setHeader('Cache-Control', 'no-cache')
          res.setHeader('Connection', 'keep-alive')
        } else {
          res.setHeader('Content-Type', 'application/json')
        }
      } catch (error) {
        // console.error("设置响应头时出错:", error)
      }
    }

    const { access_token, clientId } = req.account
    // Create the playground session (stores its id on req.chatID).
    await getChatID(req, res)

    // Ably subscription frame sent once the socket opens.
    const sendAction = `{"action":10,"channel":"user:${clientId}","params":{"agent":"react-hooks/2.0.2"}}`
    // Build the Ably realtime WebSocket URL.
    const wsUrl = `wss://realtime.ably.io/?access_token=${encodeURIComponent(access_token)}&clientId=${clientId}&format=json&heartbeats=true&v=3&agent=ably-js%2F2.0.2%20browser`
    // Open the WebSocket connection.
    const ws = new WebSocket(wsUrl)

    // Per-request streaming state. Upstream frames carry cumulative text;
    // deltas are derived by stripping the previously-seen content.
    let ThinkingLastContent = ""   // cumulative "thinking" text seen so far
    let TextLastContent = ""       // cumulative answer text seen so far
    let ThinkingStart = false      // true once the first thinking chunk arrived
    let ThinkingEnd = false        // true once answer text follows thinking
    let RequestID = ""             // id of our run; other frames are ignored
    let MessageID = "chatcmpl-" + uuidv4()
    // Template for every streamed chunk; only delta.content changes.
    let streamChunk = {
      "id": MessageID,
      "object": "chat.completion.chunk",
      "system_fingerprint": "fp_44709d6fcb",
      "created": Math.floor(Date.now() / 1000),
      "model": req.body.model,
      "choices": [
        {
          "index": 0,
          "delta": {
            "content": null
          },
          "finish_reason": null
        }
      ]
    }

    ws.on('open', async () => {
      ws.send(sendAction)
      // Dispatch the run only after the channel subscription is sent,
      // so no frames are missed.
      RequestID = await sentRequest(req, res)
      setHeader()
    })

    ws.on('message', async (data) => {
      try {
        data = data.toString()
        // Ably wraps payloads: messages[0].data is itself a JSON string.
        // NOTE(review): JSON.parse(ContentText?.data) throws for frames
        // without data (e.g. heartbeats); the catch below swallows those.
        let ContentText = JSON.parse(data)?.messages?.[0]
        let ContentData = JSON.parse(ContentText?.data)
        const isRequestID = ContentData?.individual_run_request_id
        if (isRequestID != RequestID || !isRequestID) return

        let output = ""

        if (ContentText?.name === "UPDATE_LAST_MESSAGE") {
          const MessageArray = ContentData?.payload?.message?.content
          for (const item of MessageArray) {

            if (item.type === "text") {
              // NOTE(review): replace(prev, "") assumes each frame's text
              // extends the previous one; confirm upstream always sends
              // cumulative snapshots.
              output = item.text.replace(TextLastContent, "")
              if (ThinkingStart && !ThinkingEnd) {
                ThinkingEnd = true
                output = `${output}\n\n`
              }
              TextLastContent = item.text
            }
            else if (item.type === "thinking" && MessageArray.length === 1) {
              output = item.thinking.replace(ThinkingLastContent, "")
              if (!ThinkingStart) {
                ThinkingStart = true
                output = `\n\n${output}`
              }
              ThinkingLastContent = item.thinking
            }

          }

          if (req.body.stream === true) {
            streamChunk.choices[0].delta.content = output
            res.write(`data: ${JSON.stringify(streamChunk)}\n\n`)
          }

        }
        else if (ContentText?.name === "INDIVIDUAL_RUN_COMPLETE") {

          // Non-streaming: assemble the full body from the accumulated text.
          if (req.body.stream !== true) {
            output = ThinkingLastContent ? `\n\n${ThinkingLastContent}\n\n\n\n${TextLastContent}` : TextLastContent
          }

          // Nothing was produced: surface a diagnostic message to the client.
          // NOTE(review): this res.write also runs in the non-streaming case,
          // before res.json below — confirm that is intended.
          if (ThinkingLastContent === "" && TextLastContent === "") {
            output = "该模型在发送请求时遇到错误: \n1. 请检查请求参数,模型支持参数和默认参数可在/v1/models下查看\n2. 参数设置大小是否超过模型限制\n3. 模型当前官网此模型可能负载过高,可以切换别的模型尝试,这属于正常现象\n4. Anthropic系列模型的temperature的取值为0-1,请勿设置超过1的值,思考模型的 budget_tokens 不可小于 max_tokens \n5. 交流与支持群: https://t.me/nodejs_project"
            streamChunk.choices[0].delta.content = output
            res.write(`data: ${JSON.stringify(streamChunk)}\n\n`)
          }

          if (!req.body.stream || req.body.stream !== true) {
            // Non-streaming: send one complete chat.completion object.
            let responseJson = {
              "id": MessageID,
              "object": "chat.completion",
              "created": Math.floor(Date.now() / 1000),
              "system_fingerprint": "fp_44709d6fcb",
              "model": req.body.model,
              "choices": [
                {
                  "index": 0,
                  "message": {
                    "role": "assistant",
                    "content": output
                  },
                  "finish_reason": "stop"
                }
              ],
              "usage": {
                "prompt_tokens": 0,
                "completion_tokens": 0,
                "total_tokens": 0
              }
            }

            res.json(responseJson)
            ws.close()
            return
          } else {
            // Streaming: emit the final empty-delta chunk and [DONE] marker.
            let finalChunk = {
              "id": MessageID,
              "object": "chat.completion.chunk",
              "system_fingerprint": "fp_44709d6fcb",
              "created": Math.floor(Date.now() / 1000),
              "model": req.body.model,
              "choices": [
                {
                  "index": 0,
                  "delta": {},
                  "finish_reason": "stop"
                }
              ]
            }

            res.write(`data: ${JSON.stringify(finalChunk)}\n\n`)
            res.write(`data: [DONE]\n\n`)
            res.end()
          }
          ws.close()
        }

      } catch (err) {
        // Malformed/irrelevant frames (heartbeats, presence) land here.
        // console.error("处理WebSocket消息出错:", err)
      }
    })

    ws.on('error', (err) => {
      // Standard OpenAI-style error response.
      res.status(500).json({
        "error": {
          "message": err.message,
          "type": "server_error",
          "param": null,
          "code": "server_error"
        }
      })
    })

    // Hard cap: abort the socket after 5 minutes if it is still open.
    setTimeout(() => {
      if (ws.readyState === WebSocket.OPEN) {
        ws.close()
        if (!res.headersSent) {
          // Standard OpenAI-style timeout error response.
          res.status(504).json({
            "error": {
              "message": "请求超时",
              "type": "timeout",
              "param": null,
              "code": "timeout_error"
            }
          })
        }
      }
    }, 300 * 1000)

  } catch (error) {
    console.error("错误:", error)
    // Standard OpenAI-style generic error response.
    res.status(500).json({
      "error": {
        "message": error.message || "服务器内部错误",
        "type": "server_error",
        "param": null,
        "code": "server_error"
      }
    })
  }
})
434 |
435 | module.exports = router
436 |
--------------------------------------------------------------------------------
/src/routes/models.js:
--------------------------------------------------------------------------------
1 | const express = require('express')
2 | const router = express.Router()
3 | const modelMap = require('../lib/model-map')
4 |
// GET /v1/models — OpenAI-compatible model listing.
// Each entry carries the base model fields plus any per-model default
// parameters declared in the model map (flattened onto the entry).
router.get('/v1/models', (req, res) => {

  const data = Object.entries(modelMap).map(([id, meta]) => ({
    id,
    object: "model",
    created: 1626777600,
    owned_by: meta.provider,
    // Flatten optional per-model parameters onto the entry itself.
    ...(meta.parameters || {})
  }))

  res.json({
    object: "list",
    data
  })
})
27 |
28 | module.exports = router
--------------------------------------------------------------------------------
/src/routes/verify.js:
--------------------------------------------------------------------------------
1 | const manager = require('../lib/manager')
// Auth middleware: validates the Bearer token against AUTH_TOKEN and, on
// success, attaches a pooled upstream account to `req.account` before
// passing control to the next handler. Responds 401 on bad/missing
// credentials and 503 when no account can be obtained.
const verify = async (req, res, next) => {
  const authHeader = req.headers.authorization

  // Guard: no Authorization header at all.
  if (!authHeader) {
    return res.status(401).json({ message: 'Unauthorized' })
  }

  // Strip the "Bearer " prefix to obtain the raw token.
  const token = authHeader.replace('Bearer ', '')

  // Guard: anything but the configured token is unauthorized.
  if (token !== process.env.AUTH_TOKEN) {
    return res.status(401).json({ message: 'Unauthorized' })
  }

  try {
    req.account = await manager.getAccount()
    if (!req.account) {
      return res.status(503).json({
        error: {
          message: '服务暂时不可用,无法获取有效账户',
          type: 'service_unavailable',
          code: 'account_unavailable'
        }
      })
    }
    next()
  } catch (error) {
    console.error('获取账户时出错:', error)
    return res.status(503).json({
      error: {
        message: '服务暂时不可用',
        type: 'service_unavailable',
        code: 'internal_error'
      }
    })
  }
}
39 |
40 | module.exports = verify
41 |
--------------------------------------------------------------------------------
/src/server.js:
--------------------------------------------------------------------------------
const express = require('express')

// Load environment variables FIRST, so the route modules required below
// (and anything they pull in, e.g. the account manager) can read .env
// values during their own module initialization. The original called
// dotenv.config() after requiring the routes, leaving process.env
// unpopulated while those modules loaded.
require('dotenv').config()

const modelsRoute = require('./routes/models')
const chatRoute = require('./routes/chat')

// 创建 Express 应用
const app = express()

// 中间件配置 — large body limit to allow base64-encoded image payloads.
app.use(express.json({ limit: "50mb" }))
app.use(express.urlencoded({ limit: "50mb", extended: true }))

// 注册路由
app.use(modelsRoute)
app.use(chatRoute)

// 错误处理 — an Express error handler only catches errors from middleware
// and routes registered BEFORE it, so it must come after the routes.
// (The original registered it before the routes, making it unreachable.)
app.use((err, req, res, next) => {
  console.error(err.stack)
  res.status(500).send('服务器错误')
})

// 初始化账户系统并启动服务器
const PORT = process.env.PORT || 3000

app.listen(PORT, () => {
  console.log(`服务器运行在 http://localhost:${PORT}`)
})

module.exports = app
--------------------------------------------------------------------------------