├── .github └── workflows │ └── cf-deploy.yml ├── img ├── j1.png ├── j2.png ├── j3.png ├── j4.png ├── j5.png ├── j6.png └── j7.png ├── readme.md ├── worker.js └── wrangler.toml /.github/workflows/cf-deploy.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Worker 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | repository_dispatch: 8 | 9 | jobs: 10 | deploy: 11 | runs-on: ubuntu-latest 12 | name: Deploy 13 | steps: 14 | - uses: actions/checkout@v4 15 | 16 | - name: Create D1 Database and Update Config 17 | run: | 18 | # 安装 wrangler 19 | echo "📦 安装 wrangler..." 20 | npm install -g wrangler 21 | 22 | # 检查数据库是否已存在并获取ID 23 | echo "🔍 检查数据库是否存在..." 24 | DB_LIST=$(wrangler d1 list) 25 | if echo "$DB_LIST" | grep -q "a1_db"; then 26 | echo "✅ 数据库已存在,获取数据库ID" 27 | DB_ID=$(echo "$DB_LIST" | grep "a1_db" | grep -Eo '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}') 28 | echo "获取到的数据库 ID: $DB_ID" 29 | else 30 | # 创建 D1 数据库,指定位置为 WEUR (Western Europe) 31 | echo "🔧 创建新的 D1 数据库..." 32 | DB_OUTPUT=$(wrangler d1 create a1_db --location=weur) 33 | echo "数据库创建输出:" 34 | echo "$DB_OUTPUT" 35 | 36 | # 提取数据库 ID 37 | DB_ID=$(echo "$DB_OUTPUT" | grep "database_id" | sed -E 's/.*"database_id": "([^"]+)".*/\1/') 38 | echo "📝 提取的数据库 ID: $DB_ID" 39 | fi 40 | 41 | echo "使用的数据库 ID: $DB_ID" 42 | 43 | # 更新 wrangler.toml 文件 44 | echo "✏️ 更新 wrangler.toml 文件..." 45 | sed -i "s/database_id = \".*\"/database_id = \"$DB_ID\"/" wrangler.toml 46 | 47 | # 显示更新后的 wrangler.toml 内容 48 | echo "📄 更新后的 wrangler.toml 内容:" 49 | cat wrangler.toml 50 | 51 | echo "✅ 数据库配置完成!" 52 | env: 53 | CLOUDFLARE_API_TOKEN: ${{ secrets.CF_API_TOKEN }} 54 | CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CF_ACCOUNT_ID }} 55 | 56 | - name: Build & Deploy Worker 57 | uses: cloudflare/wrangler-action@v3 58 | with: 59 | apiToken: ${{ secrets.CF_API_TOKEN }} 60 | accountId: ${{ secrets.CF_ACCOUNT_ID }} -------------------------------------------------------------------------------- /img/j1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MoYangking/openai-grok/645eeda3c34531a3c4968f7fbc01608ba15a7dda/img/j1.png -------------------------------------------------------------------------------- /img/j2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MoYangking/openai-grok/645eeda3c34531a3c4968f7fbc01608ba15a7dda/img/j2.png -------------------------------------------------------------------------------- /img/j3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MoYangking/openai-grok/645eeda3c34531a3c4968f7fbc01608ba15a7dda/img/j3.png -------------------------------------------------------------------------------- /img/j4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MoYangking/openai-grok/645eeda3c34531a3c4968f7fbc01608ba15a7dda/img/j4.png -------------------------------------------------------------------------------- /img/j5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MoYangking/openai-grok/645eeda3c34531a3c4968f7fbc01608ba15a7dda/img/j5.png -------------------------------------------------------------------------------- /img/j6.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MoYangking/openai-grok/645eeda3c34531a3c4968f7fbc01608ba15a7dda/img/j6.png -------------------------------------------------------------------------------- /img/j7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MoYangking/openai-grok/645eeda3c34531a3c4968f7fbc01608ba15a7dda/img/j7.png -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | 2 | # OpenAI Grok Cloudflare Worker 3 | 4 | ## 项目原地址 5 | 该项目基于orbitoo大佬的[grok_chat_proxy](https://github.com/orbitoo/grok_chat_proxy)我仅仅只是使其能在Workers上运行,用于转发聊天请求至目标地址,同时提供一个基于 D1 SQL 数据库的配置管理页面,实现 Cookie 轮询、模型列表返回、消息预处理等功能。 6 | 7 | ## 功能概述 8 | 9 | - **API 接口** 10 | - **GET /v1/models**:返回模型列表(模型名称:`grok-2`、`grok-3`、`grok-3-thinking`)。 11 | - **POST /v1/chat/completions**:消息转发接口。 12 | 13 | - **配置管理页面** 14 | - 访问 `/config` 时需要密码验证(密码由环境变量 `CONFIG_PASSWORD` 设置,默认为123)。 15 | - 未登录时自动重定向到 `/config/login` 登录页面。 16 | - 登录成功后在 Cookie 中写入认证信息,进入配置管理界面。 17 | - 页面支持添加 Cookie、删除单个 Cookie、删除全部 Cookies 以及切换 Temporary Mode。 18 | 19 | - **Cookie 轮询** 20 | - 使用数据库中存储的 Cookie 列表,实现对不同模型请求时的 Cookie 自动轮询。 21 | 22 | - **消息预处理** 23 | - 支持对输入消息进行预处理,自动处理特殊标识符(例如 `<|disableSearch|>`、`<|forceConcise|>`)以及格式化消息。 24 | 25 | ## 部署步骤 26 | 27 | ### 使用 GitHub Actions 一键部署至 Cloudflare Workers 28 | 29 | 点击下方按钮,将脚本部署到您的 Cloudflare Workers: 30 | 31 | [![Deploy to Cloudflare Workers](https://deploy.workers.cloudflare.com/button)](https://deploy.workers.cloudflare.com/?url=https://github.com/MoYangking/openai-grok) 32 | 33 | ### 1. 部署到 Cloudflare Workers 34 | 点击上方 **"Deploy to Cloudflare Workers"** 按钮。 35 | 36 | ### 2. 账户配置 37 | 确保您拥有 **Cloudflare 账户**,然后填写以下信息: 38 | 39 | #### **获取 Cloudflare Account ID** 40 | - 进入 [Workers Dashboard](https://dash.cloudflare.com/?to=/:account/workers) 41 | - 若遇到下图情况,先随便创建一个 Workers 部署 42 | ![](img/j1.png) 43 | - 复制 **Account ID** 并粘贴到部署页面 44 | ![](img/j2.png) 45 | 46 | #### **获取 Cloudflare API Token** 47 | - 进入 [My Profile](https://dash.cloudflare.com/?to=/:account/workers) 48 | - 创建 API Token,选择如下模板 49 | ![](img/j3.png) 50 | - **一定要添加 D1 的读写权限**(非常重要!) 51 | - **一定要添加 D1 的读写权限**(非常重要!) 52 | - **一定要添加 D1 的读写权限**(非常重要!) 53 | ![](img/j4.png) 54 | - 复制 API Token 并粘贴到部署页面 55 | 56 | ### 3. 启用 GitHub Actions 57 | - 进入 **GitHub 仓库**,点击 **Repository - Actions** 58 | ![](img/j5.png) 59 | - 启用 Actions 60 | ![](img/j6.png) 61 | 62 | ### 4. 运行部署流程 63 | - 进入 **部署流程页面**,点击 **Workflow Enabled** 64 | - 点击 **Deploy** 开始部署 65 | 66 | ### 5. 
访问部署的 Worker 67 | - 进入 **Cloudflare Workers 页面**,找到 `grok` Worker 68 | - 访问 Worker,即可看到 grok 配置页面 69 | ![](img/j7.png) 70 | - **默认密码:123**,可在 Workers 变量中修改 71 | 72 | 73 | 74 | ## 使用说明 75 | 76 | - **API 测试**: 77 | - 访问 `/v1/models` 获取模型列表。 78 | - 通过 POST 请求访问 `/v1/chat/completions` 接口进行消息转发,支持传入参数 `messages`、`model`、`stream` 等参数。 79 | 80 | - **配置管理**: 81 | - 访问 `/config` 页面进行 Cookie 管理及临时模式切换。 82 | - 若未登录,会自动重定向到 `/config/login`,输入正确密码后可管理配置。 83 | 84 | ## 其他说明 85 | 86 | - **安全性**:环境中建议修改 `CONFIG_PASSWORD` 为更高强度的密码,确保配置管理页面的安全。。 87 | 88 | --- 89 | 90 | 91 | -------------------------------------------------------------------------------- /worker.js: -------------------------------------------------------------------------------- 1 | /** 2 | * 改进后的 Cloudflare Workers 脚本 3 | * 4 | * 功能: 5 | * - API 接口: 6 | * - GET /v1/models 返回模型列表 7 | * - POST /v1/chat/completions 发送消息(需 API 密钥验证) 8 | * - POST /v1/rate-limits 检查调用频率 9 | * - 配置管理页面: 10 | * - /config 系列接口,通过环境变量 CONFIG_PASSWORD 控制访问 11 | * - 加载页面时,对每个 cookie 验证所有模型状态, 12 | * 并在页面中显示每个 cookie 的状态以及各模型状态,同时对过长的 Cookie 进行截断显示 13 | * 14 | * 使用 D1 SQL 数据库存储配置(表名:config,操作 id=1 的记录) 15 | */ 16 | 17 | const TARGET_URL = "https://grok.com/rest/app-chat/conversations/new"; 18 | const CHECK_URL = "https://grok.com/rest/rate-limits"; 19 | const MODELS = ["grok-2", "grok-3", "grok-3-thinking"]; 20 | const MODELS_TO_CHECK = ["grok-2", "grok-3", "grok-3-thinking"]; 21 | const USER_AGENTS = [ 22 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36", 23 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36", 24 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:132.0) Gecko/20100101 Firefox/132.0", 25 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.2420.81", 26 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 OPR/109.0.0.0", 27 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36", 28 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36", 29 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.0.1 Safari/605.1.15", 30 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 14.7; rv:132.0) Gecko/20100101 Firefox/132.0", 31 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 14_4_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 OPR/109.0.0.0", 32 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 14.4; rv:124.0) Gecko/20100101 Firefox/124.0", 33 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36", 34 | "Mozilla/5.0 (X11; Linux i686; rv:124.0) Gecko/20100101 Firefox/124.0", 35 | ]; 36 | 37 | /* ========== 辅助函数:请求超时 ========== */ 38 | async function fetchWithTimeout(url, options, timeout = 5000) { 39 | return Promise.race([ 40 | fetch(url, options), 41 | new Promise((_, reject) => 42 | setTimeout(() => reject(new Error("请求超时")), timeout) 43 | ), 44 | ]); 45 | } 46 | 47 | /* ========== 辅助函数:截断过长的 Cookie ========== */ 48 | function truncateCookie(cookie) { 49 | const maxLen = 30; 50 | if (cookie.length > maxLen) { 51 | return cookie.slice(0, 10) + "..." 
+ cookie.slice(-10); 52 | } 53 | return cookie; 54 | } 55 | 56 | /* ========== 数据库操作封装 ========== */ 57 | async function getConfig(env) { 58 | await env.D1_DB.prepare( 59 | `CREATE TABLE IF NOT EXISTS config ( 60 | id INTEGER PRIMARY KEY, 61 | data TEXT NOT NULL 62 | )` 63 | ).run(); 64 | 65 | let row = await env.D1_DB.prepare("SELECT data FROM config WHERE id = 1").first(); 66 | if (row && row.data) { 67 | try { 68 | return JSON.parse(row.data); 69 | } catch (e) { 70 | console.error("配置 JSON 解析错误:", e); 71 | } 72 | } 73 | const defaultConfig = { 74 | cookies: [], 75 | last_cookie_index: { "grok-2": 0, "grok-3": 0, "grok-3-thinking": 0 }, 76 | temporary_mode: true, 77 | }; 78 | await setConfig(defaultConfig, env); 79 | return defaultConfig; 80 | } 81 | 82 | async function setConfig(config, env) { 83 | const jsonStr = JSON.stringify(config); 84 | await env.D1_DB.prepare("REPLACE INTO config (id, data) VALUES (1, ?)") 85 | .bind(jsonStr) 86 | .run(); 87 | } 88 | 89 | /* ========== Cookie 轮询 ========== */ 90 | async function getNextAccount(model, env) { 91 | let config = await getConfig(env); 92 | if (!config.cookies || config.cookies.length === 0) { 93 | throw new Error("没有可用的 cookie,请先通过配置页面添加。"); 94 | } 95 | const num = config.cookies.length; 96 | const current = ((config.last_cookie_index[model] || 0) + 1) % num; 97 | config.last_cookie_index[model] = current; 98 | await setConfig(config, env); 99 | return config.cookies[current]; 100 | } 101 | 102 | /* ========== 请求头封装 ========== */ 103 | function getCommonHeaders(cookie) { 104 | return { 105 | "Accept": "*/*", 106 | "Content-Type": "application/json", 107 | "Origin": "https://grok.com", 108 | "Referer": "https://grok.com/", 109 | "Cookie": cookie, 110 | "User-Agent": USER_AGENTS[Math.floor(Math.random() * USER_AGENTS.length)], 111 | }; 112 | } 113 | 114 | /* ========== 检查调用频率 ========== */ 115 | /** 116 | * 使用指定 cookie 调用 CHECK_URL 接口,返回 JSON 数据(带超时保护) 117 | */ 118 | async function checkRateLimitWithCookie(model, cookie, isReasoning) { 119 | const headers = getCommonHeaders(cookie); 120 | const payload = { 121 | requestKind: isReasoning ? 
"REASONING" : "DEFAULT", 122 | modelName: model, 123 | }; 124 | const response = await fetchWithTimeout(CHECK_URL, { 125 | method: "POST", 126 | headers, 127 | body: JSON.stringify(payload), 128 | }); 129 | if (!response.ok) { 130 | throw new Error(`Rate limit check failed for model ${model}, status: ${response.status}`); 131 | } 132 | return await response.json(); 133 | } 134 | 135 | /** 136 | * 检查单个 cookie 的状态: 137 | * - expired:如果用模型 "grok-2" 测试失败,则认为该 cookie 已过期 138 | * - 对 MODELS_TO_CHECK 中的模型检查剩余查询次数,返回数组 rateLimitDetails 139 | * 140 | * 优化:对 "grok-2" 的调用只做一次,作为过期检测及剩余次数检测 141 | */ 142 | async function checkCookieStatus(cookie) { 143 | let rateLimitDetails = []; 144 | try { 145 | // 先测试 grok-2 146 | const dataGrok2 = await checkRateLimitWithCookie("grok-2", cookie, false); 147 | rateLimitDetails.push({ model: "grok-2", remainingQueries: dataGrok2.remainingQueries }); 148 | } catch (e) { 149 | return { expired: true, rateLimited: false, rateLimitDetails: [] }; 150 | } 151 | // 再检查 grok-3 152 | try { 153 | const dataGrok3 = await checkRateLimitWithCookie("grok-3", cookie, false); 154 | rateLimitDetails.push({ model: "grok-3", remainingQueries: dataGrok3.remainingQueries }); 155 | } catch (e) { 156 | rateLimitDetails.push({ model: "grok-3", error: e.toString(), remainingQueries: 0 }); 157 | } 158 | // 检查 grok-3-thinking 159 | try { 160 | const dataGrok3Thinking = await checkRateLimitWithCookie("grok-3", cookie, true); 161 | rateLimitDetails.push({ model: "grok-3-thinking", remainingQueries: dataGrok3Thinking.remainingQueries }); 162 | } catch (e) { 163 | rateLimitDetails.push({ model: "grok-3-thinking", error: e.toString(), remainingQueries: 0 }); 164 | } 165 | const rateLimited = rateLimitDetails.every(detail => detail.remainingQueries === 0); 166 | return { expired: false, rateLimited, rateLimitDetails }; 167 | } 168 | 169 | /* ========== 消息预处理 ========== */ 170 | function magic(messages) { 171 | let disableSearch = false; 172 | let forceConcise = false; 173 | if (messages && messages.length > 0) { 174 | let first = messages[0].content; 175 | if (first.includes("<|disableSearch|>")) { 176 | disableSearch = true; 177 | first = first.replace(/<\|disableSearch\|>/g, ""); 178 | } 179 | if (first.includes("<|forceConcise|>")) { 180 | forceConcise = true; 181 | first = first.replace(/<\|forceConcise\|>/g, ""); 182 | } 183 | messages[0].content = first; 184 | } 185 | return { disableSearch, forceConcise, messages }; 186 | } 187 | 188 | function formatMessage(messages) { 189 | let roleMap = { user: "Human", assistant: "Assistant", system: "System" }; 190 | const roleInfoPattern = /\s*user:\s*([^\n]*)\s*assistant:\s*([^\n]*)\s*system:\s*([^\n]*)\s*prefix:\s*([^\n]*)\s*<\/roleInfo>\n/; 191 | let prefix = false; 192 | let firstContent = messages[0].content; 193 | let match = firstContent.match(roleInfoPattern); 194 | if (match) { 195 | roleMap = { 196 | user: match[1], 197 | assistant: match[2], 198 | system: match[3], 199 | }; 200 | prefix = match[4] === "1"; 201 | messages[0].content = firstContent.replace(roleInfoPattern, ""); 202 | } 203 | let formatted = ""; 204 | for (const msg of messages) { 205 | let role = prefix ? 
"\b" + roleMap[msg.role] : roleMap[msg.role]; 206 | formatted += `${role}: ${msg.content}\n`; 207 | } 208 | return formatted; 209 | } 210 | 211 | /* ========== API 接口 ========== */ 212 | async function handleModels() { 213 | const data = MODELS.map((model) => ({ 214 | id: model, 215 | object: "model", 216 | created: Math.floor(Date.now() / 1000), 217 | owned_by: "Elbert", 218 | name: model, 219 | })); 220 | return new Response(JSON.stringify({ object: "list", data }), { 221 | headers: { "Content-Type": "application/json" }, 222 | }); 223 | } 224 | 225 | async function handleChatCompletions(request, env) { 226 | const authHeader = request.headers.get("Authorization"); 227 | if (!authHeader || !authHeader.startsWith("Bearer ")) { 228 | return new Response(JSON.stringify({ error: "Missing or invalid Authorization header" }), { 229 | status: 401, 230 | headers: { "Content-Type": "application/json" }, 231 | }); 232 | } 233 | const token = authHeader.split(" ")[1]; 234 | if (token !== env.CONFIG_PASSWORD) { 235 | return new Response(JSON.stringify({ error: "Invalid API key" }), { 236 | status: 401, 237 | headers: { "Content-Type": "application/json" }, 238 | }); 239 | } 240 | try { 241 | const reqJson = await request.json(); 242 | const streamFlag = reqJson.stream || false; 243 | const messages = reqJson.messages; 244 | let model = reqJson.model; 245 | if (!MODELS.includes(model)) { 246 | return new Response(JSON.stringify({ error: "模型不可用" }), { 247 | status: 500, 248 | headers: { "Content-Type": "application/json" }, 249 | }); 250 | } 251 | if (!messages) { 252 | return new Response(JSON.stringify({ error: "必须提供消息" }), { 253 | status: 400, 254 | headers: { "Content-Type": "application/json" }, 255 | }); 256 | } 257 | const { disableSearch, forceConcise, messages: newMessages } = magic(messages); 258 | const formattedMessage = formatMessage(newMessages); 259 | const isReasoning = model.length > 6; 260 | model = model.substring(0, 6); 261 | if (streamFlag) { 262 | return await sendMessageStream(formattedMessage, model, disableSearch, forceConcise, isReasoning, env); 263 | } else { 264 | return await sendMessageNonStream(formattedMessage, model, disableSearch, forceConcise, isReasoning, env); 265 | } 266 | } catch (e) { 267 | console.error("处理 chat completions 出错:", e); 268 | return new Response(JSON.stringify({ error: e.toString() }), { 269 | status: 500, 270 | headers: { "Content-Type": "application/json" }, 271 | }); 272 | } 273 | } 274 | 275 | /* ========== 检查调用频率 ========== */ 276 | async function handleRateLimits(request, env) { 277 | try { 278 | const reqJson = await request.json(); 279 | const model = reqJson.model; 280 | const isReasoning = !!reqJson.isReasoning; 281 | if (!MODELS.includes(model)) { 282 | return new Response(JSON.stringify({ error: "模型不可用" }), { 283 | status: 500, 284 | headers: { "Content-Type": "application/json" }, 285 | }); 286 | } 287 | return await checkRateLimit(model, isReasoning, env); 288 | } catch (e) { 289 | console.error("检查调用频率出错:", e); 290 | return new Response(JSON.stringify({ error: e.toString() }), { 291 | status: 500, 292 | headers: { "Content-Type": "application/json" }, 293 | }); 294 | } 295 | } 296 | 297 | async function checkRateLimit(model, isReasoning, env) { 298 | let cookie; 299 | try { 300 | cookie = await getNextAccount(model, env); 301 | } catch (e) { 302 | return new Response(JSON.stringify({ error: e.toString() }), { 303 | status: 500, 304 | headers: { "Content-Type": "application/json" }, 305 | }); 306 | } 307 | const headers = 
getCommonHeaders(cookie); 308 | const payload = { 309 | requestKind: isReasoning ? "REASONING" : "DEFAULT", 310 | modelName: model, 311 | }; 312 | try { 313 | const response = await fetchWithTimeout(CHECK_URL, { 314 | method: "POST", 315 | headers, 316 | body: JSON.stringify(payload), 317 | }); 318 | if (!response.ok) { 319 | throw new Error("调用频率检查失败"); 320 | } 321 | const data = await response.json(); 322 | return new Response(JSON.stringify(data), { 323 | headers: { "Content-Type": "application/json" }, 324 | }); 325 | } catch (e) { 326 | console.error("调用频率检查异常:", e); 327 | return new Response(JSON.stringify({ error: e.toString() }), { 328 | status: 500, 329 | headers: { "Content-Type": "application/json" }, 330 | }); 331 | } 332 | } 333 | 334 | /* ========== 流式消息转发 ========== */ 335 | async function sendMessageStream(message, model, disableSearch, forceConcise, isReasoning, env) { 336 | let cookie; 337 | try { 338 | cookie = await getNextAccount(model, env); 339 | } catch (e) { 340 | return new Response(JSON.stringify({ error: e.toString() }), { 341 | status: 500, 342 | headers: { "Content-Type": "application/json" }, 343 | }); 344 | } 345 | const headers = getCommonHeaders(cookie); 346 | const config = await getConfig(env); 347 | const payload = { 348 | temporary: config.temporary_mode, 349 | modelName: model, 350 | message: message, 351 | fileAttachments: [], 352 | imageAttachments: [], 353 | disableSearch: disableSearch, 354 | enableImageGeneration: false, 355 | returnImageBytes: false, 356 | returnRawGrokInXaiRequest: false, 357 | enableImageStreaming: true, 358 | imageGenerationCount: 2, 359 | forceConcise: forceConcise, 360 | toolOverrides: {}, 361 | enableSideBySide: true, 362 | isPreset: false, 363 | sendFinalMetadata: true, 364 | customInstructions: "", 365 | deepsearchPreset: "", 366 | isReasoning: isReasoning, 367 | }; 368 | const init = { 369 | method: "POST", 370 | headers, 371 | body: JSON.stringify(payload), 372 | }; 373 | const response = await fetchWithTimeout(TARGET_URL, init); 374 | if (!response.ok) { 375 | return new Response(JSON.stringify({ error: "发送消息失败" }), { 376 | status: 500, 377 | headers: { "Content-Type": "application/json" }, 378 | }); 379 | } 380 | 381 | // 使用 ReadableStream 优化流数据处理 382 | const stream = new ReadableStream({ 383 | async start(controller) { 384 | const reader = response.body.getReader(); 385 | const decoder = new TextDecoder(); 386 | const encoder = new TextEncoder(); 387 | let buffer = ""; 388 | let thinking = 2; 389 | let batchSize = 0; 390 | let batchContent = ""; 391 | const MAX_BATCH_SIZE = 5; 392 | const BATCH_INTERVAL = 50; 393 | let lastBatchTime = Date.now(); 394 | 395 | const processBatch = async () => { 396 | if (batchContent) { 397 | const chunkData = { 398 | id: "chatcmpl-" + crypto.randomUUID(), 399 | object: "chat.completion.chunk", 400 | created: Math.floor(Date.now() / 1000), 401 | model: model, 402 | choices: [ 403 | { index: 0, delta: { content: batchContent }, finish_reason: null }, 404 | ], 405 | }; 406 | controller.enqueue(encoder.encode("data: " + JSON.stringify(chunkData) + "\n\n")); 407 | batchContent = ""; 408 | batchSize = 0; 409 | } 410 | }; 411 | 412 | while (true) { 413 | const { done, value } = await reader.read(); 414 | if (done) break; 415 | 416 | buffer += decoder.decode(value, { stream: true }); 417 | const lines = buffer.split("\n"); 418 | buffer = lines.pop(); 419 | 420 | for (const line of lines) { 421 | const trimmed = line.trim(); 422 | if (!trimmed) continue; 423 | 424 | try { 425 | const data = 
JSON.parse(trimmed); 426 | if (!data?.result?.response || typeof data.result.response.token !== "string") { 427 | continue; 428 | } 429 | 430 | let token = data.result.response.token; 431 | let content = token; 432 | 433 | if (isReasoning) { 434 | if (thinking === 2) { 435 | thinking = 1; 436 | content = `\n${token}`; 437 | } else if (thinking === 1 && !data.result.response.isThinking) { 438 | thinking = 0; 439 | content = `\n\n${token}`; 440 | } 441 | } 442 | 443 | batchContent += content; 444 | batchSize++; 445 | 446 | // 当达到批处理阈值或距离上次发送已经过了足够时间时,发送数据 447 | const now = Date.now(); 448 | if (batchSize >= MAX_BATCH_SIZE || (now - lastBatchTime >= BATCH_INTERVAL && batchContent)) { 449 | await processBatch(); 450 | lastBatchTime = now; 451 | // 添加微小延迟,让出 CPU 452 | await new Promise(resolve => setTimeout(resolve, 1)); 453 | } 454 | 455 | if (data.result.response.isSoftStop) { 456 | await processBatch(); // 处理剩余的批次 457 | const finalChunk = { 458 | id: "chatcmpl-" + crypto.randomUUID(), 459 | object: "chat.completion.chunk", 460 | created: Math.floor(Date.now() / 1000), 461 | model: model, 462 | choices: [ 463 | { index: 0, delta: { content: "" }, finish_reason: "completed" }, 464 | ], 465 | }; 466 | controller.enqueue(encoder.encode("data: " + JSON.stringify(finalChunk) + "\n\n")); 467 | controller.close(); 468 | return; 469 | } 470 | } catch (e) { 471 | console.error("JSON 解析错误:", e, "行内容:", trimmed); 472 | } 473 | } 474 | } 475 | 476 | // 处理剩余的缓冲区数据 477 | if (buffer.trim() !== "") { 478 | try { 479 | const data = JSON.parse(buffer.trim()); 480 | if (data?.result?.response && typeof data.result.response.token === "string") { 481 | let token = data.result.response.token; 482 | let content = token; 483 | if (isReasoning) { 484 | if (thinking === 2) { 485 | thinking = 1; 486 | content = `\n${token}`; 487 | } else if (thinking === 1 && !data.result.response.isThinking) { 488 | thinking = 0; 489 | content = `\n\n${token}`; 490 | } 491 | } 492 | batchContent += content; 493 | } 494 | } catch (e) { 495 | console.error("Final JSON parse error:", e, "in buffer:", buffer); 496 | } 497 | } 498 | 499 | // 处理最后的批次 500 | await processBatch(); 501 | controller.enqueue(encoder.encode("data: [DONE]\n\n")); 502 | controller.close(); 503 | } 504 | }); 505 | 506 | return new Response(stream, { 507 | headers: { "Content-Type": "text/event-stream" }, 508 | }); 509 | } 510 | 511 | /* ========== 非流式消息转发 ========== */ 512 | async function sendMessageNonStream(message, model, disableSearch, forceConcise, isReasoning, env) { 513 | let cookie; 514 | try { 515 | cookie = await getNextAccount(model, env); 516 | } catch (e) { 517 | return new Response(JSON.stringify({ error: e.toString() }), { 518 | status: 500, 519 | headers: { "Content-Type": "application/json" }, 520 | }); 521 | } 522 | const headers = getCommonHeaders(cookie); 523 | const config = await getConfig(env); 524 | const payload = { 525 | temporary: config.temporary_mode, 526 | modelName: model, 527 | message: message, 528 | fileAttachments: [], 529 | imageAttachments: [], 530 | disableSearch: disableSearch, 531 | enableImageGeneration: false, 532 | returnImageBytes: false, 533 | returnRawGrokInXaiRequest: false, 534 | enableImageStreaming: true, 535 | imageGenerationCount: 2, 536 | forceConcise: forceConcise, 537 | toolOverrides: {}, 538 | enableSideBySide: true, 539 | isPreset: false, 540 | sendFinalMetadata: true, 541 | customInstructions: "", 542 | deepsearchPreset: "", 543 | isReasoning: isReasoning, 544 | }; 545 | const init = { 546 | method: "POST", 
547 | headers, 548 | body: JSON.stringify(payload), 549 | }; 550 | const response = await fetchWithTimeout(TARGET_URL, init); 551 | if (!response.ok) { 552 | return new Response(JSON.stringify({ error: "发送消息失败" }), { 553 | status: 500, 554 | headers: { "Content-Type": "application/json" }, 555 | }); 556 | } 557 | const fullText = await response.text(); 558 | let finalMessage = ""; 559 | const lines = fullText.split("\n").filter(line => line.trim() !== ""); 560 | for (const line of lines) { 561 | try { 562 | const data = JSON.parse(line); 563 | if (data?.result?.response) { 564 | if (data.result.response.modelResponse && data.result.response.modelResponse.message) { 565 | finalMessage = data.result.response.modelResponse.message; 566 | break; 567 | } else if (typeof data.result.response.token === "string") { 568 | finalMessage += data.result.response.token; 569 | } 570 | } 571 | } catch (e) { 572 | console.error("JSON 解析错误:", e, "行内容:", line); 573 | } 574 | } 575 | const openai_response = { 576 | id: "chatcmpl-" + crypto.randomUUID(), 577 | object: "chat.completion", 578 | created: Math.floor(Date.now() / 1000), 579 | model: model, 580 | choices: [ 581 | { index: 0, message: { role: "assistant", content: finalMessage }, finish_reason: "completed" }, 582 | ], 583 | }; 584 | return new Response(JSON.stringify(openai_response), { 585 | headers: { "Content-Type": "application/json" }, 586 | }); 587 | } 588 | 589 | /* ========== 登录与认证 ========== */ 590 | async function requireAuth(request, env) { 591 | const cookieHeader = request.headers.get("Cookie") || ""; 592 | const match = cookieHeader.match(/config_auth=([^;]+)/); 593 | if (match && match[1] === env.CONFIG_PASSWORD) { 594 | return true; 595 | } 596 | return false; 597 | } 598 | 599 | function loginPage() { 600 | const html = 601 | ` 602 | 603 | 604 | 605 | 登录配置管理 606 | 614 | 615 | 616 |
617 | 请输入密码 <!-- 登录表单:密码输入框与登录按钮,POST 至 /config/login,字段 password --> 618 | 619 | 620 | 621 | 622 |
623 | 624 | `; 625 | return new Response(html, { headers: { "Content-Type": "text/html" } }); 626 | } 627 | 628 | async function handleLogin(request, env) { 629 | const formData = await request.formData(); 630 | const password = formData.get("password") || ""; 631 | if (password === env.CONFIG_PASSWORD) { 632 | const redirectURL = new URL("/config", request.url).toString(); 633 | const urlObj = new URL(request.url); 634 | const isHttps = urlObj.protocol === "https:"; 635 | const cookieHeader = `config_auth=${env.CONFIG_PASSWORD}; Path=/; HttpOnly; ${isHttps ? "Secure; " : ""}SameSite=Strict`; 636 | return new Response("", { 637 | status: 302, 638 | headers: { 639 | "Set-Cookie": cookieHeader, 640 | "Location": redirectURL, 641 | }, 642 | }); 643 | } else { 644 | return new Response("密码错误", { status: 401 }); 645 | } 646 | } 647 | 648 | /* ========== 配置管理页面 ========== */ 649 | async function configPage(request, env) { 650 | const config = await getConfig(env); 651 | let cookieStatuses = []; 652 | try { 653 | cookieStatuses = await Promise.all( 654 | config.cookies.map(cookie => 655 | checkCookieStatus(cookie).catch(e => ({ expired: true, rateLimited: false, rateLimitDetails: [] })) 656 | ) 657 | ); 658 | } catch (e) { 659 | console.error("Error checking cookie statuses:", e); 660 | } 661 | 662 | const tableRows = config.cookies.map((cookie, index) => { 663 | const status = cookieStatuses[index] || { expired: true, rateLimited: false, rateLimitDetails: [] }; 664 | const cookieStateHtml = status.expired 665 | ? '已过期' 666 | : '有效'; 667 | const rateLimitHtml = status.expired 668 | ? '--' 669 | : status.rateLimitDetails.map(detail => { 670 | if (detail.error) { 671 | return `${detail.model}: 错误(${detail.error})`; 672 | } else { 673 | return detail.remainingQueries > 0 674 | ? `${detail.model}: 有效 (剩余: ${detail.remainingQueries})` 675 | : `${detail.model}: 限额已达`; 676 | } 677 | }).join(" | "); 678 | return ` 679 | ${index + 1} 680 | ${truncateCookie(cookie)} 681 | ${cookieStateHtml} 682 | ${rateLimitHtml} 683 | 684 |
685 | 686 | 687 | 688 | <!-- 删除单条 Cookie 的行内表单:action=delete_one,携带 index 字段 -->
689 | 690 | `; 691 | }).join(''); 692 | 693 | const html = 694 | ` 695 | 696 | 697 | 698 | 配置管理 699 | 717 | 718 | 719 |
720 | 配置管理 721 | API Key: 与配置密码相同 722 | 当前 Cookies
723 | 724 | 725 | 726 | 727 | 728 | 729 | 730 | 731 | 732 | 733 | 734 | ${tableRows} 735 | 736 |
# | Cookie | Cookie状态 | 模型状态 | 操作
737 | Temporary Mode: ${config.temporary_mode ? "开启" : "关闭"} 738 | 739 | 添加 Cookie 740 |
741 | 742 | 743 | 744 | 745 | <!-- 添加 Cookie 表单:action=add,字段 cookie --> 746 | 全局操作 747 | 748 | 749 | 750 | 751 | 752 | 753 | 754 | 755 | <!-- 全局操作表单:action=delete(删除全部 Cookies)、action=toggle(切换 Temporary Mode) --> 756 | 757 |
758 | 759 | `; 760 | return new Response(html, { headers: { "Content-Type": "text/html" } }); 761 | } 762 | 763 | async function updateConfig(request, env) { 764 | const formData = await request.formData(); 765 | const action = formData.get("action"); 766 | const config = await getConfig(env); 767 | if (action === "add") { 768 | const newCookie = formData.get("cookie"); 769 | if (newCookie && newCookie.trim() !== "") { 770 | config.cookies.push(newCookie.trim()); 771 | } 772 | } else if (action === "delete") { 773 | config.cookies = []; 774 | } else if (action === "toggle") { 775 | config.temporary_mode = !config.temporary_mode; 776 | } else if (action === "delete_one") { 777 | const index = parseInt(formData.get("index"), 10); 778 | if (!isNaN(index) && index >= 0 && index < config.cookies.length) { 779 | config.cookies.splice(index, 1); 780 | } 781 | } 782 | await setConfig(config, env); 783 | return Response.redirect(new URL("/config", request.url).toString(), 302); 784 | } 785 | 786 | /* ========== 主调度函数 ========== */ 787 | export default { 788 | async fetch(request, env, ctx) { 789 | const url = new URL(request.url); 790 | if (url.pathname === "/" || url.pathname === "") { 791 | return Response.redirect(new URL("/config", request.url).toString(), 302); 792 | } 793 | 794 | if (url.pathname.startsWith("/config")) { 795 | if (url.pathname === "/config/login") { 796 | if (request.method === "GET") { 797 | return loginPage(); 798 | } else if (request.method === "POST") { 799 | return handleLogin(request, env); 800 | } 801 | } 802 | if (!(await requireAuth(request, env))) { 803 | return Response.redirect(new URL("/config/login", request.url).toString(), 302); 804 | } 805 | if (request.method === "GET") { 806 | return configPage(request, env); 807 | } else if (request.method === "POST") { 808 | return updateConfig(request, env); 809 | } 810 | } else if (url.pathname.startsWith("/v1/models")) { 811 | return handleModels(); 812 | } else if (url.pathname.startsWith("/v1/rate-limits")) { 813 | return handleRateLimits(request, env); 814 | } else if (url.pathname.startsWith("/v1/chat/completions")) { 815 | return handleChatCompletions(request, env); 816 | } 817 | return new Response("Not Found", { status: 404 }); 818 | } 819 | }; 820 | -------------------------------------------------------------------------------- /wrangler.toml: -------------------------------------------------------------------------------- 1 | name = "openai-grok" 2 | main = "worker.js" 3 | compatibility_date = "2025-02-24" 4 | compatibility_flags = [ "nodejs_compat" ] 5 | 6 | [[d1_databases]] 7 | binding = "D1_DB" 8 | database_name = "a1_db" 9 | database_id = "PLACEHOLDER_DB_ID" 10 | 11 | [vars] 12 | CONFIG_PASSWORD = "123" 13 | 14 | [observability] 15 | enabled = true 16 | head_sampling_rate = 1 --------------------------------------------------------------------------------
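以下是调用该 Worker 的最小示意示例(非仓库文件,仅供参考)。接口与鉴权方式取自 worker.js:GET /v1/models 无需鉴权;POST /v1/chat/completions 需要请求头 Authorization: Bearer <CONFIG_PASSWORD>,请求体包含 model、messages,可选 stream。示例中的 WORKER_URL 为占位地址,请替换为实际部署后的 Workers 域名。

```js
// 示意示例(假设值):WORKER_URL 为占位地址,API_KEY 即 CONFIG_PASSWORD(默认 123)
const WORKER_URL = "https://openai-grok.example.workers.dev";
const API_KEY = "123";

// GET /v1/models:返回 grok-2 / grok-3 / grok-3-thinking 模型列表,无需鉴权
async function listModels() {
  const resp = await fetch(`${WORKER_URL}/v1/models`);
  console.log(await resp.json());
}

// POST /v1/chat/completions:非流式调用,读取 choices[0].message.content
async function chat() {
  const resp = await fetch(`${WORKER_URL}/v1/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${API_KEY}`,
    },
    body: JSON.stringify({
      model: "grok-3",
      stream: false,
      messages: [{ role: "user", content: "你好,请介绍一下你自己。" }],
    }),
  });
  const data = await resp.json();
  console.log(data.choices[0].message.content);
}

listModels().then(chat);
```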