├── argo.sh
├── example.js
├── package.json
├── LICENSE
├── README.md
├── server.js
└── app.js

--------------------------------------------------------------------------------
/argo.sh:
--------------------------------------------------------------------------------
check_file() {
  [ ! -e cloudflared ] && wget -O cloudflared https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64 && chmod +x cloudflared
}

run_argo() {
  chmod +x cloudflared && nohup ./cloudflared tunnel --edge-ip-version auto --protocol http2 run --token ${ARGO_AUTH} >/dev/null 2>&1 &
}

generate_autodel() {
  cat > auto_del.sh <

--------------------------------------------------------------------------------
/example.js:
--------------------------------------------------------------------------------
  .then((response) => {
    console.log(response.data.choices[0].message.content);
  })
  .catch((error) => {
    console.error(error);
  });

--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "glitch-mvp-node",
  "version": "0.1.1",
  "description": "A simple Node app built on Express, instantly up and running.",
  "main": "server.js",
  "scripts": {
    "prestart": "node app.js &",
    "start": "node server.js"
  },
  "dependencies": {
    "axios": "^1.6.7",
    "body-parser": "^1.20.2",
    "dotenv": "^16.4.5",
    "express": "^4.18.3",
    "gpt-3-encoder": "^1.1.4",
    "request": "^2.88.2",
    "http-proxy-middleware": "^2.0.6"
  },
  "engines": {
    "node": "16.x"
  },
  "repository": {
    "url": "https://glitch.com/edit/#!/glitch-mvp-node"
  },
  "license": "MIT",
  "keywords": [
    "node",
    "glitch"
  ]
}

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2024 Vincent Young

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
## FreeGPT35-Glitch

You can deploy this repository to Glitch by clicking [this link](https://glitch.com/edit/#!/import/git?url=https://github.com/k0baya/FreeGPT35-Glitch).

### Variables

You can add variables in the `.env` file.
| Key | Value | Note |
|--------------|---------------------|------------------------------------------------|
| `AUTH_TOKEN` | any_string_you_like | This is your API key for accessing FreeGPT35.  |
| `ARGO_AUTH`  | Cloudflared token   | Use an Argo Tunnel to access FreeGPT35 (optional). |

> If `AUTH_TOKEN` is not set, any API key will be accepted.

> If you are using an Argo Tunnel, point the tunnel to `http://localhost:3000` in your Cloudflare dashboard.

### Usage

Use the URL that Glitch provides as the endpoint.

```bash
curl --location 'https://your-glitch-project.glitch.me/v1/chat/completions' \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer any_string_you_like' \
--data '{
    "model": "gpt-3.5-turbo",
    "messages": [{"role": "user", "content": "Tell me a story about socialism."}]
  }'
```
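The same request from Node, using `axios` as in `example.js` (the project URL and key below are placeholders):

```js
const axios = require("axios");

axios
  .post(
    "https://your-glitch-project.glitch.me/v1/chat/completions", // placeholder URL
    {
      model: "gpt-3.5-turbo",
      messages: [{ role: "user", content: "Hello!" }],
    },
    { headers: { Authorization: "Bearer any_string_you_like" } }
  )
  .then((response) => console.log(response.data.choices[0].message.content))
  .catch((error) => console.error(error.message));
```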
--------------------------------------------------------------------------------
/server.js:
--------------------------------------------------------------------------------
const express = require("express");
const app = express();
const port = 3000;
const PROJECT_DOMAIN = process.env.PROJECT_DOMAIN;
var exec = require("child_process").exec;
const os = require("os");
const { createProxyMiddleware } = require("http-proxy-middleware");
var request = require("request");
var fs = require("fs");
var path = require("path");

function authMiddleware(req, res, next) {
  const authToken = process.env.AUTH_TOKEN;

  if (authToken) {
    const reqAuthToken = req.headers.authorization;
    if (reqAuthToken && reqAuthToken === `Bearer ${authToken}`) {
      next();
    } else {
      res.sendStatus(401);
    }
  } else {
    next();
  }
}

// Get the system process table
app.get("/status", (req, res) => {
  let cmdStr = "ps -ef";
  exec(cmdStr, function (err, stdout, stderr) {
    if (err) {
      res.type("html").send("<pre>Command execution error:\n" + err + "</pre>");
    } else {
      res.type("html").send("<pre>Command execution result:\n" + stdout + "</pre>");
    }
  });
});

// Get the ports the system is listening on
app.get("/listen", function (req, res) {
  let cmdStr = "ss -nltp";
  exec(cmdStr, function (err, stdout, stderr) {
    if (err) {
      res.type("html").send("<pre>Command execution error:\n" + err + "</pre>");
    } else {
      res.type("html").send("<pre>Listening ports:\n" + stdout + "</pre>");
    }
  });
});

// Get the OS release and memory info
app.get("/info", (req, res) => {
  let cmdStr = "cat /etc/*release | grep -E ^NAME";
  exec(cmdStr, function (err, stdout, stderr) {
    if (err) {
      res.send("Command execution error: " + err);
    } else {
      res.send(
        "Command execution result:\n" +
          "Linux System: " +
          stdout +
          "\nRAM: " +
          os.totalmem() / 1000 / 1000 +
          "MB"
      );
    }
  });
});

// Test whether the file system is read-only
app.get("/test", (req, res) => {
  fs.writeFile("./test.txt", "This is the content of a newly created file!", function (err) {
    if (err) res.send("Failed to create the file; the file system is read-only: " + err);
    else res.send("File created successfully; the file system is writable.");
  });
});

app.use(
  "/*", authMiddleware,
  createProxyMiddleware({
    target: "http://127.0.0.1:3040/",
    changeOrigin: false,
    ws: true,
    logLevel: "error",
    onProxyReq: function onProxyReq(proxyReq, req, res) { }
  })
);

function keepalive() {
  let glitch_app_url = `https://${PROJECT_DOMAIN}.glitch.me`;
  exec("curl " + glitch_app_url, function (err, stdout, stderr) { });

  exec("curl " + glitch_app_url + "/status", function (err, stdout, stderr) {
    if (!err) {
      // Restart the worker if it no longer shows up in the process table
      if (stdout.indexOf("node app.js") == -1) {
        exec("node app.js");
      }
    } else console.log("Keepalive - fetch process table - command execution error: " + err);
  });
}
setInterval(keepalive, 9 * 1000);

function keep_argo_alive() {
  if (!process.env.ARGO_AUTH) {
    console.log("ARGO_AUTH is not set; skipping cloudflared startup!");
    return;
  }
  exec("pgrep -laf cloudflared", function (err, stdout, stderr) {
    // Check the background process list to keep the tunnel alive
    if (stdout.includes("./cloudflared tunnel")) {
      console.log("Argo is running");
    } else {
      // Argo is not running; start it from the command line
      exec("bash argo.sh 2>&1 &", function (err, stdout, stderr) {
        if (err) {
          console.log("Keepalive - start Argo - command execution error: " + err);
        } else {
          console.log("Keepalive - start Argo - command executed successfully!");
        }
      });
    }
  });
}
setInterval(keep_argo_alive, 30 * 1000);

app.listen(port, () => console.log(`Example app listening on port ${port}!`));
--------------------------------------------------------------------------------
/app.js:
--------------------------------------------------------------------------------
const express = require("express");
const { spawn } = require("child_process");
const fs = require("fs");
const path = require("path");
const bodyParser = require("body-parser");
const axios = require("axios");
const https = require("https");
const os = require("os");
const { encode } = require("gpt-3-encoder");
const { randomUUID, randomInt, createHash } = require("crypto");
const { config } = require("dotenv");

config();

// Constants for the server and API configuration
const port = process.env.SERVER_PORT || 3040;
const baseUrl = "https://chat.openai.com";
const apiUrl = `${baseUrl}/backend-anon/conversation`;
const refreshInterval = 60000; // Interval to refresh token in ms
const errorWait = 120000; // Wait time in ms after an error
const newSessionRetries = parseInt(process.env.NEW_SESSION_RETRIES) || 5;
const userAgent = process.env.USER_AGENT || "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36";
let cloudflared;

// Initialize global variables to store the session token and device ID
let token;
let oaiDeviceId;

// Function to wait for a specified duration
const wait = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

function GenerateCompletionId(prefix = "cmpl-") {
  const characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
  const length = 28;

  for (let i = 0; i < length; i++) {
    prefix += characters.charAt(Math.floor(Math.random() * characters.length));
  }

  return prefix;
}

async function* chunksToLines(chunksAsync) {
  let previous = "";
  for await (const chunk of chunksAsync) {
    const bufferChunk = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
    previous += bufferChunk.toString();
    let eolIndex;
    while ((eolIndex = previous.indexOf("\n")) >= 0) {
      // line includes the EOL
      const line = previous.slice(0, eolIndex + 1).trimEnd();
      if (line === "data: [DONE]") break;
      if (line.startsWith("data: ")) yield line;
      previous = previous.slice(eolIndex + 1);
    }
  }
}

async function* linesToMessages(linesAsync) {
  for await (const line of linesAsync) {
    const message = line.substring("data: ".length);

    yield message;
  }
}

async function* StreamCompletion(data) {
  yield* linesToMessages(chunksToLines(data));
}

// Setup axios instance for API requests with predefined configurations
const axiosInstance = axios.create({
  httpsAgent: new https.Agent({ rejectUnauthorized: false }),
  proxy:
    process.env.PROXY === "true"
      ? {
          host: process.env.PROXY_HOST,
          port: Number(process.env.PROXY_PORT),
          auth:
            process.env.PROXY_AUTH === "true"
              ? {
                  username: process.env.PROXY_USERNAME,
                  password: process.env.PROXY_PASSWORD,
                }
              : undefined,
          protocol: process.env.PROXY_PROTOCOL,
        }
      : false,
  headers: {
    accept: "*/*",
    "accept-language": "en-US,en;q=0.9",
    "cache-control": "no-cache",
    "content-type": "application/json",
    "oai-language": "en-US",
    origin: baseUrl,
    pragma: "no-cache",
    referer: baseUrl,
    "sec-ch-ua":
      '"Google Chrome";v="123", "Not:A-Brand";v="8", "Chromium";v="123"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"Windows"',
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": userAgent,
  },
});

function generateProofToken(seed, diff, userAgent) {
  const cores = [8, 12, 16, 24];
  const screens = [3000, 4000, 6000];
  const core = cores[randomInt(0, cores.length)];
  const screen = screens[randomInt(0, screens.length)];
  const now = new Date(Date.now() - 8 * 3600 * 1000);
  const parseTime = now.toUTCString().replace("GMT", "GMT-0500 (Eastern Time)");
  const config = [core + screen, parseTime, 4294705152, 0, userAgent];
  const diffLen = diff.length / 2;
  for (let i = 0; i < 100000; i++) {
    config[3] = i;
    const jsonData = JSON.stringify(config);
    const base = Buffer.from(jsonData).toString("base64");
    const hashValue = createHash("sha3-512")
      .update(seed + base)
      .digest();
    if (hashValue.toString("hex").substring(0, diffLen) <= diff) {
      const result = "gAAAAAB" + base;
      return result;
    }
  }
  const fallbackBase = Buffer.from(`"${seed}"`).toString("base64");
  return "gAAAAABwQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + fallbackBase;
}
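`generateProofToken` implements the hashcash-style sentinel check: it mutates a nonce (`config[3]`), base64-encodes the JSON config, and SHA3-512-hashes `seed + base` until the first `difficulty.length / 2` hex characters of the digest compare lexicographically less than or equal to the difficulty string. A toy version of just the acceptance test (the seed and difficulty are made-up values; real ones come from the chat-requirements response):

```js
const { createHash } = require("crypto");

const seed = "0.42";   // made-up
const diff = "0fffff"; // made-up; 6 hex chars -> compare the first 3 digest chars
const base = Buffer.from(JSON.stringify([8008, "time", 4294705152, 7, "UA"])).toString("base64");
const hex = createHash("sha3-512").update(seed + base).digest("hex");
console.log(hex.substring(0, diff.length / 2) <= diff); // plain string comparison, as above
```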
// Function to get a new session ID and token from the OpenAI API
async function getNewSession(retries = 0) {
  let newDeviceId = randomUUID();
  try {
    const response = await axiosInstance.post(
      `${baseUrl}/backend-anon/sentinel/chat-requirements`,
      {},
      {
        headers: { "oai-device-id": newDeviceId },
      }
    );

    let session = response.data;
    session.deviceId = newDeviceId;

    return session;
  } catch (error) {
    await wait(500);
    return retries < newSessionRetries ? getNewSession(retries + 1) : null;
  }
}

// Middleware to enable CORS and handle pre-flight requests
function enableCORS(req, res, next) {
  res.header("Access-Control-Allow-Origin", "*");
  res.header("Access-Control-Allow-Headers", "*");
  res.header("Access-Control-Allow-Methods", "GET, POST, OPTIONS");
  if (req.method === "OPTIONS") {
    return res.status(200).end();
  }
  next();
}
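Every completion request begins with this anonymous handshake: `getNewSession` posts to `/backend-anon/sentinel/chat-requirements` with a fresh device UUID and returns the response body with the device ID attached. The fields the rest of the code relies on look roughly like this (names as used in `handleChatCompletion` below; real responses may carry more fields):

```js
// Illustrative shape only; values are placeholders.
const exampleSession = {
  token: "…",            // sent back as openai-sentinel-chat-requirements-token
  proofofwork: {
    seed: "…",           // input to generateProofToken
    difficulty: "…",
  },
  deviceId: "…",         // added locally, sent as oai-device-id
};
```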
// Middleware to handle chat completions
async function handleChatCompletion(req, res) {
  console.log(
    "Request:",
    `${req.method} ${req.originalUrl}`,
    `${req.body?.messages?.length ?? 0} messages`,
    req.body.stream ? "(stream-enabled)" : "(stream-disabled)"
  );
  try {
    let session = await getNewSession();
    if (!session) {
      res.write(
        JSON.stringify({
          status: false,
          error: {
            message: `Error getting a new session. Please try again later; if the issue persists, please open an issue on the GitHub repository.`,
            type: "invalid_request_error",
          },
          support: "",
        })
      );
      return res.end();
    }
    let proofToken = generateProofToken(
      session.proofofwork.seed,
      session.proofofwork.difficulty,
      userAgent // Make sure userAgent is defined somewhere in your code
    );
    const body = {
      action: "next",
      messages: req.body.messages.map(message => ({
        author: { role: message.role },
        content: { content_type: "text", parts: [message.content] },
      })),
      parent_message_id: randomUUID(), // Ensure randomUUID function is available
      model: "text-davinci-002-render-sha",
      timezone_offset_min: -180,
      suggestions: [],
      history_and_training_disabled: true,
      conversation_mode: { kind: "primary_assistant" },
      websocket_request_id: randomUUID(), // Ensure randomUUID function is available
    };
    let promptTokens = 0;
    let completionTokens = 0;
    for (let message of req.body.messages) {
      promptTokens += encode(message.content).length;
    }

    const response = await axiosInstance.post(apiUrl, body, {
      responseType: "stream",
      headers: {
        "oai-device-id": session.deviceId,
        "openai-sentinel-chat-requirements-token": session.token,
        "openai-sentinel-proof-token": proofToken,
      },
    });

    // Set the response headers based on the request type
    if (req.body.stream) {
      res.setHeader("Content-Type", "text/event-stream");
      res.setHeader("Cache-Control", "no-cache");
      res.setHeader("Connection", "keep-alive");
    } else {
      res.setHeader("Content-Type", "application/json");
    }

    let fullContent = "";
    let requestId = GenerateCompletionId("chatcmpl-");
    let created = Math.floor(Date.now() / 1000); // Unix timestamp in seconds
    let finish_reason = null;
    let error;

    for await (const message of StreamCompletion(response.data)) {
      // Skip heartbeat lines (bare timestamps)
      if (message.match(/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}$/))
        continue;

      const parsed = JSON.parse(message);

      if (parsed.error) {
        error = `Error message from OpenAI: ${parsed.error}`;
        finish_reason = "stop";
        break;
      }

      let content = parsed?.message?.content?.parts?.[0] ?? "";
      let status = parsed?.message?.status ?? "";

      for (let message of req.body.messages) {
        if (message.content === content) {
          content = "";
          break;
        }
      }

      switch (status) {
        case "in_progress":
          finish_reason = null;
          break;
        case "finished_successfully":
          let finish_reason_data =
            parsed?.message?.metadata?.finish_details?.type ?? null;
          switch (finish_reason_data) {
            case "max_tokens":
              finish_reason = "length";
              break;
            case "stop":
            default:
              finish_reason = "stop";
          }
          break;
        default:
          finish_reason = null;
      }

      if (content === "") continue;

      let completionChunk = content.replace(fullContent, "");

      completionTokens += encode(completionChunk).length;

      if (req.body.stream) {
        let response = {
          id: requestId,
          created: created,
          object: "chat.completion.chunk",
          model: "gpt-3.5-turbo",
          choices: [
            {
              delta: {
                content: completionChunk,
              },
              index: 0,
              finish_reason: finish_reason,
            },
          ],
        };

        res.write(`data: ${JSON.stringify(response)}\n\n`);
      }

      fullContent = content.length > fullContent.length ? content : fullContent;
    }

    if (req.body.stream) {
      res.write(
        `data: ${JSON.stringify({
          id: requestId,
          created: created,
          object: "chat.completion.chunk",
          model: "gpt-3.5-turbo",
          choices: [
            {
              delta: {
                content: error ?? "",
              },
              index: 0,
              finish_reason: finish_reason,
            },
          ],
        })}\n\n`
      );
    } else {
      res.write(
        JSON.stringify({
          id: requestId,
          created: created,
          model: "gpt-3.5-turbo",
          object: "chat.completion",
          choices: [
            {
              finish_reason: finish_reason,
              index: 0,
              message: {
                content: error ?? fullContent,
                role: "assistant",
              },
            },
          ],
          usage: {
            prompt_tokens: promptTokens,
            completion_tokens: completionTokens,
            total_tokens: promptTokens + completionTokens,
          },
        })
      );
    }

    res.end();
  } catch (error) {
    // console.log("Error:", error.response?.data ?? error.message);
    if (!res.headersSent) res.setHeader("Content-Type", "application/json");
    // console.error("Error handling chat completion:", error);
    res.write(
      JSON.stringify({
        status: false,
        error: {
          message:
            "An error occurred. Please try again. Additionally, ensure that your request complies with OpenAI's policy.",
          type: "invalid_request_error",
        },
        support: "https://discord.pawan.krd",
      })
    );
    res.end();
  }
}
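With `stream: true`, the handler above re-emits each delta as an OpenAI-compatible `chat.completion.chunk` event. A minimal client-side consumer (a sketch; it assumes each SSE event arrives in a single chunk, which a robust client should not rely on):

```js
const axios = require("axios");

async function streamChat() {
  const res = await axios.post(
    "http://localhost:3040/v1/chat/completions", // app.js default port
    {
      model: "gpt-3.5-turbo",
      stream: true,
      messages: [{ role: "user", content: "Hello!" }],
    },
    { responseType: "stream" }
  );
  res.data.on("data", (buf) => {
    for (const line of buf.toString().split("\n")) {
      if (!line.startsWith("data: ")) continue;
      const delta = JSON.parse(line.slice("data: ".length)).choices[0]?.delta?.content;
      if (delta) process.stdout.write(delta);
    }
  });
}

streamChat().catch(console.error);
```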
// Initialize Express app and use middlewares
const app = express();
app.use(bodyParser.json());
app.use(enableCORS);

// Route to handle POST requests for chat completions
app.post("/v1/chat/completions", handleChatCompletion);

// 404 handler for unmatched routes
app.use((req, res) =>
  res.status(404).send({
    status: false,
    error: {
      message: `The requested endpoint (${req.method.toUpperCase()} ${req.path}) was not found. Please check the path and HTTP method.`,
      type: "invalid_request_error",
    },
    support: "https://discord.pawan.krd",
  })
);

async function DownloadCloudflared() {
  const platform = os.platform();
  let url;

  if (platform === "win32") {
    const arch = os.arch() === "x64" ? "amd64" : "386";
    url = `https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-windows-${arch}.exe`;
  } else {
    let arch = os.arch();
    switch (arch) {
      case "x64":
        arch = "amd64";
        break;
      case "arm":
      case "arm64":
        break;
      default:
        arch = "amd64"; // Default to amd64 if unknown architecture
    }
    const platformLower = platform.toLowerCase();
    url = `https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-${platformLower}-${arch}`;
  }

  const fileName = platform === "win32" ? "cloudflared.exe" : "cloudflared";
  const filePath = path.resolve(fileName);

  if (fs.existsSync(filePath)) {
    return filePath;
  }

  try {
    const response = await axiosInstance({
      method: "get",
      url: url,
      responseType: "stream",
    });

    const writer = fs.createWriteStream(filePath);

    response.data.pipe(writer);

    return new Promise((resolve, reject) => {
      writer.on("finish", () => {
        if (platform !== "win32") {
          fs.chmodSync(filePath, 0o755);
        }
        resolve(filePath);
      });

      writer.on("error", reject);
    });
  } catch (error) {
    return null;
  }
}

async function StartCloudflaredTunnel(cloudflaredPath) {
  const localUrl = `http://localhost:${port}`;
  return new Promise((resolve, reject) => {
    cloudflared = spawn(cloudflaredPath, ["tunnel", "--url", localUrl]);

    // cloudflared logs the assigned trycloudflare URL on stdout or stderr
    const onData = (data) => {
      const urlMatch = data.toString().match(/https:\/\/[^\s]+\.trycloudflare\.com/);
      if (urlMatch) resolve(urlMatch[0]);
    };
    cloudflared.stdout.on("data", onData);
    cloudflared.stderr.on("data", onData);

    cloudflared.on("close", (code) => {
      resolve(null);
    });
  });
}
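`StartCloudflaredTunnel` starts an ephemeral "quick tunnel" (`cloudflared tunnel --url http://localhost:<port>`, no account required) and watches both output streams for the randomly assigned `*.trycloudflare.com` URL. The extraction is a plain regex match over the log text; the sample line below is illustrative, not an exact cloudflared log format:

```js
const sample =
  "2024-01-01T00:00:00Z INF |  https://random-words-1234.trycloudflare.com  |";
const url = sample.match(/https:\/\/[^\s]+\.trycloudflare\.com/)?.[0];
console.log(url); // -> https://random-words-1234.trycloudflare.com
```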
// Start the server
app.listen(port, async () => {
  if (process.env.CLOUDFLARED === undefined) process.env.CLOUDFLARED = "true";
  let cloudflaredEnabled = process.env.CLOUDFLARED === "true";
  let filePath;
  let publicURL;
  if (cloudflaredEnabled) {
    filePath = await DownloadCloudflared();
    publicURL = await StartCloudflaredTunnel(filePath);
  }

  console.log(`💡 Server is running at http://localhost:${port}`);
  console.log();
  console.log(`🔗 Local Base URL: http://localhost:${port}/v1`);
  console.log(`🔗 Local Endpoint: http://localhost:${port}/v1/chat/completions`);
  console.log();
  if (cloudflaredEnabled && publicURL) {
    console.log(`🔗 Public Base URL: ${publicURL}/v1`);
    console.log(`🔗 Public Endpoint: ${publicURL}/v1/chat/completions`);
    console.log();
  } else if (cloudflaredEnabled) {
    console.log("🔗 Public Endpoint: (Failed to start the cloudflared tunnel; please restart the server.)");
    if (filePath) fs.unlinkSync(filePath);
  }
  console.log("📝 Author: Pawan.Krd");
  console.log(`🌐 Discord server: https://discord.gg/pawan`);
  console.log("🌍 GitHub Repository: https://github.com/PawanOsman/ChatGPT");
  console.log(`💖 Don't forget to star the repository if you like this project!`);
});

--------------------------------------------------------------------------------
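For reference, the environment variables read across the two processes, collected in one place (values are placeholders; all names appear in the code above):

```js
require("dotenv").config();

// server.js:
//   AUTH_TOKEN      - Bearer token checked by authMiddleware (optional)
//   ARGO_AUTH       - Cloudflared tunnel token used by keep_argo_alive (optional)
//   PROJECT_DOMAIN  - set automatically by Glitch; used to build the keepalive URL
// app.js:
//   SERVER_PORT         - listen port, defaults to 3040
//   NEW_SESSION_RETRIES - session retry count, defaults to 5
//   USER_AGENT          - browser UA string sent upstream
//   CLOUDFLARED         - "true"/"false" quick-tunnel toggle, defaults to "true"
//   PROXY, PROXY_HOST, PROXY_PORT, PROXY_AUTH, PROXY_USERNAME,
//   PROXY_PASSWORD, PROXY_PROTOCOL - optional outbound proxy settings
console.log("AUTH_TOKEN set:", Boolean(process.env.AUTH_TOKEN));
```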