├── Dockerfile
├── LICENSE
├── .github
└── workflows
│ └── docker_build_push.yml
├── README.md
├── README_en.md
├── cf-openai-azure-proxy.js
├── cf-openai-palm-proxy.js
└── express-openai-azure-proxy.js
/Dockerfile:
--------------------------------------------------------------------------------
# Base image: slim Node 18, sufficient to run the wrangler dev server.
FROM node:18.12-slim

WORKDIR /app

# Install the Cloudflare Workers CLI tool (version pinned for reproducible builds).
RUN npm install -g wrangler@2.15.0

# Opt out of wrangler usage telemetry.
ENV WRANGLER_SEND_METRICS=false

# Azure deployment names; supply real values at `docker run` time via --env.
ENV DEPLOY_NAME_GPT35=""
ENV DEPLOY_NAME_GPT4=""

# Copy the Workers script into the image.
COPY cf-openai-azure-proxy.js .

# Start a local dev server; RESOURCE_NAME / DEPLOY_NAME_GPT35 / DEPLOY_NAME_GPT4
# are forwarded to the Worker as wrangler vars from the container environment.
CMD wrangler dev cf-openai-azure-proxy.js --local --var RESOURCE_NAME:$RESOURCE_NAME DEPLOY_NAME_GPT35:$DEPLOY_NAME_GPT35 DEPLOY_NAME_GPT4:$DEPLOY_NAME_GPT4
18 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 haibbo
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/.github/workflows/docker_build_push.yml:
--------------------------------------------------------------------------------
1 | name: Build and Push Docker Image
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v*'
7 |
8 | jobs:
9 | build_and_push:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - name: Checkout code
14 | uses: actions/checkout@v2
15 |
16 | - name: Set up Docker Buildx
17 | uses: docker/setup-buildx-action@v1
18 |
19 | - name: Login to DockerHub
20 | uses: docker/login-action@v1
21 | with:
22 | username: ${{ secrets.DOCKERHUB_USERNAME }}
23 | password: ${{ secrets.DOCKERHUB_TOKEN }}
24 |
25 | - name: Build and push Docker image
26 | uses: docker/build-push-action@v2
27 | with:
28 | context: .
29 | push: true
30 | tags: haibbo/cf-openai-azure-proxy:${{ github.ref_name }}
      - name: Build and push multi-arch Docker image
32 | uses: docker/build-push-action@v2
33 | with:
34 | context: .
35 | push: true
36 | tags: |
37 | haibbo/cf-openai-azure-proxy:${{ github.ref_name }}
38 | haibbo/cf-openai-azure-proxy:latest
39 | platforms: linux/amd64,linux/arm64/v8
40 | - name: Create GitHub Release
41 | id: create_release
42 | uses: actions/create-release@v1
43 | env:
44 | GITHUB_TOKEN: ${{ secrets.ACTIONS_RELEASE_TOKEN }}
45 | with:
46 | tag_name: ${{ github.ref }}
47 | release_name: Release ${{ github.ref }}
48 | body: |
49 | Release notes for ${{ github.ref }}
50 |
51 | ```sh
          docker run -d -p 8787:8787 --name cf-azure-openai-proxy \
            --env RESOURCE_NAME=codegpt \
            --env DEPLOY_NAME_GPT35=gpt3 \
            --env DEPLOY_NAME_GPT4=gpt4 \
            haibbo/cf-openai-azure-proxy:latest
57 | ```
58 |
59 |
60 | Docker image is available at:
61 | - `haibbo/cf-openai-azure-proxy:${{ github.ref_name }}`
62 | - `haibbo/cf-openai-azure-proxy:latest`
63 |
64 | Enjoy!
65 | draft: false
66 | prerelease: false
67 |
68 |
69 |
70 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # cf-openai-azure-proxy
2 |
3 | English |
4 | 中文
5 |
6 | > 大多数 OpenAI 客户端不支持 Azure OpenAI Service,但Azure OpenAI Service的申请和绑卡都非常简单,并且还提供了免费的额度。此脚本使用免费的 Cloudflare Worker 作为代理,使得支持 OpenAI 的客户端可以直接使用 Azure OpenAI Service。
7 |
8 | ### 项目说明:
9 | - 我没有服务器可以使用吗?
10 | - 这段脚本跑在Cloudflare Worker, 不需要服务器, 不需要绑卡, 每天10W次请求 免费
11 | - 我没有自己的域名可以使用吗?
12 | - 也可以, 参考: https://github.com/haibbo/cf-openai-azure-proxy/issues/3
13 | - 实现打印机模式:
14 | - Azure OpenAI Service's 回复是一段一段回复的
15 | - 返回给客户端的时候, 本项目拆出一条条的消息, 依次给, 达到打印机模式
16 | - 项目也支持 Docker 部署(基于 wrangler)
17 |
18 | ### 部署
代理 OpenAI 的请求到 Azure OpenAI Service,代码部署步骤:
20 |
21 | 1. 注册并登录到 Cloudflare 账户
22 | 2. 创建一个新的 Cloudflare Worker
23 | 3. 将 [cf-openai-azure-proxy.js](./cf-openai-azure-proxy.js) 复制并粘贴到 Cloudflare Worker 编辑器中
24 | 4. 通过修改或环境变量调整 resourceName 和 deployment mapper 的值
25 | 5. 保存并部署 Cloudflare Worker
26 | 6. https://github.com/haibbo/cf-openai-azure-proxy/issues/3 **可选**绑定自定义域名: 在 Worker 详情页 -> Trigger -> Custom Domains 中为这个 Worker 添加一个自定义域名
27 |
28 |
29 | ### 使用说明
30 |
31 | 先得到 resourceName 和 deployment mapper, 登录到Azure的后台:
32 |
33 |
34 |
35 | #### 这里有两种做法:
36 | - 直接修改他们的值, 如:
37 | ```js
38 | // The name of your Azure OpenAI Resource.
39 | const resourceName="codegpt"
40 |
41 | // deployment model mapper
42 | const mapper = {
43 | 'gpt-3.5-turbo': 'gpt3',
44 | 'gpt-4': 'gpt4'
45 | };
46 | 其他的map规则直接按这样的格式续写即可
47 | ```
48 | - 或者通过 cloudflare worker 控制台, 进入 Workers script > Settings > Add variable under Environment Variables.
49 |
50 |
51 |
52 | ### 客户端
53 | 以 OpenCat 为例: 自定义 API 域名填写 第六步绑定的域名:
54 |
55 |
56 |
57 | 我已经尝试了多种客户端, 如果遇到其他客户端有问题, 欢迎创建issue.
58 |
--------------------------------------------------------------------------------
/README_en.md:
--------------------------------------------------------------------------------
1 | # cf-openai-azure-proxy
2 |
3 | English |
4 | 中文
5 |
6 | > Most OpenAI clients do not support Azure OpenAI Service, but the application for Azure OpenAI Service is very simple, and it also provides free quotas. This script uses a free Cloudflare Worker as a proxy, allowing OpenAI-supported clients to directly use Azure OpenAI Service.
7 |
8 | This script proxies requests to Azure OpenAI Service for OpenAI clients. The code deployment steps are as follows:
9 |
10 | Register and log in to your Cloudflare account.
11 | - Create a new Cloudflare Worker.
12 | - Copy and paste cf-openai-azure-proxy.js into the Cloudflare Worker editor.
- Adjust the values of **resourceName** and deployment **mapper** by either direct modification or using environment variables.
14 | - Save and deploy the Cloudflare Worker.
15 | - https://github.com/haibbo/cf-openai-azure-proxy/issues/3 Optional: Bind a custom domain name: Add a custom domain name for this worker in the Worker details page -> Trigger -> Custom Domains.
16 |
17 | ## Instructions
18 | First obtain the resourceName and deployment mapper, and log in to the Azure portal:
19 |
20 |
21 |
22 | #### There are two ways to do this:
23 | - Directly modify their values, such as:
24 | ```js
25 | // The name of your Azure OpenAI Resource.
26 | const resourceName="codegpt"
27 |
const mapper = {
29 | 'gpt-3.5-turbo': 'gpt3',
30 | 'gpt-4': 'gpt4'
31 | };
32 | ```
33 | Other map rules can be continued directly in this format.
34 | - **OR** go to the Cloudflare Worker console, navigate to Workers script > Settings > Add variable under Environment Variables.
35 |
36 |
37 |
38 | ## Client
39 | Take OpenCat as an example: fill in the custom API domain name with the domain name bound in step 6:
40 |
41 |
42 | I have tried multiple clients. If you encounter problems with other clients, please feel free to create an issue.
43 |
44 | QA:
45 |
46 | - Do I need a server to use this?
47 | - This script runs on Cloudflare Worker and does not require a server or a bound card. It is free for up to 100,000 requests per day.
48 | - Do I need my own domain name to use this?
49 | - No, it is not necessary. Refer to: https://github.com/haibbo/cf-openai-azure-proxy/issues/3
50 |
--------------------------------------------------------------------------------
/cf-openai-azure-proxy.js:
--------------------------------------------------------------------------------
// The name of your Azure OpenAI Resource.
// RESOURCE_NAME is a Worker environment-variable binding, injected globally.
const resourceName=RESOURCE_NAME

// The deployment name you chose when you deployed the model.
// Maps OpenAI model ids to Azure deployment names; the values come from
// Worker environment-variable bindings (DEPLOY_NAME_GPT35 / DEPLOY_NAME_GPT4).
const mapper = {
    'gpt-3.5-turbo': DEPLOY_NAME_GPT35,
    'gpt-4': DEPLOY_NAME_GPT4
};

// Azure OpenAI REST API version appended to every proxied request.
const apiVersion="2023-05-15"

// Service-Worker-style entry point: every incoming fetch is delegated to handleRequest.
addEventListener("fetch", (event) => {
  event.respondWith(handleRequest(event.request));
});
15 |
/**
 * Proxy an OpenAI-style request to the matching Azure OpenAI deployment.
 * Answers CORS preflight locally, serves /v1/models from the mapper, and
 * re-chunks streaming responses into "printer mode" via stream().
 */
async function handleRequest(request) {
  // CORS preflight is answered locally, never forwarded to Azure.
  if (request.method === 'OPTIONS') {
    return handleOPTIONS(request)
  }

  const url = new URL(request.url);
  // Some clients produce a leading double slash ("//v1/..."); drop the first one.
  if (url.pathname.startsWith("//")) {
    url.pathname = url.pathname.replace('/', "")
  }

  // Map the OpenAI-style pathname onto the Azure deployment sub-path.
  // (Original used `var` declared inside branches; `let` keeps the scope explicit.)
  let path;
  if (url.pathname === '/v1/chat/completions') {
    path = "chat/completions";
  } else if (url.pathname === '/v1/completions') {
    path = "completions";
  } else if (url.pathname === '/v1/models') {
    return handleModels(request)
  } else {
    return new Response('404 Not Found', { status: 404 })
  }

  // Only POST carries a JSON body; other methods leave body undefined and
  // are rejected below because no model can be resolved.
  let body;
  if (request.method === 'POST') {
    body = await request.json();
  }

  // Resolve the Azure deployment for the requested model.
  const deployName = mapper[body?.model] || '';
  if (deployName === '') {
    return new Response('Missing model mapper', { status: 403 });
  }

  const fetchAPI = `https://${resourceName}.openai.azure.com/openai/deployments/${deployName}/${path}?api-version=${apiVersion}`

  // The client's OpenAI bearer token is forwarded to Azure as the api-key header.
  const authKey = request.headers.get('Authorization');
  if (!authKey) {
    return new Response("Not allowed", { status: 403 });
  }

  const payload = {
    method: request.method,
    headers: {
      "Content-Type": "application/json",
      "api-key": authKey.replace('Bearer ', ''),
    },
    body: typeof body === 'object' ? JSON.stringify(body) : '{}',
  };

  // Re-wrap the upstream response so its headers are mutable, then open CORS.
  let response = await fetch(fetchAPI, payload);
  response = new Response(response.body, response);
  response.headers.set("Access-Control-Allow-Origin", "*");

  // Non-streaming responses pass straight through.
  if (body?.stream != true) {
    return response
  }

  // Streaming: stream() writes re-paced SSE events into the TransformStream
  // asynchronously while the readable side is returned to the client.
  const { readable, writable } = new TransformStream()
  stream(response.body, writable);
  return new Response(readable, response);
}
79 |
// Resolve after `ms` milliseconds; used to pace the printer-mode stream.
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
83 |
/**
 * Re-emit an SSE body one event at a time with a short delay between events
 * ("printer mode"), and terminate the stream with a trailing newline.
 * Events are delimited by a blank line ("\n\n") per the SSE format.
 */
async function stream(readable, writable) {
  const reader = readable.getReader();
  const writer = writable.getWriter();

  const encoder = new TextEncoder();
  const decoder = new TextDecoder();
  const delimiter = "\n\n"                    // SSE events end with a blank line
  const encodedNewline = encoder.encode("\n");

  let buffer = "";
  while (true) {
    let { value, done } = await reader.read();
    if (done) {
      break;
    }
    // stream: true keeps partial multi-byte sequences buffered inside the
    // decoder instead of emitting replacement characters at chunk boundaries.
    buffer += decoder.decode(value, { stream: true });
    let lines = buffer.split(delimiter);

    // Emit every complete event; the last array element may be a partial event.
    for (let i = 0; i < lines.length - 1; i++) {
      await writer.write(encoder.encode(lines[i] + delimiter));
      await sleep(20);                        // pacing delay for the printer effect
    }

    buffer = lines[lines.length - 1];
  }

  // Flush any bytes still held by the decoder (the original skipped this,
  // which could drop a trailing partial multi-byte character), then emit
  // whatever remains in the buffer.
  buffer += decoder.decode();
  if (buffer) {
    await writer.write(encoder.encode(buffer));
  }
  await writer.write(encodedNewline)
  await writer.close();
}
121 |
/**
 * Respond to /v1/models with an OpenAI-compatible model list built from the
 * keys of the deployment mapper. Timestamps and permissions are static.
 */
async function handleModels(request) {
  const permissionTemplate = {
    "id": "modelperm-M56FXnG1AsIr3SXq8BYPvXJA",
    "object": "model_permission",
    "created": 1679602088,
    "allow_create_engine": false,
    "allow_sampling": true,
    "allow_logprobs": true,
    "allow_search_indices": false,
    "allow_view": true,
    "allow_fine_tuning": false,
    "organization": "*",
    "group": null,
    "is_blocking": false
  };

  const data = {
    "object": "list",
    "data": Object.keys(mapper).map((key) => ({
      "id": key,
      "object": "model",
      "created": 1677610602,
      "owned_by": "openai",
      "permission": [{ ...permissionTemplate }],
      "root": key,
      "parent": null
    }))
  };

  return new Response(JSON.stringify(data, null, 2), {
    headers: { 'Content-Type': 'application/json' },
  });
}
158 |
/**
 * Answer CORS preflight requests with fully permissive headers and no body.
 */
async function handleOPTIONS(request) {
  const corsHeaders = {
    'Access-Control-Allow-Origin': '*',
    'Access-Control-Allow-Methods': '*',
    'Access-Control-Allow-Headers': '*'
  };
  return new Response(null, { headers: corsHeaders })
}
168 |
169 |
--------------------------------------------------------------------------------
/cf-openai-palm-proxy.js:
--------------------------------------------------------------------------------
// Fixed PaLM model names for the two endpoint flavours (not configurable
// via environment variables in this proxy).
const chatmodel = 'chat-bison-001';
const textmodel = 'text-bison-001';

// Service-Worker-style entry point: delegate every fetch to handleRequest.
addEventListener("fetch", (event) => {
  event.respondWith(handleRequest(event.request));
});
8 |
/**
 * Proxy an OpenAI-style request to the Google PaLM generative language API,
 * translating the request and response formats in both directions.
 */
async function handleRequest(request) {
  if (request.method === 'OPTIONS') {
    return handleOPTIONS(request)
  }

  // Choose the PaLM method and model from the OpenAI-style pathname.
  // (Original used `var` declared inside branches; `let` keeps scope explicit.)
  const url = new URL(request.url);
  let path;
  let deployName;
  if (url.pathname === '/v1/chat/completions') {
    path = "generateMessage";
    deployName = chatmodel;
  } else if (url.pathname === '/v1/completions') {
    path = "generateText";
    deployName = textmodel;
  } else {
    return new Response('404 Not Found', { status: 404 })
  }

  // Only POST carries a JSON body.
  let body;
  if (request.method === 'POST') {
    body = await request.json();
  }

  const authKey = request.headers.get('Authorization');
  if (!authKey) {
    return new Response("Not allowed", { status: 403 });
  }

  // The OpenAI bearer token is forwarded as the PaLM API key query parameter.
  const apiKey = authKey.replace('Bearer ', '');

  const fetchAPI = `https://generativelanguage.googleapis.com/v1beta2/models/${deployName}:${path}?key=${apiKey}`

  // Transform request body from OpenAI to PaLM format: the system message
  // becomes the prompt context, the remaining messages become the message list.
  const transformedBody = {
    temperature: body?.temperature,
    candidateCount: body?.n,
    topP: body?.top_p,
    prompt: {
      context: body?.messages?.find(msg => msg.role === 'system')?.content,
      messages: body?.messages?.filter(msg => msg.role !== 'system').map(msg => ({
        content: msg.content,
      })),
    },
  };

  const payload = {
    method: request.method,
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify(transformedBody),
  };

  const response = await fetch(fetchAPI, payload);
  const palmData = await response.json();

  // Transform response from PaLM to OpenAI format.
  const transformedResponse = transformResponse(palmData);

  // Shared permissive CORS headers (the original repeated this literal twice).
  const corsHeaders = {
    'Access-Control-Allow-Origin': '*',
    'Access-Control-Allow-Methods': '*',
    'Access-Control-Allow-Headers': '*'
  };

  if (body?.stream != true) {
    return new Response(JSON.stringify(transformedResponse), {
      headers: { 'Content-Type': 'application/json', ...corsHeaders }
    });
  }

  // Streaming: replay the complete PaLM answer as OpenAI-style SSE chunks.
  const { readable, writable } = new TransformStream();
  streamResponse(transformedResponse, writable);
  return new Response(readable, {
    headers: { 'Content-Type': 'text/event-stream', ...corsHeaders }
  });
}
86 |
/**
 * Replay a complete (non-streaming) OpenAI-style response as an SSE stream of
 * chat.completion.chunk events, one whitespace/word token per event.
 * Writes are awaited so the stream respects backpressure.
 */
async function streamResponse(response, writable) {
  const encoder = new TextEncoder();
  const writer = writable.getWriter();

  const content = response.choices[0].message.content;

  // Split the content into alternating whitespace/word chunks; each chunk is
  // sent as a separate SSE event.
  const chunks = content.match(/\s+|\S+/g) || [];
  for (let i = 0; i < chunks.length; i++) {
    const chunkResponse = {
      ...response,
      object: "chat.completion.chunk",
      choices: [{
        index: response.choices[0].index,
        delta: { ...response.choices[0].message, content: chunks[i] },
        finish_reason: i === chunks.length - 1 ? 'stop' : null // 'stop' only on the last chunk
      }],
      usage: null
    };
    await writer.write(encoder.encode(`data: ${JSON.stringify(chunkResponse)}\n\n`));
  }

  // SSE events must be terminated by a blank line; the original emitted a
  // single "\n" after [DONE], which some clients never recognise as complete.
  await writer.write(encoder.encode(`data: [DONE]\n\n`));

  await writer.close();
}
115 |
116 |
/**
 * Transform a PaLM API response into an OpenAI chat-completion response.
 * Tolerates empty candidate lists (e.g. safety-filtered answers) and
 * responses without a `messages` array (generateText has none).
 */
function transformResponse(palmData) {
  // PaLM may return no candidates; substitute a placeholder answer so
  // clients always receive exactly one choice.
  if (!palmData.candidates || palmData.candidates.length === 0) {
    palmData.candidates = [
      {
        "author": "1",
        "content": "Ooops, the model returned nothing"
      }
    ];
  }

  // generateText responses carry no `messages` array; the original crashed
  // with a TypeError here. Fall back to 0 for the placeholder token counts.
  const promptCount = palmData.messages?.length ?? 0;
  const completionCount = palmData.candidates.length;

  return {
    id: "chatcmpl-QXlha2FBbmROaXhpZUFyZUF3ZXNvbWUK",
    object: 'chat.completion',
    created: Math.floor(Date.now() / 1000), // current Unix timestamp
    model: 'gpt-3.5-turbo', // static model name
    usage: {
      // Placeholder counts: PaLM does not report token usage, so message and
      // candidate counts stand in for token counts.
      prompt_tokens: promptCount,
      completion_tokens: completionCount,
      total_tokens: promptCount + completionCount,
    },
    choices: palmData.candidates.map((candidate, index) => ({
      message: {
        role: 'assistant',
        content: candidate.content,
      },
      finish_reason: 'stop', // static finish reason
      index: index,
    })),
  };
}
150 |
/**
 * Answer CORS preflight requests with fully permissive headers.
 * The "pong" body is kept byte-for-byte from the original implementation.
 */
async function handleOPTIONS(request) {
  const corsHeaders = {
    'Access-Control-Allow-Origin': '*',
    'Access-Control-Allow-Methods': '*',
    'Access-Control-Allow-Headers': '*'
  };
  return new Response("pong", { headers: corsHeaders })
}
160 |
--------------------------------------------------------------------------------
/express-openai-azure-proxy.js:
--------------------------------------------------------------------------------
1 | /*!
2 | * This is the main file for the Azure GPT Proxy server.
3 | * 该文件是 Azure GPT 代理服务器的主文件。
4 | *
5 | * It sets up an Express server that proxies requests to the OpenAI GPT API.
6 | * 它设置了一个 Express 服务器,将请求代理到 OpenAI GPT API。
7 | *
8 | * The server listens on the specified port (default 3000) and proxies requests to the OpenAI API.
9 | * 服务器监听指定的端口(默认为 3000),并将请求代理到 OpenAI API。
10 | *
11 | * The server also provides a /v1/models endpoint that returns a list of available models.
12 | * 服务器还提供了一个 /v1/models 端点,返回可用模型的列表。
13 | *
14 | * @file This file contains the main server code.
15 | * @file 该文件包含主服务器代码。
16 | * @module index
17 | * @author ShinChven
18 | */
19 |
const express = require('express');
const axios = require('axios');
const { Transform, Readable } = require('stream');
const { TextDecoder, TextEncoder } = require('util');

const app = express();
const port = process.env.PORT || 3000;

// The name of your Azure OpenAI resource.
const resourceName = 'openai'; // change to your resource name

// Maps OpenAI model names to Azure deployment names.
const mapper = {
  'gpt-3.5-turbo': 'gpt-35-turbo', // change to your deployment name
  'gpt-3.5-turbo-16k': 'gpt-35-turbo-16k', // change to your deployment name
};

// Resolve a model name to its deployment name. Unknown models fall back to
// the 16k deployment, so this never returns an empty string.
const getModelDeployName = (name) => mapper[name] || 'gpt-35-turbo-16k';

// Azure OpenAI REST API version appended to every proxied request.
const apiVersion = '2023-05-15'; // change api version if needed

app.use(express.json());
46 |
/**
 * Handle requests to the /v1/chat/completions endpoint.
 * Proxies the request to the Azure deployment resolved from the model name.
 * Upstream failures are reported to the client instead of leaving the
 * connection hanging (the original had no rejection handling here).
 */
app.all('/v1/chat/completions', async (req, res) => {
  try {
    const fetchAPI = getFetchAPI(req, '/chat/completions');
    const response = await axios(getPayload(req, res, fetchAPI));
    handleResponse(req, res, response);
  } catch (err) {
    // getPayload may already have answered with 403; only reply once.
    if (!res.headersSent) {
      res.status(err.response?.status ?? 502).send(String(err));
    }
  }
});
56 |
/**
 * Handle requests to the /v1/completions endpoint.
 * Proxies the request to the Azure deployment resolved from the model name.
 * Upstream failures are reported to the client instead of leaving the
 * connection hanging (the original had no rejection handling here).
 */
app.all('/v1/completions', async (req, res) => {
  try {
    const fetchAPI = getFetchAPI(req, '/completions');
    const response = await axios(getPayload(req, res, fetchAPI));
    handleResponse(req, res, response);
  } catch (err) {
    // getPayload may already have answered with 403; only reply once.
    if (!res.headersSent) {
      res.status(err.response?.status ?? 502).send(String(err));
    }
  }
});
66 |
/**
 * Handle requests to the /v1/models endpoint.
 * Returns an OpenAI-compatible model list built from the mapper keys;
 * timestamps and permissions are static.
 */
app.all('/v1/models', async (req, res) => {
  const permissionTemplate = {
    id: 'modelperm-M56FXnG1AsIr3SXq8BYPvXJA',
    object: 'model_permission',
    created: 1679602088,
    allow_create_engine: false,
    allow_sampling: true,
    allow_logprobs: true,
    allow_search_indices: false,
    allow_view: true,
    allow_fine_tuning: false,
    organization: '*',
    group: null,
    is_blocking: false,
  };

  const data = {
    object: 'list',
    data: Object.keys(mapper).map((key) => ({
      id: key,
      object: 'model',
      created: 1677610602,
      owned_by: 'openai',
      permission: [{ ...permissionTemplate }],
      root: key,
      parent: null,
    })),
  };

  res.setHeader('Content-Type', 'application/json');
  res.send(JSON.stringify(data, null, 2));
});
108 |
/**
 * Fallback for every route not handled above.
 */
app.all('*', (req, res) => res.status(404).send('404 Not Found'));
116 |
/**
 * Build the Azure OpenAI endpoint URL for the given request.
 * @param {Object} req - The request object
 * @param {string} path - The endpoint path (e.g. '/chat/completions')
 * @returns {string} - The API endpoint URL
 * @throws {Error} if no deployment can be resolved for the requested model
 */
function getFetchAPI(req, path) {
  const modelName = req.body?.model;
  const deployName = getModelDeployName(modelName);

  // Defensive: getModelDeployName currently always falls back to a default
  // deployment, so this branch is unreachable. The original referenced an
  // undefined `res` here, which would have thrown a ReferenceError instead
  // of reporting the problem; throwing lets the route handler answer.
  if (deployName === '') {
    throw new Error('Missing model mapper');
  }

  return `https://${resourceName}.openai.azure.com/openai/deployments/${deployName}${path}?api-version=${apiVersion}`;
}
134 |
/**
 * Build the axios request config for proxying the given request.
 * @param {Object} req - The request object
 * @param {Object} res - The response object (used to reject unauthenticated requests)
 * @param {string} url - The API endpoint URL
 * @returns {Object} - The axios request config
 * @throws {Error} if the request carries no Authorization header
 */
function getPayload(req, res, url) {
  const authKey = req.headers.authorization;
  if (!authKey) {
    // The original fell through after sending 403 and crashed with a
    // TypeError on authKey.replace; answer the client, then abort cleanly.
    res.status(403).send('Not allowed');
    throw new Error('Not allowed: missing Authorization header');
  }

  return {
    method: req.method,
    url,
    headers: {
      'Content-Type': 'application/json',
      // Azure expects the bearer token in the api-key header.
      'api-key': authKey.replace('Bearer ', ''),
    },
    data: JSON.stringify(req.body) || '{}',
    responseType: 'stream',
  };
}
160 |
/**
 * Forward an upstream API response to the client: set CORS and event-stream
 * headers, mirror the upstream status code, and pipe the upstream body
 * straight through.
 * @param {Object} req - The request object (unused, kept for symmetry)
 * @param {Object} res - The response object
 * @param {Object} axiosResponse - The response from the API
 */
async function handleResponse(req, res, axiosResponse) {
  const passthroughHeaders = {
    'Access-Control-Allow-Origin': '*',
    'Content-Type': 'text/event-stream',
  };
  for (const [name, value] of Object.entries(passthroughHeaders)) {
    res.setHeader(name, value);
  }

  res.status(axiosResponse.status);

  // Stream the upstream body directly into the client response.
  axiosResponse.data.pipe(res);
}
177 |
/**
 * Handle uncaught exceptions so a single failed request does not bring the
 * whole proxy process down; the error is logged and the process keeps serving.
 */
process.on('uncaughtException', (err) => {
  console.error(err);
});

// Start the HTTP server on the configured port (PORT env var or 3000).
app.listen(port, () => {
  console.log(`Server listening on port ${port}`);
});
189 |
--------------------------------------------------------------------------------