├── .github
│   └── workflows
│       └── docker_build_push.yml
├── Dockerfile
├── LICENSE
├── README.md
├── README_en.md
├── cf-openai-azure-proxy.js
└── cf-openai-palm-proxy.js
/.github/workflows/docker_build_push.yml:
--------------------------------------------------------------------------------
name: Build and Push Docker Image

on:
  push:
    tags:
      - 'v*'

jobs:
  build_and_push:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v2
        with:
          context: .
          push: true
          tags: |
            haibbo/cf-openai-azure-proxy:${{ github.ref_name }}
            haibbo/cf-openai-azure-proxy:latest
          platforms: linux/amd64,linux/arm64/v8
      - name: Create GitHub Release
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.ACTIONS_RELEASE_TOKEN }}
        with:
          tag_name: ${{ github.ref }}
          release_name: Release ${{ github.ref }}
          body: |
            Release notes for ${{ github.ref }}

            ```sh
            docker run -d -p 8787:8787 --name cf-openai-azure-proxy \
              --env RESOURCE_NAME=codegpt \
              --env DEPLOY_NAME_GPT35=gpt3 \
              --env DEPLOY_NAME_GPT4=gpt4 \
              haibbo/cf-openai-azure-proxy:latest
            ```

            Docker image is available at:
            - `haibbo/cf-openai-azure-proxy:${{ github.ref_name }}`
            - `haibbo/cf-openai-azure-proxy:latest`

            Enjoy!
          draft: false
          prerelease: false
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM node:18.12-slim

WORKDIR /app

# Install the Cloudflare Workers CLI tool (wrangler)
RUN npm install -g wrangler@2.15.0

ENV WRANGLER_SEND_METRICS=false

ENV DEPLOY_NAME_GPT35=""
ENV DEPLOY_NAME_GPT4=""

# Copy the Worker script into the image
COPY cf-openai-azure-proxy.js .

# Start the local development server
CMD wrangler dev cf-openai-azure-proxy.js --local --var RESOURCE_NAME:$RESOURCE_NAME DEPLOY_NAME_GPT35:$DEPLOY_NAME_GPT35 DEPLOY_NAME_GPT4:$DEPLOY_NAME_GPT4
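
# Example usage (a sketch; resource and deployment names are placeholders, taken
# from the examples elsewhere in this repo, so replace them with your own):
#   docker build -t cf-openai-azure-proxy .
#   docker run -d -p 8787:8787 \
#     --env RESOURCE_NAME=codegpt \
#     --env DEPLOY_NAME_GPT35=gpt3 \
#     --env DEPLOY_NAME_GPT4=gpt4 \
#     cf-openai-azure-proxy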
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 haibbo

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# cf-openai-azure-proxy

English |
中文

> Most OpenAI clients do not support Azure OpenAI Service, but applying for Azure OpenAI Service and binding a card are both very simple, and it also comes with a free quota. This script uses a free Cloudflare Worker as a proxy, so that clients which support OpenAI can use Azure OpenAI Service directly.

### Supported models:
- GPT-3
- GPT-4
- DALL-E-3

Adding more model variants is very easy; see the usage instructions below.

### Project notes:
- Can I use this without a server of my own?
  - Yes. The script runs on a Cloudflare Worker: no server, no card binding, and 100,000 requests per day for free.
- Can I use this without my own domain name?
  - Yes, that also works, see: https://github.com/haibbo/cf-openai-azure-proxy/issues/3
- Printer mode (streaming output):
  - Azure OpenAI Service returns its reply segment by segment.
  - When relaying to the client, this project splits those segments into individual messages and sends them one by one, producing the printer-like effect.
- The project can also be deployed with Docker (based on wrangler); see the example below.

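For the Docker route, the image published by the release workflow can be started roughly like this (a sketch; the resource and deployment names are placeholders taken from this repo's examples, replace them with your own):

```sh
docker run -d -p 8787:8787 --name cf-openai-azure-proxy \
  --env RESOURCE_NAME=codegpt \
  --env DEPLOY_NAME_GPT35=gpt3 \
  --env DEPLOY_NAME_GPT4=gpt4 \
  haibbo/cf-openai-azure-proxy:latest
```
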
### Deployment
Proxy OpenAI requests to Azure OpenAI Service. Deployment steps:

1. Register and log in to your Cloudflare account.
2. Create a new Cloudflare Worker.
3. Copy and paste [cf-openai-azure-proxy.js](./cf-openai-azure-proxy.js) into the Cloudflare Worker editor.
4. Adjust the values of resourceName and the deployment mapper, either by editing them directly or via environment variables.
5. Save and deploy the Cloudflare Worker.
6. **Optional** Bind a custom domain: on the Worker details page, go to Trigger -> Custom Domains and add a custom domain for this Worker (see https://github.com/haibbo/cf-openai-azure-proxy/issues/3).


### Usage

First obtain the resourceName and the deployment mapper by logging in to the Azure portal:



#### There are two ways to set them:
- Modify their values directly, for example:
```js
// The name of your Azure OpenAI Resource.
const resourceName="codegpt"

// deployment model mapper
const mapper = {
  'gpt-3.5-turbo': 'gpt3',
  'gpt-4': 'gpt4',
  'dall-e-3': 'dalle3'
};
```
Additional mapping rules can be appended in the same format.
- Or use the Cloudflare Worker console: go to Workers script > Settings > Add variable under Environment Variables; the variable names the script expects are listed below.

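If you go the environment-variable route, these are the names the worker script reads (the values shown are only placeholders):

```sh
RESOURCE_NAME=codegpt        # your Azure OpenAI resource name
DEPLOY_NAME_GPT35=gpt3       # deployment used for the gpt-3.5-turbo models
DEPLOY_NAME_GPT4=gpt4        # deployment used for the gpt-4 models
DEPLOY_NAME_DALLE3=dalle3    # optional; the script falls back to "dalle3" when unset
```
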
### Client
Using OpenCat as an example: fill in the custom API domain with the domain you bound in step 6:



I have tried a number of clients; if you run into problems with another client, feel free to open an issue.

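To sanity-check the proxy without a GUI client, a minimal request looks roughly like this (the domain and key below are placeholders; the key is your Azure OpenAI API key):

```sh
curl https://your-proxy.example.com/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_AZURE_OPENAI_API_KEY" \
  -d '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello"}]}'
```
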
--------------------------------------------------------------------------------
/README_en.md:
--------------------------------------------------------------------------------
# cf-openai-azure-proxy

English |
中文

> Most OpenAI clients do not support Azure OpenAI Service, but applying for Azure OpenAI Service is very simple, and it also provides a free quota. This script uses a free Cloudflare Worker as a proxy, allowing OpenAI-compatible clients to use Azure OpenAI Service directly.

This script proxies requests to Azure OpenAI Service for OpenAI clients. The code deployment steps are as follows:

1. Register and log in to your Cloudflare account.
2. Create a new Cloudflare Worker.
3. Copy and paste cf-openai-azure-proxy.js into the Cloudflare Worker editor.
4. Adjust the values of **resourceName** and the deployment **mapper**, either by direct modification or using environment variables.
5. Save and deploy the Cloudflare Worker.
6. Optional: bind a custom domain name by adding one for this worker on the Worker details page -> Trigger -> Custom Domains (see https://github.com/haibbo/cf-openai-azure-proxy/issues/3).

## Instructions
First obtain the resourceName and deployment mapper, and log in to the Azure portal:



#### There are two ways to do this:
- Directly modify their values, such as:
```js
// The name of your Azure OpenAI Resource.
const resourceName="codegpt"

const mapper = {
  'gpt-3.5-turbo': 'gpt3',
  'gpt-4': 'gpt4'
};
```
Other mapping rules can be added in the same format.
- **OR** go to the Cloudflare Worker console, navigate to Workers script > Settings > Add variable under Environment Variables; the variable names the script expects are listed below.

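If you use environment variables, these are the names the worker script reads (the values shown are only placeholders):

```sh
RESOURCE_NAME=codegpt        # your Azure OpenAI resource name
DEPLOY_NAME_GPT35=gpt3       # deployment used for the gpt-3.5-turbo models
DEPLOY_NAME_GPT4=gpt4        # deployment used for the gpt-4 models
DEPLOY_NAME_DALLE3=dalle3    # optional; the script falls back to "dalle3" when unset
```
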
## Client
Take OpenCat as an example: fill in the custom API domain name with the domain name bound in step 6:


I have tried multiple clients. If you encounter problems with other clients, please feel free to create an issue.

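To sanity-check the proxy without a GUI client, a minimal request looks roughly like this (the domain and key below are placeholders; the key is your Azure OpenAI API key):

```sh
curl https://your-proxy.example.com/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_AZURE_OPENAI_API_KEY" \
  -d '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello"}]}'
```
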
QA:

- Do I need a server to use this?
  - No. This script runs on a Cloudflare Worker and does not require a server or a bound card. It is free for up to 100,000 requests per day.
- Do I need my own domain name to use this?
  - No, it is not necessary. Refer to: https://github.com/haibbo/cf-openai-azure-proxy/issues/3

--------------------------------------------------------------------------------
/cf-openai-azure-proxy.js:
--------------------------------------------------------------------------------
// The name of your Azure OpenAI Resource.
const resourceName=RESOURCE_NAME

// The deployment name you chose when you deployed the model.
const mapper = {
  'gpt-3.5-turbo': DEPLOY_NAME_GPT35,
  'gpt-3.5-turbo-0613': DEPLOY_NAME_GPT35,
  'gpt-3.5-turbo-1106': DEPLOY_NAME_GPT35,
  'gpt-3.5-turbo-16k': DEPLOY_NAME_GPT35,
  'gpt-4': DEPLOY_NAME_GPT4,
  'gpt-4-0613': DEPLOY_NAME_GPT4,
  'gpt-4-1106-preview': DEPLOY_NAME_GPT4,
  'gpt-4-32k': DEPLOY_NAME_GPT4,
  'dall-e-3': typeof DEPLOY_NAME_DALLE3 !== 'undefined' ? DEPLOY_NAME_DALLE3 : "dalle3",
};
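// Note: RESOURCE_NAME and the DEPLOY_NAME_* values above are expected to be
// provided as Worker environment variables (Workers script > Settings >
// Variables), or via `wrangler dev --var ...` when running locally or in Docker.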

const apiVersion="2023-12-01-preview"

addEventListener("fetch", (event) => {
  event.respondWith(handleRequest(event.request));
});

async function handleRequest(request) {
  if (request.method === 'OPTIONS') {
    return handleOPTIONS(request)
  }

  const url = new URL(request.url);
  if (url.pathname.startsWith("//")) {
    url.pathname = url.pathname.replace('/',"")
  }
  if (url.pathname === '/v1/chat/completions') {
    var path="chat/completions"
  } else if (url.pathname === '/v1/images/generations') {
    var path="images/generations"
  } else if (url.pathname === '/v1/completions') {
    var path="completions"
  } else if (url.pathname === '/v1/models') {
    return handleModels(request)
  } else {
    return new Response('404 Not Found', { status: 404 })
  }

  let body;
  if (request.method === 'POST') {
    body = await request.json();
  }

  const modelName = body?.model;
  const deployName = mapper[modelName] || ''

  if (deployName === '') {
    return new Response('Missing model mapper', {
      status: 403
    });
  }
  const fetchAPI = `https://${resourceName}.openai.azure.com/openai/deployments/${deployName}/${path}?api-version=${apiVersion}`

  const authKey = request.headers.get('Authorization');
  if (!authKey) {
    return new Response("Not allowed", {
      status: 403
    });
  }

  const payload = {
    method: request.method,
    headers: {
      "Content-Type": "application/json",
      "api-key": authKey.replace('Bearer ', ''),
    },
    body: typeof body === 'object' ? JSON.stringify(body) : '{}',
  };

  let response = await fetch(fetchAPI, payload);
  response = new Response(response.body, response);
  response.headers.set("Access-Control-Allow-Origin", "*");

  if (body?.stream != true){
    return response
  }

  let { readable, writable } = new TransformStream()
  stream(response.body, writable);
  return new Response(readable, response);

}

function sleep(ms) {
  return new Promise(resolve => setTimeout(resolve, ms));
}

// support printer mode and add a trailing newline
async function stream(readable, writable) {
  const reader = readable.getReader();
  const writer = writable.getWriter();

  const encoder = new TextEncoder();
  const decoder = new TextDecoder();
  const newline = "\n";
  const delimiter = "\n\n"
  const encodedNewline = encoder.encode(newline);

  let buffer = "";
  while (true) {
    let { value, done } = await reader.read();
    if (done) {
      break;
    }
    // stream: true is important here; it keeps an incomplete multi-byte
    // sequence buffered between reads instead of emitting a broken character.
    buffer += decoder.decode(value, { stream: true });
    let lines = buffer.split(delimiter);

    // Loop through all but the last line, which may be incomplete.
    for (let i = 0; i < lines.length - 1; i++) {
      await writer.write(encoder.encode(lines[i] + delimiter));
      await sleep(20);
    }

    buffer = lines[lines.length - 1];
  }

  if (buffer) {
    await writer.write(encoder.encode(buffer));
  }
  await writer.write(encodedNewline)
  await writer.close();
}

async function handleModels(request) {
  const data = {
    "object": "list",
    "data": []
  };

  for (let key in mapper) {
    data.data.push({
      "id": key,
      "object": "model",
      "created": 1677610602,
      "owned_by": "openai",
      "permission": [{
        "id": "modelperm-M56FXnG1AsIr3SXq8BYPvXJA",
        "object": "model_permission",
        "created": 1679602088,
        "allow_create_engine": false,
        "allow_sampling": true,
        "allow_logprobs": true,
        "allow_search_indices": false,
        "allow_view": true,
        "allow_fine_tuning": false,
        "organization": "*",
        "group": null,
        "is_blocking": false
      }],
      "root": key,
      "parent": null
    });
  }

  const json = JSON.stringify(data, null, 2);
  return new Response(json, {
    headers: { 'Content-Type': 'application/json' },
  });
}

async function handleOPTIONS(request) {
  return new Response(null, {
    headers: {
      'Access-Control-Allow-Origin': '*',
      'Access-Control-Allow-Methods': '*',
      'Access-Control-Allow-Headers': '*'
    }
  })
}

--------------------------------------------------------------------------------
/cf-openai-palm-proxy.js:
--------------------------------------------------------------------------------
// The deployment name you chose when you deployed the model.
const chatmodel = 'chat-bison-001';
const textmodel = 'text-bison-001';

addEventListener("fetch", (event) => {
  event.respondWith(handleRequest(event.request));
});

async function handleRequest(request) {
  if (request.method === 'OPTIONS') {
    return handleOPTIONS(request)
  }

  const url = new URL(request.url);
  if (url.pathname === '/v1/chat/completions') {
    var path = "generateMessage"
    var deployName = chatmodel;
  } else if (url.pathname === '/v1/completions') {
    var path = "generateText"
    var deployName = textmodel;
  } else {
    return new Response('404 Not Found', { status: 404 })
  }

  let body;
  if (request.method === 'POST') {
    body = await request.json();
  }

  const authKey = request.headers.get('Authorization');
  if (!authKey) {
    return new Response("Not allowed", { status: 403 });
  }

  // Remove 'Bearer ' from the start of authKey
  const apiKey = authKey.replace('Bearer ', '');

  const fetchAPI = `https://generativelanguage.googleapis.com/v1beta2/models/${deployName}:${path}?key=${apiKey}`

  // Transform request body from OpenAI to PaLM format
  const transformedBody = {
    temperature: body?.temperature,
    candidateCount: body?.n,
    topP: body?.top_p,
    prompt: {
      context: body?.messages?.find(msg => msg.role === 'system')?.content,
      messages: body?.messages?.filter(msg => msg.role !== 'system').map(msg => ({
        // author: msg.role === 'user' ? '0' : '1',
        content: msg.content,
      })),
    },
  };

  const payload = {
    method: request.method,
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify(transformedBody),
  };

  const response = await fetch(fetchAPI, payload);
  const palmData = await response.json();

  // Transform response from PaLM to OpenAI format
  const transformedResponse = transformResponse(palmData);

  if (body?.stream != true){
    return new Response(JSON.stringify(transformedResponse), {
      headers: {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': '*',
        'Access-Control-Allow-Headers': '*'
      }
    });
  } else {
    let { readable, writable } = new TransformStream();
    streamResponse(transformedResponse, writable);
    return new Response(readable, {
      headers: {
        'Content-Type': 'text/event-stream',
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': '*',
        'Access-Control-Allow-Headers': '*'
      }
    });
  }
}

function streamResponse(response, writable) {
  let encoder = new TextEncoder();
  let writer = writable.getWriter();

  let content = response.choices[0].message.content;

  // Split the content into chunks, and send each chunk as a separate event
  let chunks = content.match(/\s+|\S+/g) || [];
  chunks.forEach((chunk, i) => {
    let chunkResponse = {
      ...response,
      object: "chat.completion.chunk",
      choices: [{
        index: response.choices[0].index,
        delta: { ...response.choices[0].message, content: chunk },
        finish_reason: i === chunks.length - 1 ? 'stop' : null // Set 'stop' for the last chunk
      }],
      usage: null
    };

    writer.write(encoder.encode(`data: ${JSON.stringify(chunkResponse)}\n\n`));
  });

  // Write the done signal
  // Terminate the SSE stream the way OpenAI does, with a blank line after [DONE]
  writer.write(encoder.encode(`data: [DONE]\n\n`));

  writer.close();
}


// Function to transform the response
function transformResponse(palmData) {
  // Check if the 'candidates' array exists and if it's not empty
  if (!palmData.candidates || palmData.candidates.length === 0) {
    // If it doesn't exist or is empty, create a default candidate message
    palmData.candidates = [
      {
        "author": "1",
        "content": "Ooops, the model returned nothing"
      }
    ];
  }

  return {
    id: "chatcmpl-QXlha2FBbmROaXhpZUFyZUF3ZXNvbWUK",
    object: 'chat.completion',
    created: Math.floor(Date.now() / 1000), // Current Unix timestamp
    model: 'gpt-3.5-turbo', // Static model name
    usage: {
      prompt_tokens: palmData.messages.length, // This is a placeholder. Replace with actual token count if available
      completion_tokens: palmData.candidates.length, // This is a placeholder. Replace with actual token count if available
      total_tokens: palmData.messages.length + palmData.candidates.length, // This is a placeholder. Replace with actual token count if available
    },
    choices: palmData.candidates.map((candidate, index) => ({
      message: {
        role: 'assistant',
        content: candidate.content,
      },
      finish_reason: 'stop', // Static finish reason
      index: index,
    })),
  };
}

async function handleOPTIONS(request) {
  return new Response("pong", {
    headers: {
      'Access-Control-Allow-Origin': '*',
      'Access-Control-Allow-Methods': '*',
      'Access-Control-Allow-Headers': '*'
    }
  })
}

--------------------------------------------------------------------------------