├── README.md
└── production.js

/README.md:
--------------------------------------------------------------------------------
# ChatGPT-CloudFlareWorkers

**Enjoy ChatGPT on Cloudflare Workers!**

## Deploy

Before deploying the worker, add an environment variable named `API_KEY` and set it to your OpenAI API key.

## Usage

### Chat

The request path is `/chat`, and the request method is `POST`.

You can pass some request parameters through headers:
- `temperature`

The request body may be either plain text or JSON.

If you send plain text, it is automatically wrapped in a single user chat message.

If you send JSON, it is parsed and forwarded as the chat messages array.

[View the description of the chat messages](https://platform.openai.com/docs/api-reference/chat)

### Images

The request path is `/images`, and the request method is `POST`.

You can pass some request parameters through headers:
- `n`
- `size`
- `response_format`

Specify the prompt in the request body.
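### Example requests

As a quick illustration (not part of the original repository), here is how a client — Node 18+ or another Worker, for example — might call the two endpoints with `fetch`. The base URL `https://your-worker.example.workers.dev` is a placeholder for your deployed worker's address.

```js
const BASE = 'https://your-worker.example.workers.dev' // placeholder: replace with your worker URL

// Plain-text body: the worker wraps it in a single user message.
const reply = await fetch(`${BASE}/chat`, {
  method: 'POST',
  headers: { temperature: '0.7' },
  body: 'Write a haiku about the sea.',
}).then(r => r.text())

// JSON body: forwarded as the messages array, so roles and history can be controlled.
const reply2 = await fetch(`${BASE}/chat`, {
  method: 'POST',
  body: JSON.stringify([
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Summarize the plot of Hamlet in one sentence.' },
  ]),
}).then(r => r.text())

// Image generation: prompt in the body, options in headers; the response is a JSON array.
const images = await fetch(`${BASE}/images`, {
  method: 'POST',
  headers: { n: '2', size: '512x512', response_format: 'url' },
  body: 'A watercolor painting of a lighthouse at dawn',
}).then(r => r.json())
```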
--------------------------------------------------------------------------------
/production.js:
--------------------------------------------------------------------------------
export default {
  async fetch(request, env) {
    // Only POST requests are accepted.
    if (request.method !== 'POST') {
      return new Response('Invalid Method', { status: 405 })
    }

    // Route by pathname.
    switch (new URL(request.url).pathname) {
      case '/chat':
        return this.chat(request, env)
      case '/images':
        return this.images(request, env)
      default:
        return new Response('Invalid Route', { status: 404 })
    }
  },

  async chat(request, env) {
    let text = await request.text()
    let messages

    // A JSON body is used as the messages array directly;
    // plain text is wrapped in a single user message.
    try {
      messages = JSON.parse(text)
    } catch {
      messages = [{ role: 'user', content: text }]
    }

    // Optional "temperature" header; defaults to 1.
    let temperature = parseFloat(request.headers.get('temperature'))

    if (isNaN(temperature)) {
      temperature = 1
    }

    let url = 'https://api.openai.com/v1/chat/completions'
    let options = {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${env.API_KEY}`,
      },
      body: JSON.stringify({
        model: 'gpt-3.5-turbo',
        messages: messages,
        temperature: temperature,
      }),
    }
    let response = await fetch(url, options)
    let data = await response.json()

    if (!data.choices) {
      return new Response(data.error.message, { status: 400 })
    }

    // Return only the text of the first completion choice.
    return new Response(data.choices[0].message.content.trim())
  },

  async images(request, env) {
    let prompt = await request.text()

    // Optional "n" header; defaults to 1.
    let n = parseInt(request.headers.get('n'))

    if (isNaN(n)) {
      n = 1
    }

    // Optional "size" and "response_format" headers with API defaults.
    let size = request.headers.get('size') ?? '1024x1024'
    let response_format = request.headers.get('response_format') ?? 'url'

    let url = 'https://api.openai.com/v1/images/generations'
    let options = {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${env.API_KEY}`,
      },
      body: JSON.stringify({
        prompt: prompt,
        n: n,
        size: size,
        response_format: response_format,
      }),
    }
    let response = await fetch(url, options)
    let data = await response.json()

    if (!data.data) {
      return new Response(data.error.message, { status: 400 })
    }

    // Return the "data" array (URLs or base64 images) as JSON.
    return new Response(JSON.stringify(data.data))
  },
}
--------------------------------------------------------------------------------