├── bin └── api_proxy ├── LICENSE ├── README.zh.md ├── api_proxy.go └── README.md /bin/api_proxy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nephen/simple-openai-api-proxy/HEAD/bin/api_proxy -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 来风 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.zh.md: -------------------------------------------------------------------------------- 1 | ### 说明 2 | 可以不用搭环境,自己有境外的vps就行,最好是openai支持的地区的vps,下载bin里面的[执行文件](./bin/api_proxy)直接就能跑,最简单的api proxy方式,最重要的是支持SSE,让客户端请求时响应得更加迅速,也提供了golang的源码,需要定制的可以自行完善。 3 | ```sh 4 | ./api_proxy -daemon -port 9000 # 最好开启daemon守护进程模式 5 | ``` 6 | 7 | ### 客户端使用方法 8 | python使用案例: 9 | ```python 10 | import os 11 | import openai 12 | 13 | openai.api_key = YOUR-API-KEY 14 | openai.api_base = "http://host:port/v1" # 一定要加v1 15 | 16 | for resp in openai.ChatCompletion.create( 17 | model="gpt-3.5-turbo", 18 | messages=[ 19 | {"role": "user", "content": "冒泡排序"} 20 | ], 21 | stream = True): # 流式输出,支持SSE 22 | if 'content' in resp.choices[0].delta: 23 | print(resp.choices[0].delta.content, end="", flush=True) # flush及时打印 24 | ``` 25 | 26 | js使用案例,以 https://www.npmjs.com/package/chatgpt 为例: 27 | ```js 28 | chatApi= new gpt.ChatGPTAPI({ 29 | apiKey: 'sk.....:', 30 | apiBaseUrl: "http://host:port", // 传递代理地址 31 | }); 32 | ``` 33 | 34 | (推荐:)服务器使用[ChatGPT-Next-Web](https://github.com/Yidadaa/ChatGPT-Next-Web) 的例子,设置key后,可以设置code密码来访问,api则使用当前的代理,非常好用,参考网页https://gpt.nephen.cn/。 35 | ```sh 36 | docker pull nephen2023/chatgpt-next-web:v1.7.1 37 | docker run -d -p 3000:3000 -e OPENAI_API_KEY="" -e CODE="" -e BASE_URL="ip:port" -e PROTOCOL="http" nephen2023/chatgpt-next-web:v1.7.1 38 | ``` 39 | 40 | ### 支持 41 | ![](https://nephen-blog.oss-cn-beijing.aliyuncs.com/post/20230315130826.png) 42 | -------------------------------------------------------------------------------- /api_proxy.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "log" 6 | "net/http" 7 | "net/http/httputil" 8 | "os" 9 | "os/exec" 10 | "strconv" 11 | ) 12 | 13 | const ( 14 | DAEMON = "daemon" 15 | ) 16 | 17 | var ( 18 | port int 19 | damaen bool 20 | ) 21 | 22 | func 
init() { 23 | flag.IntVar(&port, "port", 8080, "监听端口") 24 | flag.BoolVar(&damaen, DAEMON, false, "是否后台运行") 25 | } 26 | 27 | func ReverseProxyHandler(w http.ResponseWriter, r *http.Request) { 28 | log.Printf("[*] receive a request from %s, request header: %s: \n", r.RemoteAddr, r.Header) 29 | target := "api.openai.com" 30 | director := func(req *http.Request) { 31 | req.URL.Scheme = "https" 32 | req.URL.Host = target 33 | req.Host = target 34 | } 35 | proxy := &httputil.ReverseProxy{Director: director} 36 | proxy.ServeHTTP(w, r) 37 | log.Printf("[*] receive the destination website response header: %s\n", w.Header()) 38 | } 39 | 40 | func StripSlice(slice []string, element string) []string { 41 | for i := 0; i < len(slice); { 42 | if slice[i] == element && i != len(slice)-1 { 43 | slice = append(slice[:i], slice[i+1:]...) 44 | } else if slice[i] == element && i == len(slice)-1 { 45 | slice = slice[:i] 46 | } else { 47 | i++ 48 | } 49 | } 50 | return slice 51 | } 52 | 53 | func SubProcess(args []string) *exec.Cmd { 54 | cmd := exec.Command(args[0], args[1:]...) 
55 | cmd.Stdin = os.Stdin 56 | cmd.Stdout = os.Stdout 57 | cmd.Stderr = os.Stderr 58 | err := cmd.Start() 59 | if err != nil { 60 | log.Printf("[-] Error: %s\n", err) 61 | } 62 | return cmd 63 | } 64 | 65 | func main() { 66 | flag.Parse() 67 | log.Printf("[*] PID: %d PPID: %d ARG: %s\n", os.Getpid(), os.Getppid(), os.Args) 68 | if damaen { 69 | SubProcess(StripSlice(os.Args, "-"+DAEMON)) 70 | log.Printf("[*] Daemon running in PID: %d PPID: %d\n", os.Getpid(), os.Getppid()) 71 | os.Exit(0) 72 | } 73 | log.Printf("[*] Forever running in PID: %d PPID: %d\n", os.Getpid(), os.Getppid()) 74 | log.Printf("[*] Starting server at port %v\n", port) 75 | if err := http.ListenAndServe(":"+strconv.Itoa(port), http.HandlerFunc(ReverseProxyHandler)); err != nil { 76 | log.Fatal(err) 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > [中文说明](./README.zh.md) 2 | 3 | ### description 4 | You don’t need to set up an environment, just have an overseas vps, preferably a vps in an area supported by openai, download the [executive file] (./bin/api_proxy) in the bin and run it directly, the simplest api proxy method, the most important The most important thing is to support SSE, so that the client can respond more quickly when requesting, and also provides the source code of golang, which can be improved by itself if it needs to be customized. 
5 | ```sh
6 | ./api_proxy -daemon -port 9000 # It is best to enable daemon mode
7 | ```
8 | 
9 | ### How to use the client
10 | Python use case:
11 | ```python
12 | import os
13 | import openai
14 | 
15 | openai.api_key = YOUR-API-KEY
16 | openai.api_base = "http://host:port/v1" # Be sure to add v1
17 | 
18 | for resp in openai.ChatCompletion.create(
19 |     model="gpt-3.5-turbo",
20 |     messages=[
21 |         {"role": "user", "content": "Bubble Sort"}
22 |     ],
23 |     stream = True): # Streaming output, supports SSE
24 |     if 'content' in resp.choices[0].delta:
25 |         print(resp.choices[0].delta.content, end="", flush=True) # flush prints promptly
26 | ```
27 | 
28 | JS use case, taking https://www.npmjs.com/package/chatgpt as an example:
29 | ```js
30 | chatApi= new gpt.ChatGPTAPI({
31 |     apiKey: 'sk.....:',
32 |     apiBaseUrl: "http://host:port", // proxy address to use
33 | });
34 | ```
35 | 
36 | (Recommended:) An example of using [ChatGPT-Next-Web](https://github.com/Yidadaa/ChatGPT-Next-Web) on the server: after setting the key, you can set a code password for access, and the api uses the current proxy, which is very easy to use; see the webpage https://gpt.nephen.cn/.
37 | ```sh
38 | docker pull nephen2023/chatgpt-next-web:v1.7.1
39 | docker run -d -p 3000:3000 -e OPENAI_API_KEY="" -e CODE="" -e BASE_URL="ip:port" -e PROTOCOL="http" nephen2023/chatgpt-next-web:v1.7.1
40 | ```
41 | 
42 | ### Support
43 | ![](https://nephen-blog.oss-cn-beijing.aliyuncs.com/post/20230315130826.png)
44 | 
--------------------------------------------------------------------------------