├── tests │   ├── .gitignore │   ├── conftest.py │   ├── utils.py │   ├── test_http.py │   ├── test_env.py │   └── test_api.py ├── .vercelignore ├── openai_forward │   ├── content │   │   ├── __init__.py │   │   ├── whisper.py │   │   └── chat.py │   ├── routers │   │   ├── __init__.py │   │   ├── v1.py │   │   └── schemas.py │   ├── __init__.py │   ├── app.py │   ├── openai.py │   ├── __main__.py │   ├── anthropic.py │   ├── config.py │   ├── base.py │   ├── tool.py │   └── nai.py ├── render.yaml ├── .github │   ├── data │   │   └── whisper.m4a │   ├── workflows │   │   ├── python-publish.yml │   │   ├── gh-release.yml │   │   ├── docker-publish.yml │   │   └── ci.yml │   └── images │       └── jetbrains.svg ├── requirements.txt ├── .env ├── .dockerignore ├── Examples │   ├── embedding.py │   ├── chat.py │   └── whisper.py ├── vercel.json ├── pytest.ini ├── .pre-commit-config.yaml ├── .env.example ├── docker-compose.yaml ├── _worker.js ├── scripts │   └── black.sh ├── Dockerfile ├── LICENSE ├── Makefile ├── pyproject.toml ├── CODE_OF_CONDUCT.md ├── .gitignore ├── CONTRIBUTING.md ├── deploy.md └── README.md /tests/.gitignore: -------------------------------------------------------------------------------- 1 | api_test.py -------------------------------------------------------------------------------- /.vercelignore: -------------------------------------------------------------------------------- 1 | **/* 2 | !vercel.json -------------------------------------------------------------------------------- /openai_forward/content/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /openai_forward/routers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /render.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | - name: openai-forward 3 | type: web 4 | env: docker -------------------------------------------------------------------------------- /.github/data/whisper.m4a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CharHubAI/proxy/HEAD/.github/data/whisper.m4a -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | sys.path.append(os.path.join(os.path.dirname(__file__), "..")) 5 | -------------------------------------------------------------------------------- /openai_forward/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.3.0-alpha" 2 | 3 | from dotenv import load_dotenv 4 | 5 | load_dotenv() 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | loguru 3 | sparrow-python>=0.1.3 4 | fastapi>=0.94.0 5 | uvicorn>=0.21.0 6 | orjson 7 | python-dotenv 8 | httpx 9 | pytz -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | LOG_CHAT=false 2 | OPENAI_BASE_URL=https://api.openai.com 3 | OPENAI_API_KEY= 4 | FORWARD_KEY= 5 | ROUTE_PREFIX= 6 | 7 | # Set the timezone 8 | TZ=Asia/Shanghai -------------------------------------------------------------------------------- /.dockerignore:
-------------------------------------------------------------------------------- 1 | **/* 2 | !openai_forward 3 | !pyproject.toml 4 | !README.md 5 | !requirements.txt 6 | !cert.pem 7 | !fullchain.pem 8 | !chain.pem 9 | !privkey.pem -------------------------------------------------------------------------------- /Examples/embedding.py: -------------------------------------------------------------------------------- 1 | import openai 2 | 3 | openai.api_base = "http://localhost:8000/v1" 4 | openai.api_key = "sk-******" 5 | response = openai.Embedding.create( 6 | input="Your text string goes here", model="text-embedding-ada-002" 7 | ) 8 | embeddings = response['data'][0]['embedding'] 9 | print(embeddings) 10 | -------------------------------------------------------------------------------- /openai_forward/content/whisper.py: -------------------------------------------------------------------------------- 1 | from loguru import logger 2 | 3 | 4 | class WhisperSaver: 5 | def __init__(self): 6 | self.logger = logger.bind(whisper=True) 7 | 8 | def add_log(self, bytes_: bytes): 9 | text_content = bytes_.decode("utf-8") 10 | self.logger.debug(text_content) 11 | -------------------------------------------------------------------------------- /vercel.json: -------------------------------------------------------------------------------- 1 | { 2 | "rewrites": [ 3 | { "source": "/", "destination": "https://api.openai.com" }, 4 | { 5 | "source": "/:match*", 6 | "destination": "https://api.openai.com/:match*" 7 | } 8 | ], 9 | "github": { 10 | "silent": true 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | faulthandler_timeout=180 3 | markers = 4 | slow: marks tests as slow (deselect with '-m "not slow"') 5 | timeout: marks test timeout duration 6 | repeat: marks that a test runs n times 7 | addopts = --doctest-modules --doctest-glob=README.md --doctest-glob=*.py --ignore=setup.py 8 | norecursedirs = Examples 9 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/timothycrosley/isort 3 | rev: 5.12.0 4 | hooks: 5 | - id: isort 6 | args: ["--profile", "black"] 7 | - repo: https://github.com/psf/black 8 | rev: 22.3.0 9 | hooks: 10 | - id: black 11 | types: [python] 12 | exclude: docs/ 13 | args: 14 | - -S -------------------------------------------------------------------------------- /Examples/chat.py: -------------------------------------------------------------------------------- 1 | import openai 2 | 3 | openai.api_base = "https://api.openai-forward.com/v1" 4 | openai.api_key = "sk-******" 5 | 6 | resp = openai.ChatCompletion.create( 7 | model="gpt-3.5-turbo", 8 | messages=[ 9 | {"role": "user", "content": "Who won the world series in 2020?"}, 10 | ], 11 | ) 12 | print(resp.choices) 13 | -------------------------------------------------------------------------------- /openai_forward/app.py: -------------------------------------------------------------------------------- 1 | from sparrow.api import create_app 2 | 3 | from .anthropic import Anthropic 4 | from .openai import Openai 5 | from .routers.v1 import router as router_v1 6 | 7 | app = create_app(title="openai_forward", version="1.0") 8 | app.openapi_version = "3.0.0" 9 | 10 | openai = Openai() 11 | anthropic =
Anthropic() 12 | 13 | app.include_router(router_v1) 14 | -------------------------------------------------------------------------------- /Examples/whisper.py: -------------------------------------------------------------------------------- 1 | # Note: you need to be using OpenAI Python v0.27.0 for the code below to work 2 | import openai 3 | from sparrow import relp 4 | 5 | openai.api_base = "https://api.openai-forward.com/v1" 6 | openai.api_key = "sk-******" 7 | 8 | audio_file = open(relp("../.github/data/whisper.m4a"), "rb") 9 | transcript = openai.Audio.transcribe("whisper-1", audio_file) 10 | print(transcript) 11 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # LOG_CHAT: whether to log chat records 2 | LOG_CHAT=True 3 | 4 | OPENAI_BASE_URL=https://api.openai.com 5 | 6 | # OPENAI_API_KEY: multiple api keys may be provided to form a round-robin pool 7 | OPENAI_API_KEY=sk-xxx1 sk-xxx2 sk-xxx3 8 | 9 | # FORWARD_KEY: once OPENAI_API_KEY above is set, FORWARD_KEY can be set here; clients can then use FORWARD_KEY as their api key 10 | FORWARD_KEY=fk-xxx1 11 | 12 | # ROUTE_PREFIX: specifies the root route prefix for the whole forwarding service 13 | ROUTE_PREFIX= 14 | 15 | # Set the timezone 16 | TZ=America/New_York -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | openai_forward: 3 | image: beidongjiedeguang/openai-forward:latest 4 | container_name: openai-forward-container 5 | env_file: 6 | .env 7 | ports: 8 | - "8000:8000" 9 | volumes: 10 | - ./Log-caloi-top:/home/openai-forward/Log 11 | - ./openai_forward:/home/openai-forward/openai_forward 12 | command: 13 | - --port=8000 14 | - --workers=1 -------------------------------------------------------------------------------- /_worker.js: -------------------------------------------------------------------------------- 1 | export default { 2 | async fetch(request, env) { 3 | try { 4 | const url = new URL(request.url); 5 | url.hostname = "api.openai.com"; 6 | return await fetch( 7 | new Request(url, {method: request.method, headers: request.headers, body: request.body}) 8 | ); 9 | } catch (e) { 10 | return new Response(e.stack, {status: 500}); 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /scripts/black.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | pip install black==22.3.0 3 | arrVar=() 4 | echo we ignore non-*.py files 5 | excluded_files=( 6 | ) 7 | for changed_file in $CHANGED_FILES; do 8 | if [[ ${changed_file} == *.py ]] && !
[[ " ${excluded_files[@]} " =~ " ${changed_file} " ]]; then 9 | echo checking ${changed_file} 10 | arrVar+=(${changed_file}) 11 | fi 12 | done 13 | if (( ${#arrVar[@]} )); then 14 | black -S --check "${arrVar[@]}" 15 | fi 16 | echo "no files left to check" 17 | exit 0 -------------------------------------------------------------------------------- /openai_forward/openai.py: -------------------------------------------------------------------------------- 1 | from fastapi import Request 2 | 3 | from .base import OpenaiBase 4 | 5 | 6 | class Openai(OpenaiBase): 7 | def __init__(self): 8 | if self.IP_BLACKLIST or self.IP_WHITELIST: 9 | self.validate_host = True 10 | else: 11 | self.validate_host = False 12 | 13 | async def reverse_proxy(self, request: Request): 14 | if self.validate_host: 15 | self.validate_request_host(request.client.host) 16 | return await self._reverse_proxy(request) 17 | -------------------------------------------------------------------------------- /tests/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | from sparrow import ls 5 | 6 | 7 | def rm(*file_pattern: str, rel=False): 8 | """Remove files or directories. 9 | Example: 10 | -------- 11 | >>> rm("*.jpg", "*.png") 12 | >>> rm("*.jpg", "*.png", rel=True) 13 | """ 14 | path_list = ls(".", *file_pattern, relp=rel, concat="extend") 15 | for file in path_list: 16 | if os.path.isfile(file): 17 | print("remove ", file) 18 | os.remove(file) 19 | # os.system("rm -f " + file) 20 | elif os.path.isdir(file): 21 | shutil.rmtree(file, ignore_errors=True) 22 | print("rm tree ", file) 23 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-alpine 2 | LABEL maintainer="kunyuan" 3 | ENV LC_ALL=C.UTF-8 4 | ENV LANG=C.UTF-8 5 | ENV TZ=America/New_York 6 | RUN apk update && \ 7 | apk add tzdata --no-cache && \ 8 | cp /usr/share/zoneinfo/America/New_York /etc/localtime && \ 9 | apk del tzdata && \ 10 | mkdir -p /usr/share/zoneinfo/America/ && \ 11 | ln -s /etc/localtime /usr/share/zoneinfo/America/New_York 12 | 13 | COPY requirements.txt requirements.txt 14 | RUN pip install --no-cache-dir -r requirements.txt 15 | 16 | COPY . 
/home/openai-forward 17 | WORKDIR /home/openai-forward 18 | 19 | ENV ssl_keyfile="/home/openai-forward/privkey.pem" 20 | ENV ssl_certfile="/home/openai-forward/fullchain.pem" 21 | EXPOSE 8000 22 | ENTRYPOINT ["python", "-m", "openai_forward.__main__", "run"] 23 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | name: Upload Python Package 2 | 3 | on: 4 | workflow_dispatch: 5 | # release: 6 | # types: [published] 7 | push: 8 | tags: 9 | - 'v*' 10 | 11 | 12 | jobs: 13 | deploy: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v3 17 | - name: Set up Python 18 | uses: actions/setup-python@v4 19 | with: 20 | python-version: '3.10' 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install setuptools wheel twine build hatch 25 | - name: Build and publish 26 | env: 27 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 28 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} 29 | run: | 30 | python -m build 31 | twine upload dist/* -------------------------------------------------------------------------------- /tests/test_http.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import time 3 | 4 | import httpx 5 | from sparrow.multiprocess import kill 6 | from utils import rm 7 | 8 | 9 | class TestRun: 10 | @classmethod 11 | def setup_class(cls): 12 | kill(8000) 13 | base_url = "https://api.openai-forward.com" 14 | subprocess.Popen(["nohup", "openai-forward", "run", "--base_url", base_url]) 15 | time.sleep(3) 16 | 17 | @classmethod 18 | def teardown_class(cls): 19 | kill(8000) 20 | rm("nohup.out") 21 | 22 | def test_get_doc(self): 23 | resp = httpx.get("http://localhost:8000/docs") 24 | assert resp.is_success 25 | 26 | def test_get_chat_completions(self): 27 | resp = httpx.get("http://localhost:8000/v1/chat/completions") 28 | assert resp.status_code == 401 29 | -------------------------------------------------------------------------------- /.github/workflows/gh-release.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | # Sequence of patterns matched against refs/tags 4 | tags: 5 | - 'v*' # Push events to matching v*, i.e. 
v1.0, v20.15.10 6 | 7 | #on: 8 | # push: 9 | # branches-ignore: 10 | # - '**' # temporarily disable this action 11 | 12 | 13 | name: Create Release 14 | 15 | jobs: 16 | build: 17 | name: Create Release 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout code 21 | uses: actions/checkout@v3 22 | - name: Create Release 23 | id: create_release 24 | uses: actions/create-release@v1 25 | env: 26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token 27 | with: 28 | tag_name: ${{ github.ref }} 29 | release_name: Release ${{ github.ref }} 30 | body: | 31 | See ChangeLog 32 | draft: false 33 | prerelease: false 34 | -------------------------------------------------------------------------------- /tests/test_env.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | import time 4 | 5 | import pytest 6 | from dotenv import load_dotenv 7 | 8 | import openai_forward 9 | 10 | 11 | class TestEnv: 12 | with open(".env", "r", encoding="utf-8") as f: 13 | default_env = f.read() 14 | 15 | @classmethod 16 | def setup_class(cls): 17 | env = """\ 18 | LOG_CHAT=true 19 | OPENAI_BASE_URL=https://api.openai.com 20 | OPENAI_API_KEY=key1 key2 21 | FORWARD_KEY=ps1 ps2 ps3 22 | ROUTE_PREFIX= 23 | IP_WHITELIST= 24 | IP_BLACKLIST= 25 | """ 26 | with open(".env", "w", encoding="utf-8") as f: 27 | f.write(env) 28 | time.sleep(0.1) 29 | 30 | load_dotenv(override=True) 31 | importlib.reload(openai_forward.base) 32 | cls.aibase = openai_forward.base.OpenaiBase() 33 | 34 | @classmethod 35 | def teardown_class(cls): 36 | with open(".env", "w", encoding="utf-8") as f: 37 | f.write(cls.default_env) 38 | 39 | def test_env1(self): 40 | assert self.aibase._openai_api_key_list == ["key1", "key2"] 41 | assert self.aibase._no_auth_mode is False 42 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 kunyuan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: start build push run down test twine log pull 2 | 3 | image := "beidongjiedeguang/openai-forward:latest" 4 | container := "openai-forward-container" 5 | compose_path := "docker-compose.yaml" 6 | 7 | start: 8 | docker run -d \ 9 | --name $(container) \ 10 | --env-file .env \ 11 | -p 27001:8000 \ 12 | -v $(shell pwd)/Log:/home/openai-forward/Log \ 13 | -v $(shell pwd)/openai_forward:/home/openai-forward/openai_forward \ 14 | $(image) 15 | 16 | 17 | exec: 18 | docker exec -it $(container) bash 19 | 20 | log: 21 | docker logs -f $(container) 22 | 23 | rm: 24 | docker rm -f $(container) 25 | 26 | up: 27 | @docker-compose -f $(compose_path) up 28 | 29 | down: 30 | @docker-compose -f $(compose_path) down 31 | 32 | run: 33 | @docker-compose -f $(compose_path) run -it -p 8000:8000 openai_forward bash 34 | 35 | test: 36 | pytest -v tests 37 | 38 | twine: 39 | @twine upload dist/* 40 | @rm -rf dist/* 41 | 42 | build: 43 | docker build --tag $(image) . 44 | 45 | build-push: 46 | docker buildx build --push --platform linux/arm64/v8,linux/amd64 --tag $(image) . 47 | 48 | pull: 49 | docker pull $(image) 50 | 51 | deploy: 52 | vercel --prod -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish Docker image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | tags: 7 | - 'v*' 8 | 9 | jobs: 10 | push_to_registry: 11 | name: Push Docker image to Docker Hub 12 | runs-on: ubuntu-latest 13 | steps: 14 | - 15 | name: Check out the repo 16 | uses: actions/checkout@v3 17 | - 18 | name: Log in to Docker Hub 19 | uses: docker/login-action@v2 20 | with: 21 | username: ${{ secrets.DOCKER_USERNAME }} 22 | password: ${{ secrets.DOCKER_PASSWORD }} 23 | 24 | - 25 | name: Extract metadata (tags, labels) for Docker 26 | id: meta 27 | uses: docker/metadata-action@v4 28 | with: 29 | images: beidongjiedeguang/openai-forward 30 | tags: | 31 | type=raw,value=latest 32 | type=ref,event=tag 33 | 34 | - 35 | name: Set up QEMU 36 | uses: docker/setup-qemu-action@v2 37 | 38 | - 39 | name: Set up Docker Buildx 40 | uses: docker/setup-buildx-action@v2 41 | 42 | - 43 | name: Build and push Docker image 44 | uses: docker/build-push-action@v4 45 | with: 46 | context: . 
47 | file: ./Dockerfile 48 | platforms: linux/amd64,linux/arm64 49 | push: true 50 | tags: ${{ steps.meta.outputs.tags }} 51 | labels: ${{ steps.meta.outputs.labels }} 52 | cache-from: type=gha 53 | cache-to: type=gha,mode=max 54 | -------------------------------------------------------------------------------- /openai_forward/routers/v1.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Request 2 | 3 | from ..nai import NovelAI 4 | from ..anthropic import Anthropic 5 | from ..openai import Openai 6 | from .schemas import OpenAIV1ChatCompletion, AnthropicTextCompletion, AnthropicMessagesCompletion, NAICompletion 7 | 8 | openai = Openai() 9 | anthropic = Anthropic() 10 | novelai = NovelAI() 11 | router = APIRouter(prefix=openai.ROUTE_PREFIX, tags=["v1"]) 12 | 13 | 14 | @router.post("/v1/chat/completions") 15 | async def chat_completions(params: OpenAIV1ChatCompletion, request: Request): 16 | """This endpoint exists only for documentation; it is registered after openai.reverse_proxy and is never actually executed.""" 17 | return await openai.reverse_proxy(request) 18 | 19 | @router.get("/v1/models") 20 | async def models_list(request: Request): 21 | """This endpoint exists only for documentation; it is registered after openai.reverse_proxy and is never actually executed.""" 22 | return await openai.reverse_proxy(request) 23 | 24 | @router.post("/v1/complete") 25 | async def anthropic_text_completions(params: AnthropicTextCompletion, request: Request): 26 | return await anthropic.reverse_proxy(request) 27 | 28 | @router.post("/v1/messages") 29 | async def anthropic_messages_completions(params: AnthropicMessagesCompletion, request: Request): 30 | return await anthropic.reverse_proxy(request) 31 | 32 | @router.get("/user/subscription") 33 | async def subscription_status(request: Request): 34 | return await novelai.reverse_proxy(request) 35 | 36 | # NovelAI has separate URLs for streaming and non-streaming 37 | @router.post("/ai/generate") 38 | @router.post("/ai/generate-stream") 39 | async def novelai_completions(params: NAICompletion, request: Request): 40 | return await novelai.reverse_proxy(request) -------------------------------------------------------------------------------- /tests/test_api.py: -------------------------------------------------------------------------------- 1 | from itertools import cycle 2 | 3 | import pytest 4 | from fastapi import HTTPException 5 | 6 | from openai_forward.openai import OpenaiBase 7 | 8 | 9 | @pytest.fixture(scope="module") 10 | def openai() -> OpenaiBase: 11 | return OpenaiBase() 12 | 13 | 14 | class TestOpenai: 15 | @staticmethod 16 | def teardown_method(): 17 | OpenaiBase.IP_BLACKLIST = [] 18 | OpenaiBase.IP_WHITELIST = [] 19 | OpenaiBase._openai_api_key_list = [] 20 | 21 | def test_env(self, openai: OpenaiBase): 22 | assert openai.BASE_URL == "https://api.openai.com" 23 | 24 | def test_api_keys(self, openai: OpenaiBase): 25 | assert openai._openai_api_key_list == [] 26 | openai._openai_api_key_list = ["a", "b"] 27 | openai._cycle_api_key = cycle(openai._openai_api_key_list) 28 | assert next(openai._cycle_api_key) == "a" 29 | assert next(openai._cycle_api_key) == "b" 30 | assert next(openai._cycle_api_key) == "a" 31 | assert next(openai._cycle_api_key) == "b" 32 | assert next(openai._cycle_api_key) == "a" 33 | 34 | def test_validate_ip(self, openai: OpenaiBase): 35 | ip1 = "1.1.1.1" 36 | ip2 = "2.2.2.2" 37 | assert openai.validate_request_host("*") is None 38 | openai.IP_WHITELIST.append(ip1) 39 | assert openai.validate_request_host(ip1) is None 40 | with pytest.raises(HTTPException): 41 |
openai.validate_request_host(ip2) 42 | openai.IP_WHITELIST = [] 43 | openai.IP_BLACKLIST.append(ip1) 44 | assert openai.validate_request_host(ip2) is None 45 | with pytest.raises(HTTPException): 46 | openai.validate_request_host(ip1) 47 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "openai_forward" 7 | description = "🚀 OpenAI API Reverse Proxy · ChatGPT API Proxy" 8 | authors = [ 9 | { name = "kunyuan", email = "beidongjiedeguang@gmail.com" }, 10 | ] 11 | license = "MIT" 12 | requires-python = ">=3.6" 13 | readme = "README.md" 14 | keywords = ["openai", "chatgpt", "openai-api", "openai-proxy", "OpenAI API Forwarding", "streaming-api", "fastapi", "python"] 15 | classifiers = [ 16 | "Development Status :: 5 - Production/Stable", 17 | "Operating System :: OS Independent", 18 | "Programming Language :: Python :: 3" 19 | ] 20 | 21 | dependencies = [ 22 | "loguru>=0.7.0", 23 | "sparrow-python>=0.1.5", 24 | "fastapi>=0.90.0", 25 | "uvicorn>=0.23.1", 26 | "orjson>=3.9.2", 27 | "python-dotenv", 28 | "httpx>=0.24.1", 29 | "pytz", 30 | ] 31 | 32 | dynamic = ["version"] 33 | 34 | [project.urls] 35 | Homepage = "https://github.com/beidongjiedeguang/openai-forward" 36 | Documentation = "https://github.com/beidongjiedeguang/openai-forward#openai-forward" 37 | Issues = "https://github.com/beidongjiedeguang/openai-forward/issues" 38 | Source = "https://github.com/beidongjiedeguang/openai-forward" 39 | 40 | [project.optional-dependencies] 41 | test = [ 42 | "openai>=0.27.8", 43 | "pytest", 44 | ] 45 | 46 | [project.scripts] 47 | openai_forward = "openai_forward.__main__:main" 48 | openai-forward = "openai_forward.__main__:main" 49 | aifd = "openai_forward.__main__:main" 50 | 51 | [tool.hatch.version] 52 | path = "openai_forward/__init__.py" 53 | 54 | [tool.isort] 55 | profile = "black" 56 | 57 | [tool.hatch.build] 58 | include = [ 59 | "openai_forward/**/*.py", 60 | ] 61 | exclude = [ 62 | ] 63 | 64 | artifacts = [ 65 | "openai_forward/web/index.js", 66 | ] 67 | 68 | 69 | [tool.hatch.build.targets.wheel] 70 | packages = ["openai_forward"] -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct for openai-forward 2 | 3 | ## 1. Purpose 4 | 5 | The purpose of this Code of Conduct is to provide guidelines for contributors to the openai-forward project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct. 6 | 7 | ## 2. Scope 8 | 9 | This Code of Conduct applies to all contributors, maintainers, and users of the openai-forward project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project. 10 | 11 | ## 3. 
Our Standards 12 | 13 | We encourage the following behavior: 14 | 15 | * Being respectful and considerate to others 16 | * Actively seeking diverse perspectives 17 | * Providing constructive feedback and assistance 18 | * Demonstrating empathy and understanding 19 | 20 | We discourage the following behavior: 21 | 22 | * Harassment or discrimination of any kind 23 | * Disrespectful, offensive, or inappropriate language or content 24 | * Personal attacks or insults 25 | * Unwarranted criticism or negativity 26 | 27 | ## 4. Reporting and Enforcement 28 | 29 | If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary. 30 | 31 | Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations. 32 | 33 | ## 5. Acknowledgements 34 | 35 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html). 36 | 37 | ## 6. Contact 38 | 39 | If you have any questions or concerns, please contact the project maintainers. 40 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push : 5 | branches: 6 | - main 7 | pull_request: 8 | paths-ignore: 9 | - 'docs/**' 10 | - '*.md' 11 | branches: 12 | - main 13 | 14 | jobs: 15 | check-black: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v3 19 | with: 20 | fetch-depth: 0 21 | - name: Set up Python 3.10 22 | uses: actions/setup-python@v4 23 | with: 24 | python-version: "3.10" 25 | - id: file_changes 26 | uses: Ana06/get-changed-files@v1.2 27 | - name: check black 28 | env: 29 | CHANGED_FILES: ${{ steps.file_changes.outputs.added_modified }} 30 | run: bash ./scripts/black.sh 31 | 32 | lint-flake-8: 33 | runs-on: ubuntu-latest 34 | steps: 35 | - uses: actions/checkout@v3 36 | - name: Set up Python 3.10 37 | uses: actions/setup-python@v4 38 | with: 39 | python-version: "3.10" 40 | - name: Lint with flake8 41 | run: | 42 | pip install flake8 43 | # stop the build if there are Python syntax errors or undefined names 44 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics --exclude .git,__pycache__,docs/source/conf.py,old,build,dist,tests/ 45 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 46 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics --exclude .git,__pycache__,docs/source/conf.py,old,build,dist,tests/ 47 | 48 | unit-tests: 49 | runs-on: ubuntu-latest 50 | strategy: 51 | matrix: 52 | python-version: ["3.10"] 53 | steps: 54 | - uses: actions/checkout@v3 55 | - name: Set up Python ${{ matrix.python-version }} 56 | uses: actions/setup-python@v3 57 | with: 58 | python-version: ${{ matrix.python-version }} 59 | - name: Install dependencies 60 | run: | 61 | python -m pip install --upgrade pip 62 | python -m pip install pytest psutil 63 | python -m pip install . 
64 | # python -m pip install codecov pytest-cov 65 | - name: Run tests 66 | run: | 67 | pytest -v -s 68 | -------------------------------------------------------------------------------- /openai_forward/__main__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import fire 4 | import uvicorn 5 | 6 | 7 | class Cli: 8 | @staticmethod 9 | def run( 10 | workers=20, 11 | api_key=None, 12 | forward_key=None, 13 | base_url=None, 14 | log_chat=False, 15 | route_prefix=None, 16 | ip_whitelist=None, 17 | ip_blacklist=None, 18 | ): 19 | """Run the forwarding server. 20 | 21 | Parameters 22 | ---------- 23 | 24 | workers: int, default 20 25 | api_key: str, default None 26 | forward_key: str, default None 27 | base_url: str, default None 28 | log_chat: bool, default False 29 | route_prefix: str, default None 30 | ip_whitelist: str, default None 31 | ip_blacklist: str, default None 32 | (The listening port is read from the `port` environment variable; it defaults to 443.) 33 | """ 34 | if base_url: 35 | os.environ["OPENAI_BASE_URL"] = base_url 36 | if api_key: 37 | os.environ["OPENAI_API_KEY"] = api_key 38 | if forward_key: 39 | os.environ["FORWARD_KEY"] = forward_key 40 | if log_chat: 41 | os.environ["LOG_CHAT"] = str(log_chat)  # os.environ values must be str 42 | if route_prefix: 43 | os.environ["ROUTE_PREFIX"] = route_prefix 44 | if ip_whitelist: 45 | os.environ["IP_WHITELIST"] = ip_whitelist 46 | if ip_blacklist: 47 | os.environ["IP_BLACKLIST"] = ip_blacklist 48 | 49 | ssl_keyfile = os.environ.get("ssl_keyfile", 'privkey.pem') or None 50 | ssl_certfile = os.environ.get("ssl_certfile", 'fullchain.pem') or None 51 | port = int(os.environ.get("port", 443)) or 443 52 | uvicorn.run( 53 | app="openai_forward.app:app", 54 | host="0.0.0.0", 55 | port=port, 56 | workers=workers, 57 | app_dir="..", 58 | ssl_keyfile=ssl_keyfile, 59 | ssl_certfile=ssl_certfile, 60 | ) 61 | 62 | @staticmethod 63 | def convert(log_folder: str = "./Log/chat", target_path: str = "./Log/chat.json"): 64 | """Convert log folder to jsonl file""" 65 | from openai_forward.tool import convert_folder_to_jsonl 66 | 67 | print(f"Convert {log_folder}/*.log to {target_path}") 68 | convert_folder_to_jsonl(log_folder, target_path) 69 | 70 | 71 | def main(): 72 | fire.Fire(Cli) 73 | 74 | 75 | if __name__ == "__main__": 76 | main() 77 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .github/release-template.ejs 2 | scripts/release.sh 3 | node_modules 4 | package-lock.json 5 | package.json 6 | .idea/ 7 | .vscode/ 8 | .DS_Store 9 | third-party/ 10 | run.sh 11 | ssl/ 12 | chat.yaml 13 | chat_*.yaml 14 | *.pem 15 | Log/ 16 | Log-caloi-top/ 17 | dist/ 18 | # Byte-compiled / optimized / DLL files 19 | __pycache__/ 20 | *.py[cod] 21 | *$py.class 22 | 23 | # C extensions 24 | *.so 25 | 26 | # Distribution / packaging 27 | .Python 28 | build/ 29 | develop-eggs/ 30 | dist/ 31 | downloads/ 32 | eggs/ 33 | .eggs/ 34 | lib/ 35 | lib64/ 36 | parts/ 37 | sdist/ 38 | var/ 39 | wheels/ 40 | pip-wheel-metadata/ 41 | share/python-wheels/ 42 | *.egg-info/ 43 | .installed.cfg 44 | *.egg 45 | MANIFEST 46 | 47 | # PyInstaller 48 | # Usually these files are written by a python script from a template 49 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
50 | *.manifest 51 | *.spec 52 | 53 | # Installer logs 54 | pip-log.txt 55 | pip-delete-this-directory.txt 56 | 57 | # Unit test / coverage reports 58 | htmlcov/ 59 | .tox/ 60 | .nox/ 61 | .coverage 62 | .coverage.* 63 | .cache 64 | nosetests.xml 65 | coverage.xml 66 | *.cover 67 | *.py,cover 68 | .hypothesis/ 69 | .pytest_cache/ 70 | 71 | # Translations 72 | *.mo 73 | *.pot 74 | 75 | # Django stuff: 76 | *.log 77 | local_settings.py 78 | db.sqlite3 79 | db.sqlite3-journal 80 | 81 | # Flask stuff: 82 | instance/ 83 | .webassets-cache 84 | 85 | # Scrapy stuff: 86 | .scrapy 87 | 88 | # Sphinx documentation 89 | docs/_build/ 90 | 91 | # PyBuilder 92 | target/ 93 | 94 | # Jupyter Notebook 95 | .ipynb_checkpoints 96 | 97 | # IPython 98 | profile_default/ 99 | ipython_config.py 100 | 101 | # pyenv 102 | .python-version 103 | 104 | # pipenv 105 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 106 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 107 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 108 | # install all needed dependencies. 109 | #Pipfile.lock 110 | 111 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 112 | __pypackages__/ 113 | 114 | # Celery stuff 115 | celerybeat-schedule 116 | celerybeat.pid 117 | 118 | # SageMath parsed files 119 | *.sage.py 120 | 121 | # Environments 122 | .venv 123 | env/ 124 | venv/ 125 | ENV/ 126 | env.bak/ 127 | venv.bak/ 128 | 129 | # Spyder project settings 130 | .spyderproject 131 | .spyproject 132 | 133 | # Rope project settings 134 | .ropeproject 135 | 136 | # mkdocs documentation 137 | /site 138 | 139 | # mypy 140 | .mypy_cache/ 141 | .dmypy.json 142 | dmypy.json 143 | 144 | # Pyre type checker 145 | .pyre/ 146 | .vercel 147 | 148 | *.pem -------------------------------------------------------------------------------- /openai_forward/content/chat.py: -------------------------------------------------------------------------------- 1 | import time 2 | import uuid 3 | 4 | import orjson 5 | from fastapi import Request 6 | from httpx._decoders import LineDecoder 7 | from loguru import logger 8 | from orjson import JSONDecodeError 9 | 10 | decoder = LineDecoder() 11 | 12 | 13 | def _parse_iter_line_content(line: str): 14 | try: 15 | line_dict = orjson.loads(line) 16 | return line_dict["choices"][0]["delta"]["content"] 17 | except JSONDecodeError: 18 | return "" 19 | except KeyError: 20 | return "" 21 | 22 | 23 | def parse_chat_completions(bytes_: bytes): 24 | txt_lines = decoder.decode(bytes_.decode("utf-8")) 25 | line0 = txt_lines[0] 26 | target_info = dict() 27 | _start_token = "data: " 28 | if line0.startswith(_start_token): 29 | is_stream = True 30 | line0 = orjson.loads(line0[len(_start_token) :]) 31 | msg = line0["choices"][0]["delta"] 32 | else: 33 | is_stream = False 34 | line0 = orjson.loads("".join(txt_lines)) 35 | msg = line0["choices"][0]["message"] 36 | 37 | target_info["created"] = line0["created"] 38 | target_info["id"] = line0["id"] 39 | target_info["model"] = line0["model"] 40 | target_info["role"] = msg["role"] 41 | target_info["content"] = msg.get("content", "") 42 | if not is_stream: 43 | return target_info 44 | # loop for stream 45 | for line in txt_lines[1:]: 46 | if line in ("", "\n", "\n\n"): 47 | continue 48 | elif line.startswith(_start_token): 49 | target_info["content"] += _parse_iter_line_content( 50 | line[len(_start_token) :] 51 | ) 52 | else: 53 | logger.warning(f"line not 
startswith data: {line}") 54 | return target_info 55 | 56 | 57 | class ChatSaver: 58 | def __init__(self): 59 | self.logger = logger.bind(chat=True) 60 | 61 | @staticmethod 62 | async def parse_payload_to_content(request: Request, route_path: str): 63 | if route_path == "/v1/chat/completions": 64 | uid = uuid.uuid4().__str__() 65 | payload = await request.json() 66 | msgs = payload["messages"] 67 | model = payload["model"] 68 | content = { 69 | "messages": [{msg["role"]: msg["content"]} for msg in msgs], 70 | "model": model, 71 | "forwarded-for": request.headers.get("x-forwarded-for") or "", 72 | "uid": uid, 73 | "datetime": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), 74 | } 75 | else: 76 | content = {} 77 | return content 78 | 79 | @staticmethod 80 | def parse_bytes_to_content(bytes_: bytes, route_path: str): 81 | if route_path == "/v1/chat/completions": 82 | return parse_chat_completions(bytes_) 83 | else: 84 | return {} 85 | 86 | def add_chat(self, chat_info: dict): 87 | self.logger.debug(f"{chat_info}") 88 | -------------------------------------------------------------------------------- /openai_forward/anthropic.py: -------------------------------------------------------------------------------- 1 | from loguru import logger 2 | 3 | import httpx 4 | from fastapi import HTTPException, Request, status 5 | from fastapi.responses import StreamingResponse 6 | from starlette.background import BackgroundTask 7 | 8 | from openai_forward.tool import env2list 9 | 10 | class AnthropicBase: 11 | BASE_URL = "https://api.anthropic.com" 12 | IP_WHITELIST = env2list("IP_WHITELIST", sep=" ") 13 | IP_BLACKLIST = env2list("IP_BLACKLIST", sep=" ") 14 | 15 | timeout = 600 16 | 17 | def validate_request_host(self, ip): 18 | if self.IP_WHITELIST and ip not in self.IP_WHITELIST: 19 | raise HTTPException( 20 | status_code=status.HTTP_403_FORBIDDEN, 21 | detail=f"Forbidden, ip={ip} not in whitelist!", 22 | ) 23 | if self.IP_BLACKLIST and ip in self.IP_BLACKLIST: 24 | raise HTTPException( 25 | status_code=status.HTTP_403_FORBIDDEN, 26 | detail=f"Forbidden, ip={ip} in blacklist!", 27 | ) 28 | 29 | @classmethod 30 | async def _reverse_proxy(cls, request: Request): 31 | client = httpx.AsyncClient(base_url=cls.BASE_URL, http1=True, http2=False) 32 | url_path = request.url.path 33 | url = httpx.URL(path=url_path, query=request.url.query.encode("utf-8")) 34 | headers = dict(request.headers) 35 | auth = headers.pop("authorization", "") 36 | content_type = headers.pop("content-type", "application/json") 37 | auth_headers_dict = {"Content-Type": content_type, "Authorization": auth, 38 | "accept": "application/json", "anthropic-version": "2023-06-01", 39 | "x-api-key": headers.pop("x-api-key", "")} 40 | 41 | req = client.build_request( 42 | request.method, 43 | url, 44 | headers=auth_headers_dict, 45 | content=request.stream(), 46 | timeout=cls.timeout, 47 | ) 48 | try: 49 | r = await client.send(req, stream=True) 50 | except (httpx.ConnectError, httpx.ConnectTimeout) as e: 51 | error_info = ( 52 | f"{type(e)}: {e} | " 53 | f"Please check if host={request.client.host} can access [{cls.BASE_URL}] successfully?" 
54 | ) 55 | logger.error(error_info) 56 | raise HTTPException( 57 | status_code=status.HTTP_504_GATEWAY_TIMEOUT, detail=error_info 58 | ) 59 | except Exception as e: 60 | logger.exception(f"{type(e)}:") 61 | raise HTTPException( 62 | status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=e 63 | ) 64 | 65 | aiter_bytes = r.aiter_bytes() 66 | return StreamingResponse( 67 | aiter_bytes, 68 | status_code=r.status_code, 69 | media_type=r.headers.get("content-type"), 70 | background=BackgroundTask(r.aclose), 71 | ) 72 | 73 | 74 | class Anthropic(AnthropicBase): 75 | def __init__(self): 76 | if self.IP_BLACKLIST or self.IP_WHITELIST: 77 | self.validate_host = True 78 | else: 79 | self.validate_host = False 80 | 81 | async def reverse_proxy(self, request: Request): 82 | if self.validate_host: 83 | self.validate_request_host(request.client.host) 84 | return await self._reverse_proxy(request) 85 | -------------------------------------------------------------------------------- /openai_forward/config.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | import time 5 | 6 | from loguru import logger 7 | from rich import print 8 | from rich.panel import Panel 9 | from rich.table import Table 10 | 11 | 12 | def print_startup_info(base_url, route_prefix, api_key, no_auth_mode, log_chat): 13 | try: 14 | from dotenv import load_dotenv 15 | 16 | load_dotenv(".env") 17 | except Exception: 18 | ... 19 | route_prefix = route_prefix or "/" 20 | api_key_info = True if len(api_key) else False 21 | table = Table(title="", box=None, width=100) 22 | table.add_column("base-url", justify="left", style="#df412f") 23 | table.add_column("route-prefix", justify="center", style="green") 24 | table.add_column("api-key-polling-pool", justify="center", style="green") 25 | table.add_column( 26 | "no-auth-mode", justify="center", style="red" if no_auth_mode else "green" 27 | ) 28 | table.add_column("Log-chat", justify="center", style="green") 29 | table.add_row( 30 | base_url, 31 | route_prefix, 32 | str(api_key_info), 33 | str(no_auth_mode), 34 | str(log_chat), 35 | ) 36 | print(Panel(table, title="🤗 openai-forward is ready to serve! 
", expand=False)) 37 | 38 | 39 | class InterceptHandler(logging.Handler): 40 | def emit(self, record): 41 | # Get corresponding Loguru level if it exists 42 | try: 43 | level = logger.level(record.levelname).name 44 | except ValueError: 45 | level = record.levelno 46 | 47 | # Find caller from where originated the logged message 48 | frame, depth = logging.currentframe(), 6 49 | while frame.f_code.co_filename == logging.__file__: 50 | frame = frame.f_back 51 | depth += 1 52 | logger.opt(depth=depth, exception=record.exc_info).log( 53 | level, record.getMessage() 54 | ) 55 | 56 | 57 | def setting_log(save_file=False, log_name="openai_forward", multi_process=True): 58 | # TODO 修复时区配置 59 | if os.environ.get("TZ") == "Asia/Shanghai": 60 | os.environ["TZ"] = "UTC-8" 61 | if hasattr(time, "tzset"): 62 | time.tzset() 63 | 64 | logging.root.handlers = [InterceptHandler()] 65 | for name in logging.root.manager.loggerDict.keys(): 66 | logging.getLogger(name).handlers = [] 67 | logging.getLogger(name).propagate = True 68 | 69 | config_handlers = [ 70 | {"sink": sys.stdout, "level": "DEBUG"}, 71 | { 72 | "sink": f"./Log/chat/chat.log", 73 | "enqueue": multi_process, 74 | "rotation": "50 MB", 75 | "filter": lambda record: "chat" in record["extra"], 76 | "format": "{message}", 77 | }, 78 | { 79 | "sink": f"./Log/whisper/whisper.log", 80 | "enqueue": multi_process, 81 | "rotation": "30 MB", 82 | "filter": lambda record: "whisper" in record["extra"], 83 | "format": "{message}", 84 | }, 85 | ] 86 | if save_file: 87 | config_handlers += [ 88 | { 89 | "sink": f"./Log/{log_name}.log", 90 | "enqueue": multi_process, 91 | "rotation": "100 MB", 92 | "level": "INFO", 93 | } 94 | ] 95 | 96 | logger_config = {"handlers": config_handlers} 97 | logger.configure(**logger_config) 98 | -------------------------------------------------------------------------------- /openai_forward/base.py: -------------------------------------------------------------------------------- 1 | import os 2 | from itertools import cycle 3 | 4 | import httpx 5 | from fastapi import HTTPException, Request, status 6 | from fastapi.responses import StreamingResponse 7 | from loguru import logger 8 | from starlette.background import BackgroundTask 9 | 10 | from .config import print_startup_info 11 | from .tool import env2list 12 | 13 | 14 | class OpenaiBase: 15 | BASE_URL = os.environ.get("OPENAI_BASE_URL", "https://api.openai.com").strip() 16 | ROUTE_PREFIX = os.environ.get("ROUTE_PREFIX", "").strip() 17 | _openai_api_key_list = env2list("OPENAI_API_KEY", sep=" ") 18 | _cycle_api_key = cycle(_openai_api_key_list) 19 | _FWD_KEYS = set(env2list("FORWARD_KEY", sep=" ")) 20 | _no_auth_mode = _openai_api_key_list != [] and _FWD_KEYS == set() 21 | IP_WHITELIST = env2list("IP_WHITELIST", sep=" ") 22 | IP_BLACKLIST = env2list("IP_BLACKLIST", sep=" ") 23 | 24 | timeout = 600 25 | 26 | print_startup_info( 27 | BASE_URL, ROUTE_PREFIX, _openai_api_key_list, _no_auth_mode, False 28 | ) 29 | 30 | def validate_request_host(self, ip): 31 | if self.IP_WHITELIST and ip not in self.IP_WHITELIST: 32 | raise HTTPException( 33 | status_code=status.HTTP_403_FORBIDDEN, 34 | detail=f"Forbidden, ip={ip} not in whitelist!", 35 | ) 36 | if self.IP_BLACKLIST and ip in self.IP_BLACKLIST: 37 | raise HTTPException( 38 | status_code=status.HTTP_403_FORBIDDEN, 39 | detail=f"Forbidden, ip={ip} in blacklist!", 40 | ) 41 | 42 | @classmethod 43 | async def _reverse_proxy(cls, request: Request): 44 | client = httpx.AsyncClient(base_url=cls.BASE_URL, http1=True, http2=False) 45 | 
url_path = request.url.path 46 | url_path = url_path[len(cls.ROUTE_PREFIX) :] 47 | url = httpx.URL(path=url_path, query=request.url.query.encode("utf-8")) 48 | headers = dict(request.headers) 49 | auth = headers.pop("authorization", "") 50 | content_type = headers.pop("content-type", "application/json") 51 | auth_headers_dict = {"Content-Type": content_type, "Authorization": auth} 52 | auth_prefix = "Bearer " 53 | if cls._no_auth_mode or (auth and auth[len(auth_prefix) :] in cls._FWD_KEYS): 54 | auth = auth_prefix + next(cls._cycle_api_key) 55 | auth_headers_dict["Authorization"] = auth 56 | 57 | req = client.build_request( 58 | request.method, 59 | url, 60 | headers=auth_headers_dict, 61 | content=request.stream(), 62 | timeout=cls.timeout, 63 | ) 64 | try: 65 | r = await client.send(req, stream=True) 66 | except (httpx.ConnectError, httpx.ConnectTimeout) as e: 67 | error_info = ( 68 | f"{type(e)}: {e} | " 69 | f"Please check if host={request.client.host} can access [{cls.BASE_URL}] successfully?" 70 | ) 71 | logger.error(error_info) 72 | raise HTTPException( 73 | status_code=status.HTTP_504_GATEWAY_TIMEOUT, detail=error_info 74 | ) 75 | except Exception as e: 76 | logger.exception(f"{type(e)}:") 77 | raise HTTPException( 78 | status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=e 79 | ) 80 | 81 | aiter_bytes = r.aiter_bytes() 82 | return StreamingResponse( 83 | aiter_bytes, 84 | status_code=r.status_code, 85 | media_type=r.headers.get("content-type"), 86 | background=BackgroundTask(r.aclose), 87 | ) 88 | -------------------------------------------------------------------------------- /openai_forward/tool.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import os 3 | from typing import Dict, List, Union 4 | 5 | import orjson 6 | from rich import print 7 | from sparrow import MeasureTime, ls, relp 8 | 9 | 10 | def json_load(filepath: str, rel=False, mode="rb"): 11 | abs_path = relp(filepath, parents=1) if rel else filepath 12 | with open(abs_path, mode=mode) as f: 13 | return orjson.loads(f.read()) 14 | 15 | 16 | def json_dump( 17 | data: Union[List, Dict], filepath: str, rel=False, indent_2=False, mode="wb" 18 | ): 19 | orjson_option = 0 20 | if indent_2: 21 | orjson_option = orjson.OPT_INDENT_2 22 | abs_path = relp(filepath, parents=1) if rel else filepath 23 | with open(abs_path, mode=mode) as f: 24 | f.write(orjson.dumps(data, option=orjson_option)) 25 | 26 | 27 | def str2list(s: str, sep=" "): 28 | if s: 29 | return [i.strip() for i in s.split(sep) if i.strip()] 30 | else: 31 | return [] 32 | 33 | 34 | def env2list(env_name: str, sep=" "): 35 | return str2list(os.environ.get(env_name, "").strip(), sep=sep) 36 | 37 | 38 | def get_matches(messages: List[Dict], assistants: List[Dict]): 39 | mt = MeasureTime() 40 | mt.start() 41 | msg_len, ass_len = len(messages), len(assistants) 42 | if msg_len != ass_len: 43 | print(f"message({msg_len}) and assistant({ass_len}) lengths do not match") 44 | 45 | cvt = lambda msg, ass: { 46 | "datetime": msg.get('datetime'), 47 | "forwarded-for": msg.get("forwarded-for"), 48 | "model": msg.get("model"), 49 | "messages": msg.get("messages"), 50 | "assistant": ass.get("assistant"), 51 | } 52 | 53 | msg_uid_dict = {m.pop("uid"): m for m in messages} 54 | ass_uid_dict = {a.pop("uid"): a for a in assistants} 55 | matches = [ 56 | cvt(msg_uid_dict[uid], ass_uid_dict[uid]) 57 | for uid in msg_uid_dict 58 | if uid in ass_uid_dict 59 | ] 60 | 61 | ref_len = max(msg_len, ass_len) 62 | if len(matches) != ref_len: 63 |
print(f"存在{ref_len - len(matches)}条未匹配数据") 64 | mt.show_interval("计算耗时:") 65 | return matches 66 | 67 | 68 | def parse_log_to_list(log_path: str): 69 | with open(log_path, "r", encoding="utf-8") as f: 70 | messages, assistant = [], [] 71 | for line in f.readlines(): 72 | content: dict = ast.literal_eval(line) 73 | if content.get("messages"): 74 | messages.append(content) 75 | else: 76 | assistant.append(content) 77 | return messages, assistant 78 | 79 | 80 | def convert_chatlog_to_jsonl(log_path: str, target_path: str): 81 | """Convert single chatlog to jsonl""" 82 | message_list, assistant_list = parse_log_to_list(log_path) 83 | content_list = get_matches(messages=message_list, assistants=assistant_list) 84 | json_dump(content_list, target_path, indent_2=True) 85 | 86 | 87 | def get_log_files_from_folder(log_path: str): 88 | return ls(log_path, "*.log", relp=False) 89 | 90 | 91 | def convert_folder_to_jsonl(folder_path: str, target_path: str): 92 | """Convert chatlog folder to jsonl""" 93 | log_files = get_log_files_from_folder(folder_path) 94 | messages = [] 95 | assistants = [] 96 | for log_path in log_files: 97 | msg, ass = parse_log_to_list(log_path) 98 | 99 | msg_len, ass_len = len(msg), len(ass) 100 | if msg_len != ass_len: 101 | print(f"{log_path=} message({msg_len}) 与 assistant({ass_len}) 长度不匹配") 102 | messages.extend(msg) 103 | assistants.extend(ass) 104 | content_list = get_matches(messages=messages, assistants=assistants) 105 | json_dump(content_list, target_path, indent_2=True) 106 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to openai-forward 2 | 3 | First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request. 4 | 5 | This document provides guidelines and best practices to help you contribute effectively. 6 | 7 | ## Table of Contents 8 | 9 | - [Code of Conduct](#code-of-conduct) 10 | - [Getting Started](#getting-started) 11 | - [How to Contribute](#how-to-contribute) 12 | - [Reporting Bugs](#reporting-bugs) 13 | - [Suggesting Enhancements](#suggesting-enhancements) 14 | - [Submitting Pull Requests](#submitting-pull-requests) 15 | - [Style Guidelines](#style-guidelines) 16 | - [Code Formatting](#code-formatting) 17 | - [Pre-Commit Hooks](#pre-commit-hooks) 18 | 19 | ## Code of Conduct 20 | 21 | By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it to understand the expectations we have for everyone who contributes to this project. 22 | 23 | 24 | ## Getting Started 25 | 26 | To start contributing, follow these steps: 27 | 28 | 1. Fork the repository and clone your fork. 29 | 2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`). 30 | 3. Make your changes in the new branch. 31 | 4. Test your changes thoroughly. 32 | 5. Commit and push your changes to your fork. 33 | 6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section. 34 | 35 | ## How to Contribute 36 | 37 | ### Reporting Bugs 38 | 39 | If you find a bug in the project, please create an issue on GitHub with the following information: 40 | 41 | - A clear, descriptive title for the issue. 
42 | - A description of the problem, including steps to reproduce the issue. 43 | - Any relevant logs, screenshots, or other supporting information. 44 | 45 | ### Suggesting Enhancements 46 | 47 | If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information: 48 | 49 | - A clear, descriptive title for the issue. 50 | - A detailed description of the proposed enhancement, including any benefits and potential drawbacks. 51 | - Any relevant examples, mockups, or supporting information. 52 | 53 | ### Submitting Pull Requests 54 | 55 | When submitting a pull request, please ensure that your changes meet the following criteria: 56 | 57 | - Your pull request should be atomic and focus on a single change. 58 | - Your pull request should include tests for your change. 59 | - You should have thoroughly tested your changes with multiple different prompts. 60 | - You should have considered potential risks and mitigations for your changes. 61 | - You should have documented your changes clearly and comprehensively. 62 | - You should not include any unrelated or "extra" small tweaks or changes. 63 | 64 | ## Style Guidelines 65 | 66 | ### Code Formatting 67 | 68 | We use the `black` code formatter to maintain a consistent coding style across the project. Please ensure that your code is formatted using `black` before submitting a pull request. You can install `black` using `pip`: 69 | 70 | ```bash 71 | pip install black 72 | ``` 73 | 74 | To format your code, run the following command in the project's root directory: 75 | 76 | ```bash 77 | black . 78 | ``` 79 | ### Pre-Commit Hooks 80 | We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps: 81 | 82 | Install the pre-commit package using pip: 83 | ```bash 84 | pip install pre-commit 85 | ``` 86 | 87 | Run the following command in the project's root directory to install the pre-commit hooks: 88 | ```bash 89 | pre-commit install 90 | ``` 91 | 92 | Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements. 93 | 94 | If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project. 95 | 96 | Happy coding, and once again, thank you for your contributions! 
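97 | 98 | As a concrete illustration of the testing guideline above, here is a minimal sketch of a new test module that follows this repository's pytest conventions (the `slow` marker is declared in pytest.ini; the file name, endpoint, and port are illustrative assumptions, and a locally running forward server is assumed): 99 | 100 | ```python 101 | # tests/test_example.py -- hypothetical illustration, not an existing file in this repo. 102 | import httpx 103 | import pytest 104 | 105 | 106 | @pytest.mark.slow  # marker declared in pytest.ini; deselect with -m "not slow" 107 | def test_models_requires_auth(): 108 |     # Assumes an openai-forward instance is already listening on localhost:8000. 109 |     resp = httpx.get("http://localhost:8000/v1/models") 110 |     # Without a valid key, the upstream should reject the forwarded request. 111 |     assert resp.status_code == 401 112 | ```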
--------------------------------------------------------------------------------
/openai_forward/nai.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | 
3 | import httpx
4 | from fastapi import HTTPException, Request, status
5 | from fastapi.responses import StreamingResponse
6 | from starlette.background import BackgroundTask
7 | 
8 | from openai_forward.tool import env2list
9 | 
10 | 
11 | class NAIBase:
12 |     API_NOVEL = "https://api.novelai.net"
13 |     TEXT_NOVEL = "https://text.novelai.net"
14 |     IP_WHITELIST = env2list("IP_WHITELIST", sep=" ")
15 |     IP_BLACKLIST = env2list("IP_BLACKLIST", sep=" ")
16 | 
17 |     timeout = 600
18 | 
19 |     def validate_request_host(self, ip):
20 |         """Reject requests whose client IP fails the whitelist/blacklist rules."""
21 |         if self.IP_WHITELIST and ip not in self.IP_WHITELIST:
22 |             raise HTTPException(
23 |                 status_code=status.HTTP_403_FORBIDDEN,
24 |                 detail=f"Forbidden, ip={ip} not in whitelist!",
25 |             )
26 |         if self.IP_BLACKLIST and ip in self.IP_BLACKLIST:
27 |             raise HTTPException(
28 |                 status_code=status.HTTP_403_FORBIDDEN,
29 |                 detail=f"Forbidden, ip={ip} in blacklist!",
30 |             )
31 | 
32 |     @classmethod
33 |     async def _reverse_proxy(cls, request: Request):
34 |         body = await request.json()
35 |         # Clio models are served from the main API host; other models from the text host.
36 |         base_url = cls.API_NOVEL if 'clio' in body['model'] else cls.TEXT_NOVEL
37 |         parameters = body.get('parameters')
38 |         if parameters is not None:
39 |             if 'repetition_penalty_whitelist' in parameters:
40 |                 # Flatten nested lists of token ids into a single flat list.
41 |                 whitelist = parameters['repetition_penalty_whitelist'] or []
42 |                 new_whitelist = []
43 |                 for item in whitelist:
44 |                     if isinstance(item, int):
45 |                         new_whitelist.append(item)
46 |                     else:
47 |                         new_whitelist.extend(item)
48 |                 parameters['repetition_penalty_whitelist'] = new_whitelist
49 |             if 'max_length' in parameters:
50 |                 # Cap max_length at 150 tokens; fall back to 150 when unset.
51 |                 max_length = parameters['max_length']
52 |                 parameters['max_length'] = 150 if max_length is None else min(int(max_length), 150)
53 |             if 'min_length' in parameters:
54 |                 # Cap min_length at 150 tokens; fall back to 50 when unset.
55 |                 min_length = parameters['min_length']
56 |                 parameters['min_length'] = 50 if min_length is None else min(int(min_length), 150)
57 |         client = httpx.AsyncClient(base_url=base_url, http1=True, http2=False)
58 |         url = httpx.URL(path=request.url.path, query=request.url.query.encode("utf-8"))
59 |         # Forward only the auth and content-type headers to the upstream API.
60 |         headers = dict(request.headers)
61 |         auth = headers.pop("authorization", "")
62 |         content_type = headers.pop("content-type", "application/json")
63 |         auth_headers_dict = {"Content-Type": content_type, "Authorization": auth}
64 | 
65 |         req = client.build_request(
66 |             request.method,
67 |             url,
68 |             json=body,
69 |             headers=auth_headers_dict,
70 |             timeout=cls.timeout,
71 |         )
72 |         try:
73 |             r = await client.send(req, stream=True)
74 |         except (httpx.ConnectError, httpx.ConnectTimeout) as e:
75 |             error_info = (
76 |                 f"{type(e)}: {e} | "
77 |                 f"Please check if host={request.client.host} can access [{base_url}] successfully?"
78 |             )
79 |             logger.error(error_info)
80 |             raise HTTPException(
81 |                 status_code=status.HTTP_504_GATEWAY_TIMEOUT, detail=error_info
82 |             )
83 |         except Exception as e:
84 |             logger.exception(f"{type(e)}:")
85 |             raise HTTPException(
86 |                 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)
87 |             )
88 | 
89 |         # Stream the upstream response through; close it when the response finishes.
90 |         return StreamingResponse(
91 |             r.aiter_bytes(),
92 |             status_code=r.status_code,
93 |             media_type=r.headers.get("content-type"),
94 |             background=BackgroundTask(r.aclose),
95 |         )
96 | 
97 | 
98 | class NovelAI(NAIBase):
99 |     def __init__(self):
100 |         # Validate client hosts only when a whitelist or blacklist is configured.
101 |         self.validate_host = bool(self.IP_BLACKLIST or self.IP_WHITELIST)
102 | 
103 |     async def reverse_proxy(self, request: Request):
104 |         if self.validate_host:
105 |             self.validate_request_host(request.client.host)
106 |         return await self._reverse_proxy(request)
--------------------------------------------------------------------------------
/deploy.md:
--------------------------------------------------------------------------------
1 | 
2 | 

3 |
4 | Deployment Guide
6 |

7 |
8 | 9 | [pip部署](#pip部署) | 10 | [docker部署](#docker部署) | 11 | [railway一键部署](#railway-一键部署) | 12 | [render一键部署](#render-一键部署) | 13 | [Vercel一键部署](#Vercel-一键部署) | 14 | [cloudflare部署](#cloudflare-部署) | 15 | 16 |
17 | 18 | ## pip部署 19 | 20 | **安装** 21 | 22 | ```bash 23 | pip install openai-forward 24 | ``` 25 | 26 | **运行服务** 27 | 28 | ```bash 29 | openai-forward run # 或者使用别名 aifd run 30 | ``` 31 | 服务就搭建完成了。 32 | 配置见[配置](README.md#配置选项) 33 | 34 | ### 服务调用 35 | 36 | 使用方式只需将`https://api.openai.com` 替换为服务所在端口`http://{ip}:{port}` 就可以了。 37 | 比如 38 | ```bash 39 | # 默认 40 | https://api.openai.com/v1/chat/completions 41 | #替换为 42 | http://{ip}:{port}/v1/chat/completions 43 | ``` 44 | 45 | 更多使用方式见 [应用](README.md#应用) 46 | 47 | ### 开启SSL (以https访问域名) 48 | 首先准备好一个域名, 如本项目中使用的域名为`api.openai-forward.com` 49 | 50 | 常用方式是使用nginx(不习惯用命令行配置的话可以考虑用 [Nginx Proxy Manager](https://github.com/NginxProxyManager/nginx-proxy-manager)) 代理转发 openai-forward 服务端口(默认8000)。 51 | 需要注意的是,若要使用流式转发,在nginx配置中需要添加取消代理缓存的配置: 52 | ```bash 53 | proxy_cache off; 54 | proxy_buffering off; 55 | ``` 56 | 57 | 然后就可以https进行流式访问了。 58 | 59 | 60 | ## Docker部署 61 | 62 | ```bash 63 | docker run -d -p 9999:8000 beidongjiedeguang/openai-forward:latest 64 | ``` 65 | 66 | 将映射宿主机的9999端口,通过`http://{ip}:9999`访问服务。 67 | 容器内日志路径为`/home/openai-forward/Log/`, 可以启动时将其映射出来。 68 | 69 | 注:同样可以在启动命令中通过-e传入环境变量OPENAI_API_KEY=sk-xxx作为默认api key 70 | 启用SSL同上. 71 | 环境变量配置见[环境变量配置](README.md#环境变量配置项) 72 | 73 | 74 | ## 源码部署 75 | 76 | ```bash 77 | git clone https://github.com/beidongjiedeguang/openai-forward.git --depth=1 78 | cd openai-forward 79 | 80 | pip install -e . 81 | openai-forward run # 或使用别名 aifd run 82 | ``` 83 | 启用SSL同上. 84 | 85 | 86 | --- 87 | 88 | ## Railway 一键部署 89 | [![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/tejCum?referralCode=U0-kXv) 90 | 91 | 1. 点击上面部署按钮进行一键部署 92 | 也可先fork本仓库,再手动在操作界面导入自己的fork项目 93 | 2. 填写环境变量,必填项`PORT` :`8000`, 可选项 如默认的OPENAI_API_KEY 等 94 | 3. 绑定自定义域名 95 | 96 | 注: Railway 每月提供 $5.0和500小时执行时间的免费计划。这意味着单个免费用户每个月只能使用大约21天 97 | 98 | > https://railway.openai-forward.com 99 | 100 | --- 101 | 102 | ## Render 一键部署 103 | [![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/beidongjiedeguang/openai-forward) 104 | 105 | 体验下来,Render应该算是所有部署中最简易的一种, 并且它生成的域名国内可以直接访问! 106 | 107 | 1. 点击一键部署按钮 108 | 如果提示需要绑定卡,则可先fork本仓库 -->到Render的Dashboard上 New Web Services --> Connect 到刚刚fork到仓库 109 | 2. 填写环境变量,如默认的OPENAI_API_KEY 等,也可以不填 110 | 111 | 然后等待部署完成即可。 112 | Render的免费计划: 每月750小时免费实例时间(意味着单个实例可以不间断运行)、100G带宽流量、500分钟构建时长. 113 | 114 | 注:默认render在15分钟内没有服务请求时会自动休眠(好处是休眠后不会占用750h的免费实例时间),休眠后下一次请求会被阻塞 5~10s。 115 | 若不希望服务15分钟自动休眠,可以使用定时脚本(如每14分钟)去请求服务进行保活。 116 | 如果希望零停机部署可以在设置中设置`Health Check Path`为`/docs` 117 | > https://render.openai-forward.com 118 | > https://openai-forward.onrender.com 119 | 120 | 121 | --- 122 | 123 | ⚠️下面两种部署方式仅提供简单的转发服务,没有任何额外功能。 124 | 125 | 126 | ## Vercel 一键部署 127 | 128 | [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fbeidongjiedeguang%2Fopenai-forward&project-name=openai-forward&repository-name=openai-forward&framework=other) 129 | 因python的部署方式在vercel上存在诸多限制,因此现在将Vercel部署方式切换为直接转发。 130 | 131 | 1. 点击按钮即可一键免费部署 132 | 也可先fork本仓库,再手动在vercel操作界面import项目 133 | 2. 
60 | ## Docker Deployment
61 | 
62 | ```bash
63 | docker run -d -p 9999:8000 beidongjiedeguang/openai-forward:latest
64 | ```
65 | 
66 | This maps host port 9999; access the service via `http://{ip}:9999`.
67 | The log path inside the container is `/home/openai-forward/Log/`; it can be mounted out of the container at startup.
68 | 
69 | Note: likewise, you can pass the environment variable OPENAI_API_KEY=sk-xxx via -e in the start command to use it as the default api key.
70 | Enabling SSL works the same as above.
71 | See [Environment Variables](README.md#environment-variables) for the configuration options.
72 | 
73 | 
74 | ## Deploying from Source
75 | 
76 | ```bash
77 | git clone https://github.com/beidongjiedeguang/openai-forward.git --depth=1
78 | cd openai-forward
79 | 
80 | pip install -e .
81 | openai-forward run  # or use the alias: aifd run
82 | ```
83 | Enabling SSL works the same as above.
84 | 
85 | 
86 | ---
87 | 
88 | ## Railway One-Click Deployment
89 | [![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/tejCum?referralCode=U0-kXv)
90 | 
91 | 1. Click the deploy button above for one-click deployment.
92 |    You can also fork this repository first, then import your fork manually from the Railway dashboard.
93 | 2. Fill in the environment variables. Required: `PORT`: `8000`. Optional: e.g. a default OPENAI_API_KEY, etc.
94 | 3. Bind a custom domain.
95 | 
96 | Note: Railway's free plan provides $5.0 and 500 hours of execution time per month, which means a single free user can only run it for about 21 days per month.
97 | 
98 | > https://railway.openai-forward.com
99 | 
100 | ---
101 | 
102 | ## Render One-Click Deployment
103 | [![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/beidongjiedeguang/openai-forward)
104 | 
105 | In practice, Render is probably the easiest of all these deployment options, and the domain it assigns is directly accessible from mainland China!
106 | 
107 | 1. Click the one-click deploy button.
108 |    If you are prompted to bind a card, you can instead fork this repository first --> then on the Render Dashboard use New Web Services --> Connect to the fork you just created.
109 | 2. Fill in environment variables such as the default OPENAI_API_KEY, etc.; they can also be left empty.
110 | 
111 | Then just wait for the deployment to complete.
112 | Render's free plan: 750 hours of free instance time per month (meaning a single instance can run without interruption), 100 GB of bandwidth, and 500 minutes of build time.
113 | 
114 | Note: by default, Render puts the service to sleep when it receives no requests for 15 minutes (the upside is that a sleeping service does not consume the 750 free hours); the first request after sleeping is blocked for 5~10 s.
115 | If you do not want the service to auto-sleep after 15 minutes, you can use a scheduled script (e.g. every 14 minutes) that pings the service to keep it alive.
116 | For zero-downtime deployment, set `Health Check Path` to `/docs` in the settings.
117 | > https://render.openai-forward.com
118 | > https://openai-forward.onrender.com
119 | 
120 | 
121 | ---
122 | 
123 | ⚠️ The two deployment methods below only provide a plain forwarding service, without any extra features.
124 | 
125 | 
126 | ## Vercel One-Click Deployment
127 | 
128 | [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fbeidongjiedeguang%2Fopenai-forward&project-name=openai-forward&repository-name=openai-forward&framework=other)
129 | Because Python deployments face many restrictions on Vercel, the Vercel deployment has been switched to plain forwarding.
130 | 
131 | 1. Click the button for free one-click deployment.
132 |    You can also fork this repository first, then import the project manually from the Vercel dashboard.
133 | 2. [Bind a custom domain](https://vercel.com/docs/concepts/projects/domains/add-a-domain): the DNS assigned by Vercel is polluted in some regions, making it unreachable from mainland China; binding a custom domain restores direct access.
134 | 
135 | 
136 | > https://vercel.openai-forward.com
137 | 
138 | ---
139 | 
140 | ## Cloudflare Deployment
141 | 
142 | Choose one of two deployment options:
143 | * Pages deployment: fork this repository, choose Pages when creating the application on [cloudflare](https://dash.cloudflare.com/), then choose Connect to Git and select the fork to complete the deployment.
144 | * Workers deployment: choose Workers when creating the application on [cloudflare](https://dash.cloudflare.com/); once the sample code is deployed, click quick edit and copy [_worker.js](_worker.js) into the code editor to finish deploying the service.
145 | 
146 | Binding a custom domain: the domain cloudflare assigns automatically is also unreachable from mainland China, so a custom domain is needed here as well. (At the moment, the domain assigned automatically by Pages deployments is still directly reachable.)
147 | 
148 | Binding a custom domain requires switching the domain's default nameservers to the nameservers cloudflare provides; roughly, the process is:
149 | ```mermaid
150 | stateDiagram-v2
151 |     [*] --> sign_up_for_cloudflare
152 |     [*] --> register_a_domain_with_any_registrar
153 |     sign_up_for_cloudflare --> add_a_worker_or_page
154 |     add_a_worker_or_page --> add_the_domain_to_the_worker_or_page : worker/page deployed successfully
155 |     register_a_domain_with_any_registrar --> switch_the_default_nameserver_to_cloudflare
156 |     switch_the_default_nameserver_to_cloudflare --> add_the_domain_to_the_worker_or_page : nameserver change verified
157 |     add_the_domain_to_the_worker_or_page --> done
158 | ```
159 | This deployment method is lightweight and simple and supports streaming forwarding, so it is highly recommended for users without a VPS. Note, however, that the simple [_worker.js](_worker.js) script currently only provides forwarding, with no extra features.
160 | 
161 | > https://cloudflare.worker.openai-forward.com
162 | > https://cloudflare.page.openai-forward.com
163 | > https://openai-forward-9ak.pages.dev (this is the domain automatically assigned by cloudflare pages; it is currently directly accessible)
--------------------------------------------------------------------------------
/openai_forward/routers/schemas.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | 
3 | from typing import Dict, List, Optional, Union, Any
4 | 
5 | from pydantic import BaseModel, Field
6 | 
7 | 
8 | class OpenAIV1ChatCompletion(BaseModel):
9 |     """Creates a completion for the chat message"""
10 | 
11 |     model: str = Field(
12 |         ..., description="The model to use for the completion", example="gpt-3.5-turbo"
13 |     )
14 |     messages: List[Dict[str, Any]] = Field(
15 |         ...,
16 |         description="The message to complete",
17 |         example=[{"role": "user", "content": "hi"}],
18 |     )
19 |     temperature: float = Field(default=1, description="0 yields more deterministic results, 1 yields more random results")
20 |     top_p: float = Field(default=1, description="0 yields more deterministic results, 1 yields more random results")
21 |     n: int = Field(
22 |         default=1,
23 |         description="How many chat completion choices to generate for each input message.",
24 |     )
25 |     stream: bool = Field(default=False)
26 |     stop: Union[List[str], str, None] = Field(
27 |         default=None,
28 |         description="Up to 4 sequences where the API will stop generating further tokens.",
29 |     )
30 |     max_tokens: Union[int, None] = Field(
31 |         default=None,
32 |         description="The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.",
33 |     )
34 |     presence_penalty: float = Field(
35 |         default=0, description="Number between -2.0 and 2.0. "
36 |     )
37 |     frequency_penalty: float = Field(
38 |         default=0, description="Number between -2.0 and 2.0. "
39 |     )
40 |     logit_bias: Optional[Dict[str, float]] = Field(
41 |         default=None,
42 |         description="Values in [-100, 100]; the larger the value, the more generation is biased toward that token, while -100 excludes the token entirely.",
43 |     )
44 |     user: Optional[str] = Field(
45 |         default=None,
46 |         description="A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.",
47 |     )
", 47 | ) 48 | 49 | class Config: 50 | schema_extra = { 51 | "example": { 52 | "model": "gpt-3.5-turbo", 53 | "messages": [{"role": "user", "content": "hi"}], 54 | "stream": False, 55 | "temperature": 1, 56 | "top_p": 1, 57 | "logit_bias": None, 58 | } 59 | } 60 | 61 | class AnthropicTextCompletion(BaseModel): 62 | """Creates a completion for the prompt""" 63 | 64 | model: str = Field( 65 | ..., description="The model to use for the completion", example="claude-v1" 66 | ) 67 | prompt: str = Field( 68 | ..., 69 | description="The prompt to complete", 70 | example="Hi! How are you?", 71 | ) 72 | temperature: float = Field(default=1, description="0会导致更确定的结果,1会导致更随机的结果") 73 | top_p: float = Field(default=1, description="0会导致更确定的结果,1会导致更随机的结果") 74 | top_k: float = Field(default=50, description="0会导致更确定的结果,1会导致更随机的结果") 75 | max_tokens_to_sample: Optional[int] = Field( 76 | default=None, 77 | description="max_tokens_to_sample.", 78 | ) 79 | stream: bool = Field(default=False) 80 | 81 | class AnthropicMessagesCompletion(BaseModel): 82 | """Creates a completion for the chat messages""" 83 | 84 | model: str = Field( 85 | ..., description="The model to use for the completion", example="claude-3-opus-20240229" 86 | ) 87 | messages: List[Dict[str, Any]] = Field( 88 | ..., 89 | description="The message to complete", 90 | example=[{"role": "user", "content": "hi"}], 91 | ) 92 | system: Optional[str] = Field( 93 | default=None, 94 | description="System prompt.", 95 | example="Respond only in Spanish.", 96 | ) 97 | temperature: float = Field(default=1, description="0会导致更确定的结果,1会导致更随机的结果") 98 | top_p: float = Field(default=1, description="0会导致更确定的结果,1会导致更随机的结果") 99 | top_k: float = Field(default=50, description="0会导致更确定的结果,1会导致更随机的结果") 100 | max_tokens: Optional[int] = Field( 101 | default=None, 102 | description="The maximum number of tokens to generate before stopping.", 103 | ) 104 | stream: bool = Field(default=False) 105 | 106 | class NAICompletion(BaseModel): 107 | """Creates a completion for the input""" 108 | 109 | model: str = Field( 110 | ..., 111 | description="The model to use for the completion", 112 | example="clio-v1" 113 | ) 114 | input: str = Field( 115 | ..., 116 | description="The input to complete", 117 | example="Human: Hi! How are you?\n\nAssistant:" 118 | ) 119 | parameters: Dict[str, Any] = Field( 120 | ..., 121 | description="Generation parameters to be sent to the model", 122 | example={"max_length": 150, "min_length": 1} 123 | ) -------------------------------------------------------------------------------- /.github/images/jetbrains.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 6 | 7 | 8 | 9 | 10 | 11 | 14 | 15 | 16 | 17 | 18 | 19 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 31 | 32 | 33 | 34 | 35 | 36 | 39 | 40 | 41 | 42 | 43 | 45 | 47 | 48 | 51 | 54 | 56 | 57 | 59 | 63 | 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | **中文** | ~~[**English**](./README_EN.md)~~ 2 | 3 |

4 |
5 | OpenAI Forward 6 |
7 |

8 |

9 | OpenAI API forwarding service
10 | The fastest way to deploy openai api forwarding
11 |

12 | 13 |

14 | PyPI version
15 | 
16 | License
17 | 
18 | 
19 | Release (latest by date)
20 | 
21 | 
22 | GitHub repo size
23 | 
24 | 
25 | docker image size
26 | 
27 | 
28 | tests
29 | 
30 | 
31 | pypi downloads
32 | 
33 | 
34 | codecov
35 | 
36 | 

37 | 38 |
39 | 
40 | [Features](#features) |
41 | [Deployment Guide](#deployment-guide) |
42 | [Applications](#applications) |
43 | [Configuration](#configuration) |
44 | [Chat Logs](#chat-logs)
45 | 
46 | [![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/tejCum?referralCode=U0-kXv)
47 | [![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/beidongjiedeguang/openai-forward)
48 | 
49 | 
50 | 
51 | This project addresses the problem that some regions cannot access OpenAI directly: deploy this service on a (cloud) server that can reach the OpenAI API normally,
52 | and forward OpenAI requests through it. In other words, it sets up a reverse proxy, allows multiple OpenAI API keys to form a round-robin pool, and supports custom keys for secondary distribution.
53 | 
54 | ---
55 | 
56 | Long-term proxy addresses run by this project:
57 | > https://api.openai-forward.com
58 | > https://cloudflare.worker.openai-forward.com
59 | > https://cloudflare.page.openai-forward.com
60 | > https://vercel.openai-forward.com
61 | > https://render.openai-forward.com
62 | > https://railway.openai-forward.com
63 | 
64 | ## Features
65 | 
66 | **Basic features**
67 | 
68 | - [x] Forwarding of all OpenAI endpoints
69 | - [x] Streaming responses
70 | - [x] Configurable forwarding route prefix
71 | - [x] Docker deployment
72 | - [x] pip installation deployment
73 | - [x] Cloudflare deployment
74 | - [x] Vercel one-click deployment
75 | - [x] Railway one-click deployment
76 | - [x] Render one-click deployment
77 | 
78 | **Advanced features**
79 | 
80 | - [x] Multiple openai api keys forming a round-robin pool
81 | - [x] Custom forward api keys (see [Advanced Configuration](#advanced-configuration))
82 | - [x] Chat logging for streaming responses
83 | 
84 | ## Deployment Guide
85 | 
86 | 👉 [Deployment docs](deploy.md)
87 | 
88 | The following deployment options are available.
89 | **With an overseas VPS**
90 | 
91 | 1. [pip deployment](deploy.md#pip-deployment)
92 | 2. [Docker deployment](deploy.md#docker-deployment)
93 | > https://api.openai-forward.com
94 | 
95 | **Free options without a VPS**
96 | 
97 | 1. [Railway deployment](deploy.md#railway-one-click-deployment)
98 | > https://railway.openai-forward.com
99 | 2. [Render one-click deployment](deploy.md#render-one-click-deployment)
100 | > https://render.openai-forward.com
101 | 
102 | ---
103 | The deployments below provide plain forwarding only.
104 | 
105 | 3. [Vercel one-click deployment](deploy.md#vercel-one-click-deployment)
106 | > https://vercel.openai-forward.com
107 | 4. [Cloudflare deployment](deploy.md#cloudflare-deployment)
108 | > https://cloudflare.page.openai-forward.com
109 | 
110 | ## Applications
111 | 
112 | ### [Chat application](https://chat.beidongjiedeguang.top)
113 | 
114 | Build your own chatgpt service on top of the open-source project [ChatGPT-Next-Web](https://github.com/Yidadaa/ChatGPT-Next-Web):
115 | replace `BASE_URL` in the docker start command with the address of our own proxy service.
116 | 
117 | 
118 | 
119 | details 120 | 121 | ```bash 122 | docker run -d \ 123 | -p 3000:3000 \ 124 | -e OPENAI_API_KEY="sk-******" \ 125 | -e BASE_URL="https://api.openai-forward.com" \ 126 | -e CODE="******" \ 127 | yidadaa/chatgpt-next-web 128 | ``` 129 | 130 |
131 | 
132 | ### Using It in Code
133 | 
134 | **Python**
135 | 
136 | ```diff
137 | import openai
138 | + openai.api_base = "https://api.openai-forward.com/v1"
139 | openai.api_key = "sk-******"
140 | ```
141 | 
142 | 
143 | More Examples 144 | 145 | **JS/TS** 146 | 147 | ```diff 148 | import { Configuration } from "openai"; 149 | 150 | const configuration = new Configuration({ 151 | + basePath: "https://api.openai-forward.com/v1", 152 | apiKey: "sk-******", 153 | }); 154 | ``` 155 | 156 | **gpt-3.5-turbo** 157 | 158 | ```bash 159 | curl https://api.openai-forward.com/v1/chat/completions \ 160 | -H "Content-Type: application/json" \ 161 | -H "Authorization: Bearer sk-******" \ 162 | -d '{ 163 | "model": "gpt-3.5-turbo", 164 | "messages": [{"role": "user", "content": "Hello!"}] 165 | }' 166 | ``` 167 | 168 | **Image Generation (DALL-E)** 169 | 170 | ```bash 171 | curl --location 'https://api.openai-forward.com/v1/images/generations' \ 172 | --header 'Authorization: Bearer sk-******' \ 173 | --header 'Content-Type: application/json' \ 174 | --data '{ 175 | "prompt": "A photo of a cat", 176 | "n": 1, 177 | "size": "512x512" 178 | }' 179 | ``` 180 | 181 |
182 | 
183 | ## Configuration
184 | 
185 | Configuration can be supplied in two ways:
186 | either as runtime arguments to `aifd run` on the command line (e.g. `--port=8000`),
187 | or through environment variables.
188 | 
189 | ### Command-Line Arguments
190 | 
191 | They can be listed with `aifd run --help`; an example invocation follows the table below.
192 | 
193 | 
194 | Click for more details
195 | 
196 | **`aifd run` argument options**
197 | 
198 | | Option | Description | Default |
199 | |-----------------|----------------------------|:----------------------:|
200 | | --port | service port | 8000 |
201 | | --workers | number of worker processes | 1 |
202 | | --base_url | same as OPENAI_BASE_URL | https://api.openai.com |
203 | | --api_key | same as OPENAI_API_KEY | `None` |
204 | | --forward_key | same as FORWARD_KEY | `None` |
205 | | --route_prefix | same as ROUTE_PREFIX | `None` |
206 | | --log_chat | same as LOG_CHAT | `False` |
207 | 
208 | 
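As a quick illustration, a start command combining several of the documented flags might look like the sketch below; the port and prefix values here are placeholders, not recommendations, and the exact flag syntax is whatever `aifd run --help` reports:

```bash
aifd run --port=9999 --workers=2 --log_chat=True --route_prefix=openai
```
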
209 | 
210 | ### Environment Variables
211 | 
212 | These can be read from a `.env` file in the working directory.
213 | 
214 | | Environment Variable | Description | Default |
215 | |-----------------|-----------------------------------------------------------------------------------------------------------------------------------|:------------------------:|
216 | | OPENAI_BASE_URL | the official openai api address used by default | https://api.openai.com |
217 | | OPENAI_API_KEY | default openai api key; multiple default api keys are supported, each starting with `sk-`, separated by spaces | none |
218 | | FORWARD_KEY | lets callers use this key in place of the openai api key; multiple forward keys are supported, separated by spaces; if OPENAI_API_KEY is set but FORWARD_KEY is not, clients can call without providing any key, so for security reasons leaving FORWARD_KEY empty is not recommended in that case | none |
219 | | ROUTE_PREFIX | route prefix | none |
220 | | LOG_CHAT | whether to log chat content | `false` |
221 | 
222 | ## Advanced Configuration
223 | 
224 | **Set the openai api_key to a custom forward key**
225 | This requires configuring both OPENAI_API_KEY and FORWARD_KEY, for example:
226 | 
227 | Click for more details
228 | 
229 | ```bash
230 | OPENAI_API_KEY=sk-*******
231 | FORWARD_KEY=fk-****** # the fk-token here is one we define ourselves
232 | ```
233 | 
234 | With FORWARD_KEY configured as `fk-******`, clients then only need to set their OPENAI_API_KEY to our custom `fk-******` when calling the service.
235 | The benefit is that, when using third-party applications that require an OPENAI_API_KEY, we can use the custom api-key `fk-******`
236 | without worrying about the real OPENAI_API_KEY being leaked, and `fk-******` can be distributed to others.
237 | 
238 | **Use case:**
239 | 
240 | ```bash
241 | curl https://api.openai-forward.com/v1/chat/completions \
242 |   -H "Content-Type: application/json" \
243 |   -H "Authorization: Bearer fk-******" \
244 |   -d '{
245 |     "model": "gpt-3.5-turbo",
246 |     "messages": [{"role": "user", "content": "Hello!"}]
247 |   }'
248 | ```
249 | 
250 | **Python**
251 | 
252 | ```diff
253 | import openai
254 | + openai.api_base = "https://api.openai-forward.com/v1"
255 | - openai.api_key = "sk-******"
256 | + openai.api_key = "fk-******"
257 | ```
258 | 
259 | **Web application**
260 | 
261 | ```bash
262 | docker run -d \
263 |     -p 3000:3000 \
264 |     -e OPENAI_API_KEY="fk-******" \
265 |     -e BASE_URL="https://api.openai-forward.com" \
266 |     -e CODE="" \
267 |     yidadaa/chatgpt-next-web
268 | ```
269 | 
270 | 
271 | 
272 | ## Chat Logs
273 | 
274 | Chat content is not logged by default; to enable it, set the environment variable `LOG_CHAT=true`.
275 | 
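For instance, with a pip deployment you might enable logging for a single run as in the sketch below (the variable can equally be set in the `.env` file instead):

```bash
LOG_CHAT=true aifd run
```
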
276 | Click for more details
277 | 
278 | Logs are saved under the `Log/chat` path in the current directory.
279 | The record format is:
280 | 
281 | ```text
282 | {'messages': [{'user': 'hi'}], 'model': 'gpt-3.5-turbo', 'forwarded-for': '', 'uid': '467a17ec-bf39-4b65-9ebd-e722b3bdd5c3', 'datetime': '2023-07-18 14:01:21'}
283 | {'assistant': 'Hello there! How can I assist you today?', 'uid': '467a17ec-bf39-4b65-9ebd-e722b3bdd5c3'}
284 | {'messages': [{'user': 'Hello!'}], 'model': 'gpt-3.5-turbo', 'forwarded-for': '', 'uid': 'f844d156-e747-4887-aef8-e40d977b5ee7', 'datetime': '2023-07-18 14:01:23'}
285 | {'assistant': 'Hi there! How can I assist you today?', 'uid': 'f844d156-e747-4887-aef8-e40d977b5ee7'}
286 | ```
287 | 
288 | Convert to `json` format:
289 | 
290 | ```bash
291 | aifd convert
292 | ```
293 | 
294 | This produces `chat.json`:
295 | 
296 | ```json
297 | [
298 |   {
299 |     "datetime": "2023-07-18 14:01:21",
300 |     "forwarded-for": "",
301 |     "model": "gpt-3.5-turbo",
302 |     "messages": [
303 |       {
304 |         "user": "hi"
305 |       }
306 |     ],
307 |     "assistant": "Hello there! How can I assist you today?"
308 |   },
309 |   {
310 |     "datetime": "2023-07-18 14:01:23",
311 |     "forwarded-for": "",
312 |     "model": "gpt-3.5-turbo",
313 |     "messages": [
314 |       {
315 |         "user": "Hello!"
316 |       }
317 |     ],
318 |     "assistant": "Hi there! How can I assist you today?"
319 |   }
320 | ]
321 | ```
322 | 
323 | 
324 | 
325 | ## Backers and Sponsors
326 | 
327 | 
328 | 
329 | 
330 | 
331 | ## License
332 | 
333 | OpenAI-Forward is licensed under the [MIT](https://opensource.org/license/mit/) license.
334 | 
--------------------------------------------------------------------------------