├── tests ├── .gitignore ├── conftest.py ├── utils.py ├── test_http.py ├── test_env.py └── test_api.py ├── .vercelignore ├── openai_forward ├── content │ ├── __init__.py │ ├── whisper.py │ └── chat.py ├── routers │ ├── __init__.py │ ├── v1.py │ └── schemas.py ├── __init__.py ├── app.py ├── openai.py ├── __main__.py ├── anthropic.py ├── config.py ├── base.py ├── tool.py └── nai.py ├── render.yaml ├── .github ├── data │ └── whisper.m4a ├── workflows │ ├── python-publish.yml │ ├── gh-release.yml │ ├── docker-publish.yml │ └── ci.yml └── images │ └── jetbrains.svg ├── requirements.txt ├── .env ├── .dockerignore ├── Examples ├── embedding.py ├── chat.py └── whisper.py ├── vercel.json ├── pytest.ini ├── .pre-commit-config.yaml ├── .env.example ├── docker-compose.yaml ├── _worker.js ├── scripts └── black.sh ├── Dockerfile ├── LICENSE ├── Makefile ├── pyproject.toml ├── CODE_OF_CONDUCT.md ├── .gitignore ├── CONTRIBUTING.md ├── deploy.md └── README.md /tests/.gitignore: -------------------------------------------------------------------------------- 1 | api_test.py -------------------------------------------------------------------------------- /.vercelignore: -------------------------------------------------------------------------------- 1 | **/* 2 | !vercel.json -------------------------------------------------------------------------------- /openai_forward/content/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /openai_forward/routers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /render.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | - name: openai-forward 3 | type: web 4 | env: docker 
-------------------------------------------------------------------------------- /.github/data/whisper.m4a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CharHubAI/proxy/HEAD/.github/data/whisper.m4a -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | sys.path.append(os.path.join(os.path.dirname(__file__), "..")) 5 | -------------------------------------------------------------------------------- /openai_forward/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.3.0-alpha" 2 | 3 | from dotenv import load_dotenv 4 | 5 | load_dotenv() 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | loguru 3 | sparrow-python>=0.1.3 4 | fastapi>=0.94.0 5 | uvicorn>=0.21.0 6 | orjson 7 | python-dotenv 8 | httpx 9 | pytz -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | LOG_CHAT=false 2 | OPENAI_BASE_URL=https://api.openai.com 3 | OPENAI_API_KEY= 4 | FORWARD_KEY= 5 | ROUTE_PREFIX= 6 | 7 | # 设定时区 8 | TZ=Asia/Shanghai -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | **/* 2 | !openai_forward 3 | !pyproject.toml 4 | !README.md 5 | !requirements.txt 6 | !cert.pem 7 | !fullchain.pem 8 | !chain.pem 9 | !privkey.pem -------------------------------------------------------------------------------- /Examples/embedding.py: -------------------------------------------------------------------------------- 1 | 
import openai 2 | 3 | openai.api_base = "http://localhost:8000/v1" 4 | openai.api_key = "sk-******" 5 | response = openai.Embedding.create( 6 | input="Your text string goes here", model="text-embedding-ada-002" 7 | ) 8 | embeddings = response['data'][0]['embedding'] 9 | print(embeddings) 10 | -------------------------------------------------------------------------------- /openai_forward/content/whisper.py: -------------------------------------------------------------------------------- 1 | from loguru import logger 2 | 3 | 4 | class WhisperSaver: 5 | def __init__(self): 6 | self.logger = logger.bind(whisper=True) 7 | 8 | def add_log(self, bytes_: bytes): 9 | text_content = bytes_.decode("utf-8") 10 | self.logger.debug(text_content) 11 | -------------------------------------------------------------------------------- /vercel.json: -------------------------------------------------------------------------------- 1 | { 2 | "rewrites": [ 3 | { "source": "/", "destination": "https://api.openai.com" }, 4 | { 5 | "source": "/:match*", 6 | "destination": "https://api.openai.com/:match*" 7 | } 8 | ], 9 | "github": { 10 | "silent": true 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | faulthandler_timeout=180 3 | markers = 4 | slow: marks tests as slow (deselect with '-m "not slow"') 5 | timeout: marks test timeout duration 6 | repeat: marks that test run n times 7 | addopts = --doctest-modules --doctest-glob=README.md --doctest-glob=*.py --ignore=setup.py 8 | norecursedirs = Examples 9 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/timothycrosley/isort 3 | rev: 5.12.0 4 | hooks: 5 | - id: isort 6 | args: ["--profile", "black"] 7 | - 
repo: https://github.com/psf/black 8 | rev: 22.3.0 9 | hooks: 10 | - id: black 11 | types: [python] 12 | exclude: docs/ 13 | args: 14 | - -S -------------------------------------------------------------------------------- /Examples/chat.py: -------------------------------------------------------------------------------- 1 | import openai 2 | 3 | openai.api_base = "https://api.openai-forward.com/v1" 4 | openai.api_key = "sk-******" 5 | 6 | resp = openai.ChatCompletion.create( 7 | model="gpt-3.5-turbo", 8 | messages=[ 9 | {"role": "user", "content": "Who won the world series in 2020?"}, 10 | ], 11 | ) 12 | print(resp.choices) 13 | -------------------------------------------------------------------------------- /openai_forward/app.py: -------------------------------------------------------------------------------- 1 | from sparrow.api import create_app 2 | 3 | from .anthropic import Anthropic 4 | from .openai import Openai 5 | from .routers.v1 import router as router_v1 6 | 7 | app = create_app(title="openai_forward", version="1.0") 8 | app.openapi_version = "3.0.0" 9 | 10 | openai = Openai() 11 | anthropic = Anthropic() 12 | 13 | app.include_router(router_v1) 14 | -------------------------------------------------------------------------------- /Examples/whisper.py: -------------------------------------------------------------------------------- 1 | # Note: you need to be using OpenAI Python v0.27.0 for the code below to work 2 | import openai 3 | from sparrow import relp 4 | 5 | openai.api_base = "https://api.openai-forward.com/v1" 6 | openai.api_key = "sk-******" 7 | 8 | audio_file = open(relp("../.github/data/whisper.m4a"), "rb") 9 | transcript = openai.Audio.transcribe("whisper-1", audio_file) 10 | print(transcript) 11 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # LOG_CHAT: 是否Log 对话记录 2 | LOG_CHAT=True 3 | 4 | 
OPENAI_BASE_URL=https://api.openai.com 5 | 6 | # OPENAI_API_KEY:允许输入多个api key 形成轮询池 7 | OPENAI_API_KEY=sk-xxx1 sk-xxx2 sk-xxx3 8 | 9 | # FORWARD_KEY: 当前面的OPENAI_API_KEY被设置,就可以设置这里的FORWARD_KEY,客户端调用时可以使用FORWARD_KEY作为api key 10 | FORWARD_KEY=fk-xxx1 11 | 12 | # ROUTE_PREFIX: 可指定整个转发服务的根路由前缀 13 | ROUTE_PREFIX= 14 | 15 | # 设定时区 16 | TZ=NewYork/America -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | openai_forward: 3 | image: beidongjiedeguang/openai-forward:latest 4 | container_name: openai-forward-container 5 | env_file: 6 | .env 7 | ports: 8 | - "8000:8000" 9 | volumes: 10 | - ./Log-caloi-top:/home/openai-forward/Log 11 | - ./openai_forward:/home/openai-forward/openai_forward 12 | command: 13 | - --port=8000 14 | - --workers=1 -------------------------------------------------------------------------------- /_worker.js: -------------------------------------------------------------------------------- 1 | export default { 2 | async fetch(request, env) { 3 | try { 4 | const url = new URL(request.url); 5 | url.hostname = "api.openai.com"; 6 | return await fetch( 7 | new Request(url, {method: request.method, headers: request.headers, body: request.body}) 8 | ); 9 | } catch (e) { 10 | return new Response(e.stack, {status: 500}); 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /scripts/black.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | pip install black==22.3.0 3 | arrVar=() 4 | echo we ignore non-*.py files 5 | excluded_files=( 6 | ) 7 | for changed_file in $CHANGED_FILES; do 8 | if [[ ${changed_file} == *.py ]] && ! 
[[ " ${excluded_files[@]} " =~ " ${changed_file} " ]]; then 9 | echo checking ${changed_file} 10 | arrVar+=(${changed_file}) 11 | fi 12 | done 13 | if (( ${#arrVar[@]} )); then 14 | black -S --check "${arrVar[@]}" 15 | fi 16 | echo "no files left to check" 17 | exit 0 -------------------------------------------------------------------------------- /openai_forward/openai.py: -------------------------------------------------------------------------------- 1 | from fastapi import Request 2 | 3 | from .base import OpenaiBase 4 | 5 | 6 | class Openai(OpenaiBase): 7 | def __init__(self): 8 | if self.IP_BLACKLIST or self.IP_WHITELIST: 9 | self.validate_host = True 10 | else: 11 | self.validate_host = False 12 | 13 | async def reverse_proxy(self, request: Request): 14 | if self.validate_host: 15 | self.validate_request_host(request.client.host) 16 | return await self._reverse_proxy(request) 17 | -------------------------------------------------------------------------------- /tests/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | from sparrow import ls 5 | 6 | 7 | def rm(*file_pattern: str, rel=False): 8 | """Remove files or directories. 
9 | Example: 10 | -------- 11 | >>> rm("*.jpg", "*.png") 12 | >>> rm("*.jpg", "*.png", rel=True) 13 | """ 14 | path_list = ls(".", *file_pattern, relp=rel, concat="extend") 15 | for file in path_list: 16 | if os.path.isfile(file): 17 | print("remove ", file) 18 | os.remove(file) 19 | # os.system("rm -f " + file) 20 | elif os.path.isdir(file): 21 | shutil.rmtree(file, ignore_errors=True) 22 | print("rm tree ", file) 23 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-alpine 2 | LABEL maintainer="kunyuan" 3 | ENV LC_ALL=C.UTF-8 4 | ENV LANG=C.UTF-8 5 | ENV TZ=America/New_York 6 | RUN apk update && \ 7 | apk add tzdata --no-cache && \ 8 | cp /usr/share/zoneinfo/America/New_York /etc/localtime && \ 9 | apk del tzdata && \ 10 | mkdir -p /usr/share/zoneinfo/America/ && \ 11 | ln -s /etc/localtime /usr/share/zoneinfo/America/New_York 12 | 13 | COPY requirements.txt requirements.txt 14 | RUN pip install --no-cache-dir -r requirements.txt 15 | 16 | COPY . 
/home/openai-forward 17 | WORKDIR /home/openai-forward 18 | 19 | ENV ssl_keyfile="/home/openai-forward/privkey.pem" 20 | ENV ssl_certfile="/home/openai-forward/fullchain.pem" 21 | EXPOSE 8000 22 | ENTRYPOINT ["python", "-m", "openai_forward.__main__", "run"] 23 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | name: Upload Python Package 2 | 3 | on: 4 | workflow_dispatch: 5 | # release: 6 | # types: [published] 7 | push: 8 | tags: 9 | - 'v*' 10 | 11 | 12 | jobs: 13 | deploy: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v3 17 | - name: Set up Python 18 | uses: actions/setup-python@v4 19 | with: 20 | python-version: '3.10' 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install setuptools wheel twine build hatch 25 | - name: Build and publish 26 | env: 27 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 28 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} 29 | run: | 30 | python -m build 31 | twine upload dist/* -------------------------------------------------------------------------------- /tests/test_http.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import time 3 | 4 | import httpx 5 | from sparrow.multiprocess import kill 6 | from utils import rm 7 | 8 | 9 | class TestRun: 10 | @classmethod 11 | def setup_class(cls): 12 | kill(8000) 13 | base_url = "https://api.openai-forward.com" 14 | subprocess.Popen(["nohup", "openai-forward", "run", "--base_url", base_url]) 15 | time.sleep(3) 16 | 17 | @classmethod 18 | def teardown_class(cls): 19 | kill(8000) 20 | rm("nohup.out") 21 | 22 | def test_get_doc(self): 23 | resp = httpx.get("http://localhost:8000/docs") 24 | assert resp.is_success 25 | 26 | def test_get_chat_completions(self): 27 | resp = 
httpx.get("http://localhost:8000/v1/chat/completions") 28 | assert resp.status_code == 401 29 | -------------------------------------------------------------------------------- /.github/workflows/gh-release.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | # Sequence of patterns matched against refs/tags 4 | tags: 5 | - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10 6 | 7 | #on: 8 | # push: 9 | # branches-ignore: 10 | # - '**' # temporally disable this action 11 | 12 | 13 | name: Create Release 14 | 15 | jobs: 16 | build: 17 | name: Create Release 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout code 21 | uses: actions/checkout@v3 22 | - name: Create Release 23 | id: create_release 24 | uses: actions/create-release@v1 25 | env: 26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token 27 | with: 28 | tag_name: ${{ github.ref }} 29 | release_name: Release ${{ github.ref }} 30 | body: | 31 | See ChangeLog 32 | draft: false 33 | prerelease: false 34 | -------------------------------------------------------------------------------- /tests/test_env.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | import time 4 | 5 | import pytest 6 | from dotenv import load_dotenv 7 | 8 | import openai_forward 9 | 10 | 11 | class TestEnv: 12 | with open(".env", "r", encoding="utf-8") as f: 13 | defualt_env = f.read() 14 | 15 | @classmethod 16 | def setup_class(cls): 17 | env = """\ 18 | LOG_CHAT=true 19 | OPENAI_BASE_URL=https://api.openai.com 20 | OPENAI_API_KEY=key1 key2 21 | FORWARD_KEY=ps1 ps2 ps3 22 | ROUTE_PREFIX= 23 | IP_WHITELIST= 24 | IP_BLACKLIST= 25 | """ 26 | with open(".env", "w", encoding="utf-8") as f: 27 | f.write(env) 28 | time.sleep(0.1) 29 | 30 | load_dotenv(override=True) 31 | importlib.reload(openai_forward.base) 32 | cls.aibase = 
openai_forward.base.OpenaiBase() 33 | 34 | @classmethod 35 | def teardown_class(cls): 36 | with open(".env", "w", encoding="utf-8") as f: 37 | f.write(cls.defualt_env) 38 | 39 | def test_env1(self): 40 | assert self.aibase._openai_api_key_list == ["key1", "key2"] 41 | assert self.aibase._no_auth_mode is False 42 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 kunyuan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: start build push run down test twine log pull 2 | 3 | image := "beidongjiedeguang/openai-forward:latest" 4 | container := "openai-forward-container" 5 | compose_path := "docker-compose.yaml" 6 | 7 | start: 8 | docker run -d \ 9 | --name $(container) \ 10 | --env-file .env \ 11 | -p 27001:8000 \ 12 | -v $(shell pwd)/Log:/home/openai-forward/Log \ 13 | -v $(shell pwd)/openai_forward:/home/openai-forward/openai_forward \ 14 | $(image) 15 | 16 | 17 | exec: 18 | docker exec -it $(container) bash 19 | 20 | log: 21 | docker logs -f $(container) 22 | 23 | rm: 24 | docker rm -f $(container) 25 | 26 | up: 27 | @docker-compose -f $(compose_path) up 28 | 29 | down: 30 | @docker-compose -f $(compose_path) down 31 | 32 | run: 33 | @docker-compose -f $(compose_path) run -it -p 8000:8000 openai_forward bash 34 | 35 | test: 36 | pytest -v tests 37 | 38 | twine: 39 | @twine upload dist/* 40 | @rm -rf dist/* 41 | 42 | build: 43 | docker build --tag $(image) . 44 | 45 | build-push: 46 | docker buildx build --push --platform linux/arm64/v8,linux/amd64 --tag $(image) . 
47 | 48 | pull: 49 | docker pull $(image) 50 | 51 | deploy: 52 | vercel --prod -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish Docker image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | tags: 7 | - 'v*' 8 | 9 | jobs: 10 | push_to_registry: 11 | name: Push Docker image to Docker Hub 12 | runs-on: ubuntu-latest 13 | steps: 14 | - 15 | name: Check out the repo 16 | uses: actions/checkout@v3 17 | - 18 | name: Log in to Docker Hub 19 | uses: docker/login-action@v2 20 | with: 21 | username: ${{ secrets.DOCKER_USERNAME }} 22 | password: ${{ secrets.DOCKER_PASSWORD }} 23 | 24 | - 25 | name: Extract metadata (tags, labels) for Docker 26 | id: meta 27 | uses: docker/metadata-action@v4 28 | with: 29 | images: beidongjiedeguang/openai-forward 30 | tags: | 31 | type=raw,value=latest 32 | type=ref,event=tag 33 | 34 | - 35 | name: Set up QEMU 36 | uses: docker/setup-qemu-action@v2 37 | 38 | - 39 | name: Set up Docker Buildx 40 | uses: docker/setup-buildx-action@v2 41 | 42 | - 43 | name: Build and push Docker image 44 | uses: docker/build-push-action@v4 45 | with: 46 | context: . 
47 | file: ./Dockerfile 48 | platforms: linux/amd64,linux/arm64 49 | push: true 50 | tags: ${{ steps.meta.outputs.tags }} 51 | labels: ${{ steps.meta.outputs.labels }} 52 | cache-from: type=gha 53 | cache-to: type=gha,mode=max 54 | -------------------------------------------------------------------------------- /openai_forward/routers/v1.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Request 2 | 3 | from ..nai import NovelAI 4 | from ..anthropic import Anthropic 5 | from ..openai import Openai 6 | from .schemas import OpenAIV1ChatCompletion, AnthropicTextCompletion, AnthropicMessagesCompletion, NAICompletion 7 | 8 | openai = Openai() 9 | anthropic = Anthropic() 10 | novelai = NovelAI() 11 | router = APIRouter(prefix=openai.ROUTE_PREFIX, tags=["v1"]) 12 | 13 | 14 | @router.post("/v1/chat/completions") 15 | async def chat_completions(params: OpenAIV1ChatCompletion, request: Request): 16 | """该接口只是为了document, 将此路由接口放在openai.reverse_proxy接口后面, 实际不会执行该接口。""" 17 | return await openai.reverse_proxy(request) 18 | 19 | @router.get("/v1/models") 20 | async def models_list(request: Request): 21 | """该接口只是为了document, 将此路由接口放在openai.reverse_proxy接口后面, 实际不会执行该接口。""" 22 | return await openai.reverse_proxy(request) 23 | 24 | @router.post("/v1/complete") 25 | async def anthropic_text_completions(params: AnthropicTextCompletion, request: Request): 26 | return await anthropic.reverse_proxy(request) 27 | 28 | @router.post("/v1/messages") 29 | async def anthropic_messages_completions(params: AnthropicMessagesCompletion, request: Request): 30 | return await anthropic.reverse_proxy(request) 31 | 32 | @router.get("/user/subscription") 33 | async def subscription_status(request: Request): 34 | return await novelai.reverse_proxy(request) 35 | 36 | # NovelAI has separate URLs for streaming and non-streaming 37 | @router.post("/ai/generate") 38 | @router.post("/ai/generate-stream") 39 | async def 
novelai_completions(params: NAICompletion, request: Request): 40 | return await novelai.reverse_proxy(request) -------------------------------------------------------------------------------- /tests/test_api.py: -------------------------------------------------------------------------------- 1 | from itertools import cycle 2 | 3 | import pytest 4 | from fastapi import HTTPException 5 | 6 | from openai_forward.openai import OpenaiBase 7 | 8 | 9 | @pytest.fixture(scope="module") 10 | def openai() -> OpenaiBase: 11 | return OpenaiBase() 12 | 13 | 14 | class TestOpenai: 15 | @staticmethod 16 | def teardown_method(): 17 | OpenaiBase.IP_BLACKLIST = [] 18 | OpenaiBase.IP_WHITELIST = [] 19 | OpenaiBase._default_api_key_list = [] 20 | 21 | def test_env(self, openai: OpenaiBase): 22 | assert openai.BASE_URL == "https://api.openai.com" 23 | 24 | def test_api_keys(self, openai: OpenaiBase): 25 | assert openai._default_api_key_list == [] 26 | openai._default_api_key_list = ["a", "b"] 27 | openai._cycle_api_key = cycle(openai._default_api_key_list) 28 | assert next(openai._cycle_api_key) == "a" 29 | assert next(openai._cycle_api_key) == "b" 30 | assert next(openai._cycle_api_key) == "a" 31 | assert next(openai._cycle_api_key) == "b" 32 | assert next(openai._cycle_api_key) == "a" 33 | 34 | def test_validate_ip(self, openai: OpenaiBase): 35 | ip1 = "1.1.1.1" 36 | ip2 = "2.2.2.2" 37 | assert openai.validate_request_host("*") is None 38 | openai.IP_WHITELIST.append(ip1) 39 | assert openai.validate_request_host(ip1) is None 40 | with pytest.raises(HTTPException): 41 | openai.validate_request_host(ip2) 42 | openai.IP_WHITELIST = [] 43 | openai.IP_BLACKLIST.append(ip1) 44 | assert openai.validate_request_host(ip2) is None 45 | with pytest.raises(HTTPException): 46 | openai.validate_request_host(ip1) 47 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | 
[build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "openai_forward" 7 | description = "🚀 OpenAI API Reverse Proxy · ChatGPT API Proxy" 8 | authors = [ 9 | { name = "kunyuan", email = "beidongjiedeguang@gmail.com" }, 10 | ] 11 | license = "MIT" 12 | requires-python = ">=3.6" 13 | readme = "README.md" 14 | keywords = ["openai", "chatgpt", "openai-api", "openai-proxy", "OpenAI API Forwarding", "streaming-api", "fastapi", "python"] 15 | classifiers = [ 16 | "Development Status :: 5 - Production/Stable", 17 | "Operating System :: OS Independent", 18 | "Programming Language :: Python :: 3" 19 | ] 20 | 21 | dependencies = [ 22 | "loguru>=0.7.0", 23 | "sparrow-python>=0.1.5", 24 | "fastapi>=0.90.0", 25 | "uvicorn>=0.23.1", 26 | "orjson>=3.9.2", 27 | "python-dotenv", 28 | "httpx>=0.24.1", 29 | "pytz", 30 | ] 31 | 32 | dynamic = ["version"] 33 | 34 | [project.urls] 35 | Homepage = "https://github.com/beidongjiedeguang/openai-forward" 36 | Documentation = "https://github.com/beidongjiedeguang/openai-forward#openai-forward" 37 | Issues = "https://github.com/beidongjiedeguang/openai-forward/issues" 38 | Source = "https://github.com/beidongjiedeguang/openai-forward" 39 | 40 | [project.optional-dependencies] 41 | test = [ 42 | "openai>=0.27.8", 43 | "pytest", 44 | ] 45 | 46 | [project.scripts] 47 | openai_forward = "openai_forward.__main__:main" 48 | openai-forward = "openai_forward.__main__:main" 49 | aifd = "openai_forward.__main__:main" 50 | 51 | [tool.hatch.version] 52 | path = "openai_forward/__init__.py" 53 | 54 | [tool.isort] 55 | profile = "black" 56 | 57 | [tool.hatch.build] 58 | include = [ 59 | "openai_forward/**/*.py", 60 | ] 61 | exclude = [ 62 | ] 63 | 64 | artifacts = [ 65 | "openai_forward/web/index.js", 66 | ] 67 | 68 | 69 | [tool.hatch.build.targets.wheel] 70 | packages = ["openai_forward"] -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: 
-------------------------------------------------------------------------------- 1 | # Code of Conduct for openai-forward 2 | 3 | ## 1. Purpose 4 | 5 | The purpose of this Code of Conduct is to provide guidelines for contributors to the openai-forward project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct. 6 | 7 | ## 2. Scope 8 | 9 | This Code of Conduct applies to all contributors, maintainers, and users of the openai-forward project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project. 10 | 11 | ## 3. Our Standards 12 | 13 | We encourage the following behavior: 14 | 15 | * Being respectful and considerate to others 16 | * Actively seeking diverse perspectives 17 | * Providing constructive feedback and assistance 18 | * Demonstrating empathy and understanding 19 | 20 | We discourage the following behavior: 21 | 22 | * Harassment or discrimination of any kind 23 | * Disrespectful, offensive, or inappropriate language or content 24 | * Personal attacks or insults 25 | * Unwarranted criticism or negativity 26 | 27 | ## 4. Reporting and Enforcement 28 | 29 | If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary. 30 | 31 | Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations. 32 | 33 | ## 5. Acknowledgements 34 | 35 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html). 36 | 37 | ## 6. 
Contact 38 | 39 | If you have any questions or concerns, please contact the project maintainers. 40 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push : 5 | branches: 6 | - main 7 | pull_request: 8 | paths-ignore: 9 | - 'docs/**' 10 | - '*.md' 11 | branches: 12 | - main 13 | 14 | jobs: 15 | check-black: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v3 19 | with: 20 | fetch-depth: 0 21 | - name: Set up Python 3.10 22 | uses: actions/setup-python@v4 23 | with: 24 | python-version: "3.10" 25 | - id: file_changes 26 | uses: Ana06/get-changed-files@v1.2 27 | - name: check black 28 | env: 29 | CHANGED_FILES: ${{ steps.file_changes.outputs.added_modified }} 30 | run: bash ./scripts/black.sh 31 | 32 | lint-flake-8: 33 | runs-on: ubuntu-latest 34 | steps: 35 | - uses: actions/checkout@v3 36 | - name: Set up Python 3.10 37 | uses: actions/setup-python@v4 38 | with: 39 | python-version: "3.10" 40 | - name: Lint with flake8 41 | run: | 42 | pip install flake8 43 | # stop the build if there are Python syntax errors or undefined names 44 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics --exclude .git,__pycache__,docs/source/conf.py,old,build,dist,tests/ 45 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 46 | flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics --exclude .git,__pycache__,docs/source/conf.py,old,build,dist,tests/ 47 | 48 | unit-tests: 49 | runs-on: ubuntu-latest 50 | strategy: 51 | matrix: 52 | python-version: ["3.10"] 53 | steps: 54 | - uses: actions/checkout@v3 55 | - name: Set up Python ${{ matrix.python-version }} 56 | uses: actions/setup-python@v3 57 | with: 58 | python-version: ${{ matrix.python-version }} 59 | - name: Install dependencies 60 | run: | 61 | python -m pip install --upgrade pip 62 | python -m pip install pytest psutil 63 | python -m pip install . 64 | # python -m pip install codecov pytest-cov 65 | - name: Run tests 66 | run: | 67 | pytest -v -s 68 | -------------------------------------------------------------------------------- /openai_forward/__main__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import fire 4 | import uvicorn 5 | 6 | 7 | class Cli: 8 | @staticmethod 9 | def run( 10 | workers=20, 11 | api_key=None, 12 | forward_key=None, 13 | base_url=None, 14 | log_chat=False, 15 | route_prefix=None, 16 | ip_whitelist=None, 17 | ip_blacklist=None, 18 | ): 19 | """Run forwarding serve. 
20 | 21 | Parameters 22 | ---------- 23 | 24 | port: int, default 8000 25 | workers: int, 1 26 | api_key: str, None 27 | forward_key: str, None 28 | base_url: str, None 29 | log_chat: str, None 30 | route_prefix: str, None 31 | ip_whitelist: str, None 32 | ip_blacklist: str, None 33 | """ 34 | if base_url: 35 | os.environ["OPENAI_BASE_URL"] = base_url 36 | if api_key: 37 | os.environ["OPENAI_API_KEY"] = api_key 38 | if forward_key: 39 | os.environ["FORWARD_KEY"] = forward_key 40 | if log_chat: 41 | os.environ["LOG_CHAT"] = log_chat 42 | if route_prefix: 43 | os.environ["ROUTE_PREFIX"] = route_prefix 44 | if ip_whitelist: 45 | os.environ["IP_WHITELIST"] = ip_whitelist 46 | if ip_blacklist: 47 | os.environ["IP_BLACKLIST"] = ip_blacklist 48 | 49 | ssl_keyfile = os.environ.get("ssl_keyfile", 'privkey.pem') or None 50 | ssl_certfile = os.environ.get("ssl_certfile", 'fullchain.pem') or None 51 | port = int(os.environ.get("port", 443)) or 443 52 | uvicorn.run( 53 | app="openai_forward.app:app", 54 | host="0.0.0.0", 55 | port=port, 56 | workers=workers, 57 | app_dir="..", 58 | ssl_keyfile=ssl_keyfile, 59 | ssl_certfile=ssl_certfile, 60 | ) 61 | 62 | @staticmethod 63 | def convert(log_folder: str = "./Log/chat", target_path: str = "./Log/chat.json"): 64 | """Convert log folder to jsonl file""" 65 | from openai_forward.tool import convert_folder_to_jsonl 66 | 67 | print(f"Convert {log_folder}/*.log to {target_path}") 68 | convert_folder_to_jsonl(log_folder, target_path) 69 | 70 | 71 | def main(): 72 | fire.Fire(Cli) 73 | 74 | 75 | if __name__ == "__main__": 76 | main() 77 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .github/release-template.ejs 2 | scripts/release.sh 3 | node_modules 4 | package-lock.json 5 | package.json 6 | .idea/ 7 | .vscode/ 8 | .DS_Store 9 | third-party/ 10 | run.sh 11 | ssl/ 12 | chat.yaml 13 | chat_*.yaml 14 | *.pem 
15 | Log/ 16 | Log-caloi-top/ 17 | dist/ 18 | # Byte-compiled / optimized / DLL files 19 | __pycache__/ 20 | *.py[cod] 21 | *$py.class 22 | 23 | # C extensions 24 | *.so 25 | 26 | # Distribution / packaging 27 | .Python 28 | build/ 29 | develop-eggs/ 30 | dist/ 31 | downloads/ 32 | eggs/ 33 | .eggs/ 34 | lib/ 35 | lib64/ 36 | parts/ 37 | sdist/ 38 | var/ 39 | wheels/ 40 | pip-wheel-metadata/ 41 | share/python-wheels/ 42 | *.egg-info/ 43 | .installed.cfg 44 | *.egg 45 | MANIFEST 46 | 47 | # PyInstaller 48 | # Usually these files are written by a python script from a template 49 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 50 | *.manifest 51 | *.spec 52 | 53 | # Installer logs 54 | pip-log.txt 55 | pip-delete-this-directory.txt 56 | 57 | # Unit test / coverage reports 58 | htmlcov/ 59 | .tox/ 60 | .nox/ 61 | .coverage 62 | .coverage.* 63 | .cache 64 | nosetests.xml 65 | coverage.xml 66 | *.cover 67 | *.py,cover 68 | .hypothesis/ 69 | .pytest_cache/ 70 | 71 | # Translations 72 | *.mo 73 | *.pot 74 | 75 | # Django stuff: 76 | *.log 77 | local_settings.py 78 | db.sqlite3 79 | db.sqlite3-journal 80 | 81 | # Flask stuff: 82 | instance/ 83 | .webassets-cache 84 | 85 | # Scrapy stuff: 86 | .scrapy 87 | 88 | # Sphinx documentation 89 | docs/_build/ 90 | 91 | # PyBuilder 92 | target/ 93 | 94 | # Jupyter Notebook 95 | .ipynb_checkpoints 96 | 97 | # IPython 98 | profile_default/ 99 | ipython_config.py 100 | 101 | # pyenv 102 | .python-version 103 | 104 | # pipenv 105 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 106 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 107 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 108 | # install all needed dependencies. 109 | #Pipfile.lock 110 | 111 | # PEP 582; used by e.g. 
import time
import uuid

import orjson
from fastapi import Request
from httpx._decoders import LineDecoder
from loguru import logger
from orjson import JSONDecodeError

decoder = LineDecoder()


def _parse_iter_line_content(line: str) -> str:
    """Return the delta content of one SSE data line, or '' if absent/invalid."""
    try:
        line_dict = orjson.loads(line)
        return line_dict["choices"][0]["delta"]["content"]
    except (JSONDecodeError, KeyError, IndexError):
        # Malformed JSON, a terminal chunk without a content delta,
        # or an empty `choices` list — nothing to accumulate.
        return ""


def parse_chat_completions(bytes_: bytes) -> dict:
    """Parse a chat-completions response body (streamed or not) into a flat dict.

    Returns a dict with created/id/model/role and the full assistant content
    (stream chunks are concatenated).
    """
    txt_lines = decoder.decode(bytes_.decode("utf-8"))
    line0 = txt_lines[0]
    target_info = dict()
    _start_token = "data: "
    if line0.startswith(_start_token):
        # SSE stream: first chunk carries the role in `delta`.
        is_stream = True
        line0 = orjson.loads(line0[len(_start_token) :])
        msg = line0["choices"][0]["delta"]
    else:
        # Plain JSON response: single `message` object.
        is_stream = False
        line0 = orjson.loads("".join(txt_lines))
        msg = line0["choices"][0]["message"]

    target_info["created"] = line0["created"]
    target_info["id"] = line0["id"]
    target_info["model"] = line0["model"]
    # NOTE(review): assumes the first chunk always carries `role` — TODO confirm.
    target_info["role"] = msg["role"]
    target_info["content"] = msg.get("content", "")
    if not is_stream:
        return target_info
    # Stream: accumulate content deltas from the remaining data lines.
    for line in txt_lines[1:]:
        if line in ("", "\n", "\n\n"):
            continue
        elif line.startswith(_start_token):
            target_info["content"] += _parse_iter_line_content(
                line[len(_start_token) :]
            )
        else:
            logger.warning(f"line not startswith data: {line}")
    return target_info


class ChatSaver:
    """Extracts loggable chat info and writes it to the `chat` log sink."""

    def __init__(self):
        self.logger = logger.bind(chat=True)

    @staticmethod
    async def parse_payload_to_content(request: Request, route_path: str) -> dict:
        """Build a log record from a chat-completions request payload.

        Returns {} for any other route.
        """
        if route_path == "/v1/chat/completions":
            uid = str(uuid.uuid4())
            payload = await request.json()
            msgs = payload["messages"]
            model = payload["model"]
            content = {
                "messages": [{msg["role"]: msg["content"]} for msg in msgs],
                "model": model,
                "forwarded-for": request.headers.get("x-forwarded-for") or "",
                "uid": uid,
                "datetime": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
            }
        else:
            content = {}
        return content

    @staticmethod
    def parse_bytes_to_content(bytes_: bytes, route_path: str) -> dict:
        """Parse a chat-completions response body; {} for other routes."""
        if route_path == "/v1/chat/completions":
            return parse_chat_completions(bytes_)
        else:
            return {}

    def add_chat(self, chat_info: dict):
        """Emit one chat record to the bound log sink."""
        self.logger.debug(f"{chat_info}")
class AnthropicBase:
    """Reverse proxy for the Anthropic API with optional IP filtering."""

    BASE_URL = "https://api.anthropic.com"
    IP_WHITELIST = env2list("IP_WHITELIST", sep=" ")
    IP_BLACKLIST = env2list("IP_BLACKLIST", sep=" ")

    # Upstream request timeout in seconds.
    timeout = 600

    def validate_request_host(self, ip):
        """Raise 403 unless *ip* passes the configured white/black lists."""
        if self.IP_WHITELIST and ip not in self.IP_WHITELIST:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=f"Forbidden, ip={ip} not in whitelist!",
            )
        if self.IP_BLACKLIST and ip in self.IP_BLACKLIST:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=f"Forbidden, ip={ip} in blacklist!",
            )

    @classmethod
    async def _reverse_proxy(cls, request: Request):
        """Stream *request* to the Anthropic API and relay the response."""
        client = httpx.AsyncClient(base_url=cls.BASE_URL, http1=True, http2=False)
        url_path = request.url.path
        url = httpx.URL(path=url_path, query=request.url.query.encode("utf-8"))
        headers = dict(request.headers)
        auth = headers.pop("authorization", "")
        content_type = headers.pop("content-type", "application/json")
        auth_headers_dict = {
            "Content-Type": content_type,
            "Authorization": auth,
            "accept": "application/json",
            "anthropic-version": "2023-06-01",
            "x-api-key": headers.pop("x-api-key", ""),
        }

        req = client.build_request(
            request.method,
            url,
            headers=auth_headers_dict,
            content=request.stream(),
            timeout=cls.timeout,
        )
        try:
            r = await client.send(req, stream=True)
        except (httpx.ConnectError, httpx.ConnectTimeout) as e:
            await client.aclose()  # don't leak the client on failure
            error_info = (
                f"{type(e)}: {e} | "
                f"Please check if host={request.client.host} can access [{cls.BASE_URL}] successfully?"
            )
            logger.error(error_info)
            raise HTTPException(
                status_code=status.HTTP_504_GATEWAY_TIMEOUT, detail=error_info
            )
        except Exception as e:
            await client.aclose()
            logger.exception(f"{type(e)}:")
            # `detail` must be JSON-serializable; the exception object is not.
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)
            )

        async def _cleanup():
            # Close the upstream response, then the client, once streaming ends.
            await r.aclose()
            await client.aclose()

        return StreamingResponse(
            r.aiter_bytes(),
            status_code=r.status_code,
            media_type=r.headers.get("content-type"),
            background=BackgroundTask(_cleanup),
        )


class Anthropic(AnthropicBase):
    def __init__(self):
        # Host validation is only needed when at least one list is configured.
        self.validate_host = bool(self.IP_BLACKLIST or self.IP_WHITELIST)

    async def reverse_proxy(self, request: Request):
        if self.validate_host:
            self.validate_request_host(request.client.host)
        return await self._reverse_proxy(request)
import logging
import os
import sys
import time

from loguru import logger
from rich import print
from rich.panel import Panel
from rich.table import Table


def print_startup_info(base_url, route_prefix, api_key, no_auth_mode, log_chat):
    """Print a startup summary table for the forwarding service."""
    try:
        from dotenv import load_dotenv

        load_dotenv(".env")
    except Exception:
        ...
    route_prefix = route_prefix or "/"
    api_key_info = bool(len(api_key))  # True when at least one key is pooled
    table = Table(title="", box=None, width=100)
    table.add_column("base-url", justify="left", style="#df412f")
    table.add_column("route-prefix", justify="center", style="green")
    table.add_column("api-key-polling-pool", justify="center", style="green")
    table.add_column(
        "no-auth-mode", justify="center", style="red" if no_auth_mode else "green"
    )
    table.add_column("Log-chat", justify="center", style="green")
    table.add_row(
        base_url,
        route_prefix,
        str(api_key_info),
        str(no_auth_mode),
        str(log_chat),
    )
    print(Panel(table, title="🤗 openai-forward is ready to serve! ", expand=False))


class InterceptHandler(logging.Handler):
    """Route stdlib logging records into loguru (standard loguru recipe)."""

    def emit(self, record):
        # Get corresponding Loguru level if it exists
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            level = record.levelno

        # Find caller from where originated the logged message
        frame, depth = logging.currentframe(), 6
        while frame.f_code.co_filename == logging.__file__:
            frame = frame.f_back
            depth += 1
        logger.opt(depth=depth, exception=record.exc_info).log(
            level, record.getMessage()
        )


def setting_log(save_file=False, log_name="openai_forward", multi_process=True):
    """Configure loguru sinks (stdout, chat log, whisper log, optional file).

    Parameters
    ----------
    save_file : bool
        Additionally write INFO-level logs to ./Log/<log_name>.log.
    log_name : str
        Base name of the optional file sink.
    multi_process : bool
        Use loguru's `enqueue` so sinks are safe across worker processes.
    """
    # TODO: fix the timezone configuration properly.
    # POSIX TZ strings invert the sign, so "UTC-8" means UTC+8 (Shanghai).
    if os.environ.get("TZ") == "Asia/Shanghai":
        os.environ["TZ"] = "UTC-8"
        if hasattr(time, "tzset"):
            time.tzset()

    # Funnel every stdlib logger through the loguru intercept handler.
    logging.root.handlers = [InterceptHandler()]
    for name in logging.root.manager.loggerDict.keys():
        logging.getLogger(name).handlers = []
        logging.getLogger(name).propagate = True

    config_handlers = [
        {"sink": sys.stdout, "level": "DEBUG"},
        {
            "sink": "./Log/chat/chat.log",
            "enqueue": multi_process,
            "rotation": "50 MB",
            "filter": lambda record: "chat" in record["extra"],
            "format": "{message}",
        },
        {
            "sink": "./Log/whisper/whisper.log",
            "enqueue": multi_process,
            "rotation": "30 MB",
            "filter": lambda record: "whisper" in record["extra"],
            "format": "{message}",
        },
    ]
    if save_file:
        config_handlers += [
            {
                "sink": f"./Log/{log_name}.log",
                "enqueue": multi_process,
                "rotation": "100 MB",
                "level": "INFO",
            }
        ]

    logger.configure(handlers=config_handlers)
class OpenaiBase:
    """Reverse proxy for the OpenAI API with key pooling, forward keys and IP filtering."""

    BASE_URL = os.environ.get("OPENAI_BASE_URL", "https://api.openai.com").strip()
    ROUTE_PREFIX = os.environ.get("ROUTE_PREFIX", "").strip()
    _openai_api_key_list = env2list("OPENAI_API_KEY", sep=" ")
    _cycle_api_key = cycle(_openai_api_key_list)
    _FWD_KEYS = set(env2list("FORWARD_KEY", sep=" "))
    # No-auth mode: API keys are pooled but no forward key is required.
    _no_auth_mode = _openai_api_key_list != [] and _FWD_KEYS == set()
    IP_WHITELIST = env2list("IP_WHITELIST", sep=" ")
    IP_BLACKLIST = env2list("IP_BLACKLIST", sep=" ")

    # Upstream request timeout in seconds.
    timeout = 600

    print_startup_info(
        BASE_URL, ROUTE_PREFIX, _openai_api_key_list, _no_auth_mode, False
    )

    def validate_request_host(self, ip):
        """Raise 403 unless *ip* passes the configured white/black lists."""
        if self.IP_WHITELIST and ip not in self.IP_WHITELIST:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=f"Forbidden, ip={ip} not in whitelist!",
            )
        if self.IP_BLACKLIST and ip in self.IP_BLACKLIST:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=f"Forbidden, ip={ip} in blacklist!",
            )

    @classmethod
    async def _reverse_proxy(cls, request: Request):
        """Forward *request* upstream, swapping in a pooled API key when allowed."""
        client = httpx.AsyncClient(base_url=cls.BASE_URL, http1=True, http2=False)
        # Strip the local route prefix before forwarding upstream.
        url_path = request.url.path[len(cls.ROUTE_PREFIX) :]
        url = httpx.URL(path=url_path, query=request.url.query.encode("utf-8"))
        headers = dict(request.headers)
        auth = headers.pop("authorization", "")
        content_type = headers.pop("content-type", "application/json")
        auth_headers_dict = {"Content-Type": content_type, "Authorization": auth}
        auth_prefix = "Bearer "
        # Substitute the next pooled key when no-auth mode is active, or when
        # the caller presented a valid forward key.  (Parens clarify the
        # original `or`/`and` precedence — behavior unchanged.)
        if cls._no_auth_mode or (auth and auth[len(auth_prefix) :] in cls._FWD_KEYS):
            auth = auth_prefix + next(cls._cycle_api_key)
            auth_headers_dict["Authorization"] = auth

        req = client.build_request(
            request.method,
            url,
            headers=auth_headers_dict,
            content=request.stream(),
            timeout=cls.timeout,
        )
        try:
            r = await client.send(req, stream=True)
        except (httpx.ConnectError, httpx.ConnectTimeout) as e:
            await client.aclose()  # don't leak the client on failure
            error_info = (
                f"{type(e)}: {e} | "
                f"Please check if host={request.client.host} can access [{cls.BASE_URL}] successfully?"
            )
            logger.error(error_info)
            raise HTTPException(
                status_code=status.HTTP_504_GATEWAY_TIMEOUT, detail=error_info
            )
        except Exception as e:
            await client.aclose()
            logger.exception(f"{type(e)}:")
            # `detail` must be JSON-serializable; the exception object is not.
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)
            )

        async def _cleanup():
            # Close the upstream response, then the client, once streaming ends.
            await r.aclose()
            await client.aclose()

        return StreamingResponse(
            r.aiter_bytes(),
            status_code=r.status_code,
            media_type=r.headers.get("content-type"),
            background=BackgroundTask(_cleanup),
        )
# NOTE(review): the module head (outside this span) also imports
# orjson, rich.print and sparrow (MeasureTime, ls, relp).
import ast
import os
from typing import Dict, List, Union


def json_load(filepath: str, rel=False, mode="rb"):
    """Load a JSON file (path relative to this module when rel=True)."""
    abs_path = relp(filepath, parents=1) if rel else filepath
    with open(abs_path, mode=mode) as f:
        return orjson.loads(f.read())


def json_dump(
    data: Union[List, Dict], filepath: str, rel=False, indent_2=False, mode="wb"
):
    """Dump *data* as JSON, optionally indented by 2 spaces."""
    orjson_option = orjson.OPT_INDENT_2 if indent_2 else 0
    abs_path = relp(filepath, parents=1) if rel else filepath
    with open(abs_path, mode=mode) as f:
        f.write(orjson.dumps(data, option=orjson_option))


def str2list(s: str, sep=" ") -> list:
    """Split *s* on *sep*, stripping items and dropping empties."""
    if not s:
        return []
    return [item.strip() for item in s.split(sep) if item.strip()]


def env2list(env_name: str, sep=" ") -> list:
    """Read an environment variable and split it into a clean list."""
    return str2list(os.environ.get(env_name, "").strip(), sep=sep)


def get_matches(messages: List[Dict], assistants: List[Dict]):
    """Join request and reply records on their shared `uid`."""
    mt = MeasureTime()
    mt.start()
    msg_len, ass_len = len(messages), len(assistants)
    if msg_len != ass_len:
        print(f"message({msg_len}) 与 assistant({ass_len}) 长度不匹配")

    def cvt(msg, ass):
        # Merge one request record with its matching assistant record.
        return {
            "datetime": msg.get('datetime'),
            "forwarded-for": msg.get("forwarded-for"),
            "model": msg.get("model"),
            "messages": msg.get("messages"),
            "assistant": ass.get("assistant"),
        }

    msg_uid_dict = {m.pop("uid"): m for m in messages}
    ass_uid_dict = {a.pop("uid"): a for a in assistants}
    matches = [
        cvt(msg_uid_dict[uid], ass_uid_dict[uid])
        for uid in msg_uid_dict
        if uid in ass_uid_dict
    ]

    ref_len = max(msg_len, ass_len)
    if len(matches) != ref_len:
        print(f"存在{ref_len - len(matches)}条未匹配数据")
    mt.show_interval("计算耗时:")
    return matches


def parse_log_to_list(log_path: str):
    """Split a chat log into (messages, assistant) record lists.

    Each log line is a Python-literal dict; records with a non-empty
    "messages" key are requests, everything else is an assistant reply.
    """
    messages, assistant = [], []
    with open(log_path, "r", encoding="utf-8") as f:
        for line in f:  # stream instead of materializing readlines()
            content: dict = ast.literal_eval(line)
            if content.get("messages"):
                messages.append(content)
            else:
                assistant.append(content)
    return messages, assistant


def convert_chatlog_to_jsonl(log_path: str, target_path: str):
    """Convert single chatlog to jsonl"""
    message_list, assistant_list = parse_log_to_list(log_path)
    content_list = get_matches(messages=message_list, assistants=assistant_list)
    json_dump(content_list, target_path, indent_2=True)


def get_log_files_from_folder(log_path: str):
    """Return the *.log files directly under *log_path*."""
    return ls(log_path, "*.log", relp=False)


def convert_folder_to_jsonl(folder_path: str, target_path: str):
    """Convert chatlog folder to jsonl"""
    log_files = get_log_files_from_folder(folder_path)
    messages = []
    assistants = []
    for log_path in log_files:
        msg, ass = parse_log_to_list(log_path)

        msg_len, ass_len = len(msg), len(ass)
        if msg_len != ass_len:
            print(f"{log_path=} message({msg_len}) 与 assistant({ass_len}) 长度不匹配")
        messages.extend(msg)
        assistants.extend(ass)
    content_list = get_matches(messages=messages, assistants=assistants)
    json_dump(content_list, target_path, indent_2=True)
34 | 35 | ## How to Contribute 36 | 37 | ### Reporting Bugs 38 | 39 | If you find a bug in the project, please create an issue on GitHub with the following information: 40 | 41 | - A clear, descriptive title for the issue. 42 | - A description of the problem, including steps to reproduce the issue. 43 | - Any relevant logs, screenshots, or other supporting information. 44 | 45 | ### Suggesting Enhancements 46 | 47 | If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information: 48 | 49 | - A clear, descriptive title for the issue. 50 | - A detailed description of the proposed enhancement, including any benefits and potential drawbacks. 51 | - Any relevant examples, mockups, or supporting information. 52 | 53 | ### Submitting Pull Requests 54 | 55 | When submitting a pull request, please ensure that your changes meet the following criteria: 56 | 57 | - Your pull request should be atomic and focus on a single change. 58 | - Your pull request should include tests for your change. 59 | - You should have thoroughly tested your changes with multiple different prompts. 60 | - You should have considered potential risks and mitigations for your changes. 61 | - You should have documented your changes clearly and comprehensively. 62 | - You should not include any unrelated or "extra" small tweaks or changes. 63 | 64 | ## Style Guidelines 65 | 66 | ### Code Formatting 67 | 68 | We use the `black` code formatter to maintain a consistent coding style across the project. Please ensure that your code is formatted using `black` before submitting a pull request. You can install `black` using `pip`: 69 | 70 | ```bash 71 | pip install black 72 | ``` 73 | 74 | To format your code, run the following command in the project's root directory: 75 | 76 | ```bash 77 | black . 78 | ``` 79 | ### Pre-Commit Hooks 80 | We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. 
To set up pre-commit hooks for this project, follow these steps: 81 | 82 | Install the pre-commit package using pip: 83 | ```bash 84 | pip install pre-commit 85 | ``` 86 | 87 | Run the following command in the project's root directory to install the pre-commit hooks: 88 | ```bash 89 | pre-commit install 90 | ``` 91 | 92 | Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements. 93 | 94 | If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project. 95 | 96 | Happy coding, and once again, thank you for your contributions! -------------------------------------------------------------------------------- /openai_forward/nai.py: -------------------------------------------------------------------------------- 1 | from loguru import logger 2 | 3 | import httpx 4 | from fastapi import HTTPException, Request, status 5 | from fastapi.responses import StreamingResponse 6 | from starlette.background import BackgroundTask 7 | 8 | from openai_forward.tool import env2list 9 | 10 | class NAIBase: 11 | API_NOVEL = "https://api.novelai.net" 12 | TEXT_NOVEL = "https://text.novelai.net" 13 | IP_WHITELIST = env2list("IP_WHITELIST", sep=" ") 14 | IP_BLACKLIST = env2list("IP_BLACKLIST", sep=" ") 15 | 16 | timeout = 600 17 | 18 | def validate_request_host(self, ip): 19 | if self.IP_WHITELIST and ip not in self.IP_WHITELIST: 20 | raise HTTPException( 21 | status_code=status.HTTP_403_FORBIDDEN, 22 | detail=f"Forbidden, ip={ip} not in whitelist!", 23 | ) 24 | if self.IP_BLACKLIST and ip in self.IP_BLACKLIST: 25 | raise HTTPException( 26 | status_code=status.HTTP_403_FORBIDDEN, 27 | detail=f"Forbidden, ip={ip} in blacklist!", 28 | ) 29 | 30 | @classmethod 31 | async def _reverse_proxy(cls, request: Request): 32 | body = await request.json() 33 | BASE_URL = cls.API_NOVEL if 'clio' in 
class NAIBase:
    """Reverse proxy for the NovelAI API with parameter sanitizing."""

    API_NOVEL = "https://api.novelai.net"
    TEXT_NOVEL = "https://text.novelai.net"
    IP_WHITELIST = env2list("IP_WHITELIST", sep=" ")
    IP_BLACKLIST = env2list("IP_BLACKLIST", sep=" ")

    # Upstream request timeout in seconds.
    timeout = 600

    def validate_request_host(self, ip):
        """Raise 403 unless *ip* passes the configured white/black lists."""
        if self.IP_WHITELIST and ip not in self.IP_WHITELIST:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=f"Forbidden, ip={ip} not in whitelist!",
            )
        if self.IP_BLACKLIST and ip in self.IP_BLACKLIST:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=f"Forbidden, ip={ip} in blacklist!",
            )

    @classmethod
    async def _reverse_proxy(cls, request: Request):
        """Sanitize the payload and forward it to the matching NovelAI endpoint."""
        body = await request.json()
        # 'clio' models go to the api host, everything else to the text host.
        # .get avoids an unhandled KeyError for payloads without a model.
        BASE_URL = cls.API_NOVEL if 'clio' in body.get('model', '') else cls.TEXT_NOVEL
        if 'parameters' in body and body['parameters'] is not None:
            params = body['parameters']
            if 'repetition_penalty_whitelist' in params:
                # Flatten nested lists; the upstream API expects a flat int list.
                raw_whitelist = params['repetition_penalty_whitelist'] or []
                new_whitelist = []
                for sublist in raw_whitelist:
                    if isinstance(sublist, int):
                        new_whitelist.append(sublist)
                    else:
                        new_whitelist.extend(sublist)
                params['repetition_penalty_whitelist'] = new_whitelist
            # Cap generation lengths; defaults (150 / 50) apply when null.
            if 'max_length' in params:
                params['max_length'] = (
                    min(int(params['max_length']), 150)
                    if params['max_length'] is not None
                    else 150
                )
            if 'min_length' in params:
                params['min_length'] = (
                    min(int(params['min_length']), 150)
                    if params['min_length'] is not None
                    else 50
                )
        client = httpx.AsyncClient(base_url=BASE_URL, http1=True, http2=False)
        url = httpx.URL(path=request.url.path, query=request.url.query.encode("utf-8"))
        headers = dict(request.headers)
        auth = headers.pop("authorization", "")
        content_type = headers.pop("content-type", "application/json")
        auth_headers_dict = {"Content-Type": content_type, "Authorization": auth}

        req = client.build_request(
            request.method,
            url,
            json=body,
            headers=auth_headers_dict,
            timeout=cls.timeout,
        )
        try:
            r = await client.send(req, stream=True)
        except (httpx.ConnectError, httpx.ConnectTimeout) as e:
            await client.aclose()  # don't leak the client on failure
            error_info = (
                f"{type(e)}: {e} | "
                f"Please check if host={request.client.host} can access [{BASE_URL}] successfully?"
            )
            logger.error(error_info)
            raise HTTPException(
                status_code=status.HTTP_504_GATEWAY_TIMEOUT, detail=error_info
            )
        except Exception as e:
            await client.aclose()
            logger.exception(f"{type(e)}:")
            # `detail` must be JSON-serializable; the exception object is not.
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)
            )

        async def _cleanup():
            # Close the upstream response, then the client, once streaming ends.
            await r.aclose()
            await client.aclose()

        return StreamingResponse(
            r.aiter_bytes(),
            status_code=r.status_code,
            media_type=r.headers.get("content-type"),
            background=BackgroundTask(_cleanup),
        )


class NovelAI(NAIBase):
    def __init__(self):
        # Host validation is only needed when at least one list is configured.
        self.validate_host = bool(self.IP_BLACKLIST or self.IP_WHITELIST)

    async def reverse_proxy(self, request: Request):
        if self.validate_host:
            self.validate_request_host(request.client.host)
        return await self._reverse_proxy(request)
9 | OpenAI API 接口转发服务
10 | The fastest way to deploy OpenAI API forwarding
11 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |