├── gateway
├── admin.py
├── route.py
├── login.py
├── gpts.py
├── v1.py
├── chatgpt.py
├── share.py
├── reverseProxy.py
└── backend.py
├── version.txt
├── docs
├── tokens.png
└── capsolver.jpg
├── .gitignore
├── .dockerignore
├── .env.example
├── Dockerfile
├── requirements.txt
├── utils
├── kv_utils.py
├── Logger.py
├── retry.py
├── Client.py
├── globals.py
└── config.py
├── docker-compose.yml
├── LICENSE
├── chatgpt
├── wssClient.py
├── chatLimit.py
├── refreshToken.py
├── authorization.py
├── turnstile.py
├── proofofWork.py
├── chatFormat.py
└── ChatService.py
├── api
├── models.py
├── tokens.py
├── chat2api.py
└── files.py
├── app.py
├── docker-compose-warp.yml
├── .github
└── workflows
│ └── build_docker.yml
├── templates
├── login.html
└── tokens.html
├── chat2api.py
└── README.md
/gateway/admin.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/gateway/route.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/version.txt:
--------------------------------------------------------------------------------
1 | 1.7.1-beta1
2 |
--------------------------------------------------------------------------------
/docs/tokens.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Niansuh/chat2api/HEAD/docs/tokens.png
--------------------------------------------------------------------------------
/docs/capsolver.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Niansuh/chat2api/HEAD/docs/capsolver.jpg
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | *.pyc
3 | /.git/
4 | /.idea/
5 | /tmp/
6 | /data/
7 | /.venv/
8 | /.vscode/
9 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .env
2 | *.pyc
3 | /.git/
4 | /.idea/
5 | /docs/
6 | /tmp/
7 | /data/
8 | /.venv/
9 | /.vscode/
10 |
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | API_PREFIX=your_prefix
2 | CHATGPT_BASE_URL=https://chatgpt.com
3 | PROXY_URL=your_first_proxy, your_second_proxy
4 | SCHEDULED_REFRESH=false
5 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.11-slim

WORKDIR /app

# Install dependencies first: this layer is cached and only rebuilt when
# requirements.txt itself changes, not on every source edit.
COPY requirements.txt /app/
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application source last so code changes don't bust the pip layer.
COPY . /app

EXPOSE 5005

CMD ["python", "app.py"]
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi==0.115.3
2 | python-multipart==0.0.13
3 | curl_cffi==0.7.3
4 | uvicorn
5 | tiktoken
6 | python-dotenv
7 | websockets
8 | pillow
9 | pybase64
10 | jinja2
11 | APScheduler
12 | ua-generator
13 | pyjwt
14 |
--------------------------------------------------------------------------------
/gateway/login.py:
--------------------------------------------------------------------------------
1 | from fastapi import Request
2 | from fastapi.responses import HTMLResponse
3 |
4 | from app import app, templates
5 |
6 |
@app.get("/login", response_class=HTMLResponse)
async def login_html(request: Request):
    """Render and return the token login page."""
    return templates.TemplateResponse("login.html", {"request": request})
11 |
--------------------------------------------------------------------------------
/utils/kv_utils.py:
--------------------------------------------------------------------------------
def set_value_for_key(data, target_key, new_value):
    """Recursively walk nested dicts/lists and overwrite, in place, every
    value stored under *target_key* with *new_value*.

    Matched values are replaced wholesale (no recursion into them); anything
    that is neither a dict nor a list is left untouched.
    """
    if isinstance(data, dict):
        for key in data:
            if key == target_key:
                data[key] = new_value
            else:
                set_value_for_key(data[key], target_key, new_value)
    elif isinstance(data, list):
        for element in data:
            set_value_for_key(element, target_key, new_value)
11 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | chat2api:
5 | image: niansuh/chat2api:latest
6 | container_name: chat2api
7 | restart: unless-stopped
8 | ports:
9 | - '5005:5005'
10 | volumes:
11 | - ./data:/app/data
12 | environment:
13 | - TZ=Asia/Shanghai
14 | - AUTHORIZATION=sk-xxx
15 |
16 | watchtower:
17 | image: containrrr/watchtower
18 | container_name: watchtower
19 | restart: unless-stopped
20 | volumes:
21 | - /var/run/docker.sock:/var/run/docker.sock
22 | command: --cleanup --interval 300 chat2api
23 |
--------------------------------------------------------------------------------
/utils/Logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | logging.basicConfig(level=logging.INFO, format='%(asctime)s | %(levelname)s | %(message)s')
4 |
5 |
class Logger:
    """Static facade over the stdlib logging module.

    Warnings, errors and debug lines are wrapped in ANSI color escapes
    (yellow / red / grey); errors additionally get a dashed frame.
    """

    @staticmethod
    def info(message):
        logging.info(str(message))

    @staticmethod
    def warning(message):
        logging.warning(f"\033[0;33m{str(message)}\033[0m")

    @staticmethod
    def error(message):
        logging.error(f"\033[0;31m{'-' * 50}\n| {str(message)}\033[0m\n└{'-' * 80}")

    @staticmethod
    def debug(message):
        logging.debug(f"\033[0;37m{str(message)}\033[0m")


# Shared module-level instance used throughout the project.
logger = Logger()
25 |
--------------------------------------------------------------------------------
/gateway/gpts.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | from fastapi import Request
4 | from fastapi.responses import Response
5 |
6 | from app import app
7 | from gateway.chatgpt import chatgpt_html
8 |
9 | with open("templates/gpts_context.json", "r", encoding="utf-8") as f:
10 | gpts_context = json.load(f)
11 |
12 |
@app.get("/gpts")
async def get_gpts():
    """Minimal stub reply for the GPT store listing endpoint."""
    return {"kind": "store"}
16 |
17 |
@app.get("/g/g-{gizmo_id}")
async def get_gizmo_json(request: Request, gizmo_id: str):
    """Serve a GPT (gizmo) page.

    Remix data requests (``_data=routes/g.$gizmoId._index``) get the canned
    JSON context; anything else falls through to the normal chat HTML shell.
    """
    if request.query_params.get("_data") == "routes/g.$gizmoId._index":
        payload = json.dumps(gpts_context, indent=4)
        return Response(content=payload, media_type="application/json")
    return await chatgpt_html(request)
25 |
--------------------------------------------------------------------------------
/gateway/v1.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | from fastapi import Request
4 | from fastapi.responses import Response
5 |
6 | from app import app
7 | from gateway.reverseProxy import chatgpt_reverse_proxy
8 | from utils.kv_utils import set_value_for_key
9 |
10 |
@app.post("/v1/initialize")
async def initialize(request: Request):
    """Proxy /v1/initialize upstream, then overwrite the detected client
    IP/country in the response so every caller reports the same location.

    Returns the rewritten JSON body with application/json media type.
    """
    # Fix: the path was a pointless f-string literal with no placeholders.
    initialize_response = await chatgpt_reverse_proxy(request, "/v1/initialize")
    initialize_json = json.loads(initialize_response.body.decode("utf-8"))
    # Spoof the detected client location (recursively, wherever the keys occur).
    set_value_for_key(initialize_json, "ip", "8.8.8.8")
    set_value_for_key(initialize_json, "country", "US")
    return Response(content=json.dumps(initialize_json, indent=4), media_type="application/json")
19 |
20 |
@app.post("/v1/rgstr")
async def rgstr():
    """Stub out the telemetry registration endpoint with a canned 202 success."""
    body = json.dumps({"success": True}, indent=4)
    return Response(status_code=202, content=body, media_type="application/json")
24 |
25 |
@app.post("/ces/v1/{path:path}")
async def ces_v1():
    """Swallow all /ces/v1/* telemetry posts with a canned 202 success."""
    body = json.dumps({"success": True}, indent=4)
    return Response(status_code=202, content=body, media_type="application/json")
29 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Niansuh AI
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/chatgpt/wssClient.py:
--------------------------------------------------------------------------------
1 | import json
2 | import time
3 |
4 | from utils.Logger import logger
5 | import utils.globals as globals
6 |
7 |
def save_wss_map(wss_map):
    """Persist the wss registration cache to its JSON file on disk."""
    with open(globals.WSS_MAP_FILE, "w") as file:
        json.dump(wss_map, file, indent=4)
11 |
12 |
async def token2wss(token):
    """Look up the cached wss registration for *token*.

    Returns ``(wss_mode, wss_url)``; the url is None when there is no cache
    entry, wss is disabled for the token, or the entry is older than an hour.
    """
    if not token:
        return False, None
    entry = globals.wss_map.get(token)
    if entry is None:
        return False, None
    wss_mode = entry["wss_mode"]
    if not wss_mode:
        return False, None
    # Cached urls are trusted for one hour.
    if int(time.time()) - entry.get("timestamp", 0) < 60 * 60:
        logger.info("token -> wss_url from cache")
        return wss_mode, entry["wss_url"]
    logger.info("token -> wss_url expired")
    return wss_mode, None
29 |
30 |
async def set_wss(token, wss_mode, wss_url=None):
    """Record (or refresh) the wss registration for *token* and persist it.

    Always returns True; a falsy token is a silent no-op.
    """
    if not token:
        return True
    entry = {"timestamp": int(time.time()), "wss_url": wss_url, "wss_mode": wss_mode}
    globals.wss_map[token] = entry
    save_wss_map(globals.wss_map)
    return True
37 |
--------------------------------------------------------------------------------
/gateway/chatgpt.py:
--------------------------------------------------------------------------------
1 | import json
2 | from urllib.parse import quote
3 |
4 | from fastapi import Request
5 | from fastapi.responses import HTMLResponse
6 |
7 | from app import app, templates
8 | from gateway.login import login_html
9 | from utils.kv_utils import set_value_for_key
10 |
11 | with open("templates/chatgpt_context.json", "r", encoding="utf-8") as f:
12 | chatgpt_context = json.load(f)
13 |
14 |
@app.get("/", response_class=HTMLResponse)
async def chatgpt_html(request: Request):
    """Render the ChatGPT UI shell with the caller's token injected into the
    remix context, falling back to the login page when no token is present.

    The token comes from the ``token`` query parameter first, then the cookie;
    it is also (re)set as a long-lived cookie on the response.
    """
    token = request.query_params.get("token") or request.cookies.get("token")
    if not token:
        return await login_html(request)

    # URL-quote anything that is not shaped like a 45-char RT or a JWT access
    # token (e.g. seed strings), so it is safe to embed and store in a cookie.
    if len(token) != 45 and not token.startswith("eyJhbGciOi"):
        token = quote(token)

    # Bug fix: chatgpt_context is a shared module-level template and
    # set_value_for_key mutates nested dicts in place. The previous shallow
    # .copy() shared those nested dicts, so one user's token leaked into the
    # template (and thus into every later request). Deep-copy instead.
    from copy import deepcopy
    user_remix_context = deepcopy(chatgpt_context)
    set_value_for_key(user_remix_context, "user", {"id": "user-chatgpt"})
    set_value_for_key(user_remix_context, "accessToken", token)

    response = templates.TemplateResponse("chatgpt.html", {"request": request, "remix_context": user_remix_context})
    response.set_cookie("token", value=token, expires="Thu, 01 Jan 2099 00:00:00 GMT")
    return response
33 |
--------------------------------------------------------------------------------
/api/models.py:
--------------------------------------------------------------------------------
# Maps the generic model aliases accepted in API requests to the pinned
# snapshot names that are echoed back in responses.
model_proxy = {
    "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
    "gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613",
    "gpt-4": "gpt-4-0613",
    "gpt-4-32k": "gpt-4-32k-0613",
    "gpt-4-turbo-preview": "gpt-4-0125-preview",
    "gpt-4-vision-preview": "gpt-4-1106-vision-preview",
    "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
    "gpt-4o": "gpt-4o-2024-05-13",
    "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
    "o1-preview": "o1-preview-2024-09-12",
    "o1-mini": "o1-mini-2024-09-12",
    "claude-3-opus": "claude-3-opus-20240229",
    "claude-3-sonnet": "claude-3-sonnet-20240229",
    "claude-3-haiku": "claude-3-haiku-20240307",
}

# Known system_fingerprint values per snapshot model.
# NOTE(review): presumably a consumer picks one of these at random when
# synthesizing responses -- confirm against the caller; snapshots absent
# from this map have no fingerprint to report.
model_system_fingerprint = {
    "gpt-3.5-turbo-0125": ["fp_b28b39ffa8"],
    "gpt-3.5-turbo-1106": ["fp_592ef5907d"],
    "gpt-4-0125-preview": ["fp_f38f4d6482", "fp_2f57f81c11", "fp_a7daf7c51e", "fp_a865e8ede4", "fp_13c70b9f70",
                           "fp_b77cb481ed"],
    "gpt-4-1106-preview": ["fp_e467c31c3d", "fp_d986a8d1ba", "fp_99a5a401bb", "fp_123d5a9f90", "fp_0d1affc7a6",
                           "fp_5c95a4634e"],
    "gpt-4-turbo-2024-04-09": ["fp_d1bac968b4"],
    "gpt-4o-2024-05-13": ["fp_3aa7262c27"],
    "gpt-4o-mini-2024-07-18": ["fp_c9aa9c0491"]
}
29 |
--------------------------------------------------------------------------------
/chatgpt/chatLimit.py:
--------------------------------------------------------------------------------
1 | import time
2 | from datetime import datetime
3 |
4 | from utils.Logger import logger
5 |
6 | limit_details = {}
7 |
8 |
def check_is_limit(detail, token, model):
    """Record a per-token, per-model rate limit when the upstream error
    *detail* carries a ``clears_in`` countdown (seconds)."""
    if not (token and isinstance(detail, dict) and detail.get('clears_in')):
        return
    clear_time = int(time.time()) + detail.get('clears_in')
    limit_details.setdefault(token, {})[model] = clear_time
    readable = datetime.fromtimestamp(clear_time).replace(microsecond=0)
    logger.info(f"{token[:40]}: Reached {model} limit, will be cleared at {readable}")
14 |
15 |
async def handle_request_limit(token, model):
    """Return a human-readable warning string when *token* is currently rate
    limited for *model*, otherwise None (expired limits are dropped)."""
    try:
        model_limits = limit_details.get(token)
        if not model_limits or model not in model_limits:
            return None
        limit_time = model_limits[model]
        if limit_time > int(time.time()):
            clear_date = datetime.fromtimestamp(limit_time).replace(microsecond=0)
            result = f"Request limit exceeded. You can continue with the default model now, or try again after {clear_date}"
            logger.info(result)
            return result
        # The limit window has passed; forget it.
        del model_limits[model]
        return None
    except KeyError as e:
        logger.error(f"Key error: {e}")
        return None
    except Exception as e:
        logger.error(f"An unexpected error occurred: {e}")
        return None
35 |
--------------------------------------------------------------------------------
/utils/retry.py:
--------------------------------------------------------------------------------
1 | from fastapi import HTTPException
2 |
3 | from utils.Logger import logger
4 | from utils.configs import retry_times
5 |
6 |
async def async_retry(func, *args, max_retries=retry_times, **kwargs):
    """Await ``func(*args, **kwargs)`` up to ``max_retries + 1`` times,
    retrying only on HTTPException.

    On the final failure the exception is logged and re-raised; a 500 is
    normalized to a generic "Server error" detail.
    """
    for attempt in range(max_retries + 1):
        try:
            return await func(*args, **kwargs)
        except HTTPException as e:
            if attempt != max_retries:
                logger.info(f"Retry {attempt + 1} status code {e.status_code}, {e.detail}. Retrying...")
                continue
            logger.error(f"Throw an exception {e.status_code}, {e.detail}")
            if e.status_code == 500:
                raise HTTPException(status_code=500, detail="Server error")
            raise HTTPException(status_code=e.status_code, detail=e.detail)
19 |
20 |
def retry(func, *args, max_retries=retry_times, **kwargs):
    """Synchronous counterpart of async_retry: call ``func(*args, **kwargs)``
    up to ``max_retries + 1`` times, retrying only on HTTPException.

    On the final failure the exception is logged and re-raised; a 500 is
    normalized to a generic "Server error" detail.
    """
    for attempt in range(max_retries + 1):
        try:
            return func(*args, **kwargs)
        except HTTPException as e:
            if attempt == max_retries:
                logger.error(f"Throw an exception {e.status_code}, {e.detail}")
                if e.status_code == 500:
                    raise HTTPException(status_code=500, detail="Server error")
                raise HTTPException(status_code=e.status_code, detail=e.detail)
            # Consistency fix: async_retry logs retries at INFO; an attempt
            # that will be retried is not an error, so log it at INFO here too.
            logger.info(f"Retry {attempt + 1} status code {e.status_code}, {e.detail}. Retrying...")
33 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
import warnings

import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.middleware.cors import CORSMiddleware
from fastapi.templating import Jinja2Templates

from utils.configs import enable_gateway, api_prefix

# Silence all Python warnings (e.g. dependency deprecation noise).
warnings.filterwarnings("ignore")


# Align uvicorn's default and access log formats with the
# "time | level | message" style used by utils.Logger.
log_config = uvicorn.config.LOGGING_CONFIG
default_format = "%(asctime)s | %(levelname)s | %(message)s"
access_format = r'%(asctime)s | %(levelname)s | %(client_addr)s: %(request_line)s %(status_code)s'
log_config["formatters"]["default"]["fmt"] = default_format
log_config["formatters"]["access"]["fmt"] = access_format

# The docs endpoints are tucked behind the configurable API prefix so they
# are not discoverable at the default paths.
app = FastAPI(
    docs_url=f"/{api_prefix}/docs",  # Set Swagger UI documentation path
    redoc_url=f"/{api_prefix}/redoc",  # Set Redoc documentation path
    openapi_url=f"/{api_prefix}/openapi.json"  # Set OpenAPI JSON path
)

# Wide-open CORS: any origin, with credentials, all methods/headers.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

templates = Jinja2Templates(directory="templates")
security_scheme = HTTPBearer()

# NOTE(review): self-import. When this file runs as a script (__main__), this
# re-imports app.py as the module "app", so the route modules below -- which
# all do "from app import app" -- register on that module-level instance.
# Looks intentional, but confirm before removing.
from app import app

# Importing route modules registers their endpoints on `app` as a side effect.
import api.chat2api

if enable_gateway:
    import gateway.share
    import gateway.login
    import gateway.chatgpt
    import gateway.gpts
    import gateway.admin
    import gateway.v1
    import gateway.backend
else:
    # Gateway disabled: every non-API path answers 404.
    @app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH", "TRACE"])
    async def reverse_proxy():
        raise HTTPException(status_code=404, detail="Gateway is disabled")


if __name__ == "__main__":
    uvicorn.run("app:app", host="0.0.0.0", port=5005)
    # uvicorn.run("app:app", host="0.0.0.0", port=5005, ssl_keyfile="key.pem", ssl_certfile="cert.pem")
58 |
--------------------------------------------------------------------------------
/docker-compose-warp.yml:
--------------------------------------------------------------------------------
1 | services:
2 | warp:
3 | image: caomingjun/warp
4 | container_name: warp
5 | restart: always
6 | environment:
7 | - WARP_SLEEP=5
8 | cap_add:
9 | - MKNOD
10 | - AUDIT_WRITE
11 | - NET_ADMIN
12 | sysctls:
13 | - net.ipv6.conf.all.disable_ipv6=0
14 | - net.ipv4.conf.all.src_valid_mark=1
15 | volumes:
16 | - ./warpdata:/var/lib/cloudflare-warp
17 | networks:
18 | - internal_network # Use internal network and do not expose ports to the outside world
19 | healthcheck:
20 | test: ["CMD", "curl", "-f", "-s", "https://www.google.com"] # Request Google in silent mode and return 2xx status code if successful
21 | interval: 30s # Check every 30 seconds
22 | timeout: 10s # Request timeout 10 seconds
23 | retries: 3 # Marked as unhealthy after 3 failures
24 | start_period: 5s # After the container starts, wait 5 seconds before starting the check.
25 |
26 | chat2api:
27 | image: niansuh/chat2api:latest
28 | container_name: chat2api
29 | restart: unless-stopped
30 | ports:
31 | - '5005:5005' # Expose the chat2api service for external access
32 | environment:
33 | - TZ=Asia/Shanghai
34 | - AUTHORIZATION=sk-xxx
35 | - PROXY_URL=socks5://warp:1080 # Set PROXY_URL to the proxy address of the warp container
36 | depends_on:
37 | warp:
38 | condition: service_healthy # chat2api and warp are on the same internal network
39 | networks:
40 | - internal_network # chat2api and warp are on the same internal network
41 | volumes:
42 | - ./data:/app/data # Mount some data that needs to be saved
43 |
44 | watchtower:
45 | image: containrrr/watchtower
46 | container_name: watchtower
47 | restart: unless-stopped
48 | volumes:
49 | - /var/run/docker.sock:/var/run/docker.sock
50 | command: --cleanup --interval 300 chat2api
51 |
52 | networks:
53 | internal_network:
54 | driver: bridge # Define a bridged network
55 |
--------------------------------------------------------------------------------
/utils/Client.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | from curl_cffi.requests import AsyncSession
4 |
5 |
class Client:
    """Thin async HTTP client over two curl_cffi AsyncSession objects that
    impersonate a browser TLS fingerprint.

    ``session`` serves ordinary requests; ``session2`` is reserved for
    streaming posts so a long-lived stream does not tie up the main session.
    """

    def __init__(self, proxy=None, timeout=15, verify=True, impersonate='safari15_3'):
        self.proxies = {"http": proxy, "https": proxy}
        self.timeout = timeout
        self.verify = verify
        self.impersonate = impersonate
        session_kwargs = dict(
            proxies=self.proxies,
            timeout=self.timeout,
            impersonate=self.impersonate,
            verify=self.verify,
        )
        self.session = AsyncSession(**session_kwargs)
        self.session2 = AsyncSession(**session_kwargs)

    async def post(self, *args, **kwargs):
        return await self.session.post(*args, **kwargs)

    async def post_stream(self, *args, headers=None, cookies=None, **kwargs):
        # Reuse the main session's headers/cookies unless explicitly overridden.
        if self.session:
            headers = headers or self.session.headers
            cookies = cookies or self.session.cookies
        return await self.session2.post(*args, headers=headers, cookies=cookies, **kwargs)

    async def get(self, *args, **kwargs):
        return await self.session.get(*args, **kwargs)

    async def request(self, *args, **kwargs):
        return await self.session.request(*args, **kwargs)

    async def put(self, *args, **kwargs):
        return await self.session.put(*args, **kwargs)

    async def close(self):
        # Best-effort teardown of both sessions; close failures are ignored.
        for name in ("session", "session2"):
            if getattr(self, name):
                try:
                    await getattr(self, name).close()
                    delattr(self, name)
                except Exception:
                    pass
57 |
--------------------------------------------------------------------------------
/.github/workflows/build_docker.yml:
--------------------------------------------------------------------------------
name: Build Docker Image

on:
  push:
    branches:
      - main
    paths-ignore:
      - 'README.md'
      - 'docker-compose.yml'
      - 'docker-compose-warp.yml'
      - 'docs/**'
      - '.github/workflows/build_docker_main.yml'
      - '.github/workflows/build_docker_dev.yml'
  workflow_dispatch:

jobs:
  main:
    runs-on: ubuntu-latest

    steps:
      - name: Check out the repository
        # v2 runs on a deprecated Node runtime; v4 is the supported release.
        uses: actions/checkout@v4

      - name: Read the version from version.txt
        id: get_version
        run: |
          version=$(cat version.txt)
          echo "Current version: v$version"
          # ::set-output is deprecated; write step outputs to $GITHUB_OUTPUT.
          echo "version=v$version" >> "$GITHUB_OUTPUT"

      - name: Commit and push version tag
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          version=${{ steps.get_version.outputs.version }}
          git config --local user.email "action@github.com"
          git config --local user.name "GitHub Action"
          git tag "$version"
          # Bug fix: the push previously used ${GHCR_PAT}, which is never
          # defined in this job (only GITHUB_TOKEN is exported above), so the
          # tag push authenticated with an empty token.
          git push https://x-access-token:${GITHUB_TOKEN}@github.com/niansuh/chat2api.git "$version"

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: niansuh/chat2api
          tags: |
            type=raw,value=latest,enable={{is_default_branch}}
            type=raw,value=${{ steps.get_version.outputs.version }}

      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          file: Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
--------------------------------------------------------------------------------
/chatgpt/refreshToken.py:
--------------------------------------------------------------------------------
1 | import json
2 | import random
3 | import time
4 |
5 | from fastapi import HTTPException
6 |
7 | from utils.Client import Client
8 | from utils.Logger import logger
9 | from utils.configs import proxy_url_list
10 | import utils.globals as globals
11 |
12 |
async def rt2ac(refresh_token, force_refresh=False):
    """Exchange *refresh_token* for an access_token, serving cached results
    for five days unless *force_refresh* is set.

    Fresh tokens are written back into the persisted refresh map.
    Raises HTTPException when the upstream refresh fails.
    """
    cached = globals.refresh_map.get(refresh_token)
    fresh_enough = cached and int(time.time()) - cached.get("timestamp", 0) < 5 * 24 * 60 * 60
    if not force_refresh and fresh_enough:
        # logger.info(f"refresh_token -> access_token from cache")
        return cached["token"]
    try:
        access_token = await chat_refresh(refresh_token)
    except HTTPException as e:
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    globals.refresh_map[refresh_token] = {"token": access_token, "timestamp": int(time.time())}
    with open(globals.REFRESH_MAP_FILE, "w") as f:
        json.dump(globals.refresh_map, f, indent=4)
    logger.info(f"refresh_token -> access_token with openai: {access_token}")
    return access_token
28 |
29 |
async def chat_refresh(refresh_token):
    """Exchange *refresh_token* for an access_token against the OpenAI OAuth endpoint.

    Permanently-rejected tokens (invalid_grant / access_denied) are appended to
    the error token list and file so they can be skipped later. Any failure is
    surfaced as HTTPException(500); both sessions are always closed.
    """
    data = {
        # OAuth client id and redirect of the official OpenAI iOS app flow.
        "client_id": "pdlLIX2Y72MIl2rhLhTE9VV9bN905kBh",
        "grant_type": "refresh_token",
        "redirect_uri": "com.openai.chat://auth0.openai.com/ios/com.openai.chat/callback",
        "refresh_token": refresh_token
    }
    # Route the refresh through a random configured proxy, if any.
    client = Client(proxy=random.choice(proxy_url_list) if proxy_url_list else None)
    try:
        r = await client.post("https://auth0.openai.com/oauth/token", json=data, timeout=5)
        if r.status_code == 200:
            access_token = r.json()['access_token']
            return access_token
        else:
            # invalid_grant / access_denied means the refresh token is dead:
            # record it (in memory and on disk) so callers can avoid it.
            if "invalid_grant" in r.text or "access_denied" in r.text:
                if refresh_token not in globals.error_token_list:
                    globals.error_token_list.append(refresh_token)
                    with open(globals.ERROR_TOKENS_FILE, "a", encoding="utf-8") as f:
                        f.write(refresh_token + "\n")
                raise Exception(r.text)
            else:
                # Other upstream errors: keep only a truncated body for the log.
                raise Exception(r.text[:300])
    except Exception as e:
        logger.error(f"Failed to refresh access_token `{refresh_token}`: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to refresh access_token.")
    finally:
        # Always release the curl sessions, even on the success path.
        await client.close()
        del client
58 |
--------------------------------------------------------------------------------
/api/tokens.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import tiktoken
4 |
5 |
async def calculate_image_tokens(width, height, detail):
    """Approximate the vision token cost of a *width* x *height* image.

    "low" detail is a flat 85 tokens. Otherwise the image is scaled to fit
    within 2048x2048, then so its short side is at most 768, and charged 170
    tokens per 512x512 tile plus an 85-token base.
    """
    if detail == "low":
        return 85

    # Stage 1: cap the longest side at 2048.
    longest = max(width, height)
    if longest > 2048:
        ratio = 2048 / longest
        width, height = int(width * ratio), int(height * ratio)

    # Stage 2: cap the shortest side at 768.
    shortest = min(width, height)
    if shortest > 768:
        ratio = 768 / shortest
        width, height = int(width * ratio), int(height * ratio)

    # 170 tokens per 512px tile, plus the flat base cost.
    tiles = math.ceil(width / 512) * math.ceil(height / 512)
    return tiles * 170 + 85
38 |
39 |
async def num_tokens_from_messages(messages, model=''):
    """Estimate the prompt token count for a chat *messages* list.

    Text parts inside list-valued fields are encoded individually; image_url
    parts are not counted here. A per-message overhead and a trailing reply
    primer of 3 tokens are added, matching OpenAI's counting scheme.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model: fall back to the cl100k_base encoding.
        encoding = tiktoken.get_encoding("cl100k_base")
    per_message = 4 if model == "gpt-3.5-turbo-0301" else 3
    total = 0
    for message in messages:
        total += per_message
        for value in message.values():
            if isinstance(value, list):
                for part in value:
                    if part.get("type") == "text":
                        total += len(encoding.encode(part.get("text")))
                    # image_url parts are deliberately skipped here.
            else:
                total += len(encoding.encode(value))
    return total + 3
63 |
64 |
async def num_tokens_from_content(content, model=None):
    """Count the tokens in the plain string *content* using *model*'s
    encoding (cl100k_base when the model is unknown)."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(content))
73 |
74 |
async def split_tokens_from_content(content, max_tokens, model=None):
    """Truncate *content* to at most *max_tokens* tokens.

    Returns ``(text, token_count, finish_reason)`` where finish_reason is
    "length" when truncation occurred and "stop" otherwise.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
    tokens = encoding.encode(content)
    if len(tokens) >= max_tokens:
        return encoding.decode(tokens[:max_tokens]), max_tokens, "length"
    return content, len(tokens), "stop"
87 |
--------------------------------------------------------------------------------
/utils/globals.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 |
4 | import utils.configs as configs
5 | from utils.Logger import logger
6 |
# All persisted state lives under DATA_FOLDER so a single volume mount keeps
# it across container restarts.
DATA_FOLDER = "data"
TOKENS_FILE = os.path.join(DATA_FOLDER, "token.txt")
REFRESH_MAP_FILE = os.path.join(DATA_FOLDER, "refresh_map.json")
ERROR_TOKENS_FILE = os.path.join(DATA_FOLDER, "error_token.txt")
WSS_MAP_FILE = os.path.join(DATA_FOLDER, "wss_map.json")
FP_FILE = os.path.join(DATA_FOLDER, "fp_map.json")
SEED_MAP_FILE = os.path.join(DATA_FOLDER, "seed_map.json")
CONVERSATION_MAP_FILE = os.path.join(DATA_FOLDER, "conversation_map.json")

# Round-robin counter for token selection (mutated by consumers).
count = 0
# Browser fingerprints to impersonate; overridable via configuration.
impersonate_list = [
    "chrome99",
    "chrome100",
    "chrome101",
    "chrome104",
    "chrome107",
    "chrome110",
    "chrome116",
    "chrome119",
    "chrome120",
    "chrome123",
    "edge99",
    "edge101",
] if not configs.impersonate_list else configs.impersonate_list


def _load_json_map(path, encoding=None):
    """Return the JSON object stored at *path*, or {} when the file is
    missing, partial, or corrupt (fixes the previous copy-pasted loaders
    with bare ``except:`` clauses)."""
    if not os.path.exists(path):
        return {}
    with open(path, "r", encoding=encoding) as f:
        try:
            return json.load(f)
        except Exception:
            # Corrupt/partial file: start fresh rather than crash at import.
            return {}


def _load_token_lines(path):
    """Read stripped, non-comment lines from *path*; create an empty file
    (and return []) when it does not exist yet."""
    if not os.path.exists(path):
        with open(path, "w", encoding="utf-8"):
            pass
        return []
    with open(path, "r", encoding="utf-8") as f:
        return [line.strip() for line in f if line.strip() and not line.startswith("#")]


if not os.path.exists(DATA_FOLDER):
    os.makedirs(DATA_FOLDER)

refresh_map = _load_json_map(REFRESH_MAP_FILE)
wss_map = _load_json_map(WSS_MAP_FILE)
fp_map = _load_json_map(FP_FILE, encoding="utf-8")
seed_map = _load_json_map(SEED_MAP_FILE)
conversation_map = _load_json_map(CONVERSATION_MAP_FILE)
token_list = _load_token_lines(TOKENS_FILE)
error_token_list = _load_token_lines(ERROR_TOKENS_FILE)

if token_list:
    logger.info(f"Token list count: {len(token_list)}, Error token list count: {len(error_token_list)}")
logger.info("-" * 60)
108 |
--------------------------------------------------------------------------------
/templates/login.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Login
7 |
8 |
13 |
14 |
15 |
16 |
Login
17 |
24 |
25 | Not
26 | RT
27 | And
28 | AT
29 | The input will be used as
30 | Seed
31 | Randomly select a backend account
32 |
33 |
34 |
35 |
65 |
66 |
81 |
82 |
83 |
--------------------------------------------------------------------------------
/utils/config.py:
--------------------------------------------------------------------------------
1 | import ast
2 | import os
3 |
4 | from dotenv import load_dotenv
5 |
6 | from utils.Logger import logger
7 |
8 | load_dotenv(encoding="ascii")
9 |
10 |
def is_true(x):
    """Interpret *x* as a boolean flag.

    Booleans pass through unchanged; strings match common truthy spellings
    case-insensitively ('true', '1', 't', 'y', 'yes'); integers are truthy
    only when exactly 1; every other type is False.
    """
    if isinstance(x, bool):
        return x
    if isinstance(x, str):
        return x.lower() in ('true', '1', 't', 'y', 'yes')
    if isinstance(x, int):
        return x == 1
    return False
20 |
21 |
# ----- Security-related settings -----
api_prefix = os.getenv('API_PREFIX', None)  # URL prefix used as a path "password"
authorization = os.getenv('AUTHORIZATION', '').replace(' ', '')  # comma-separated client keys
chatgpt_base_url = os.getenv('CHATGPT_BASE_URL', 'https://chatgpt.com').replace(' ', '')
auth_key = os.getenv('AUTH_KEY', None)
x_sign = os.getenv('X_SIGN', None)

# The env var name is assembled from two string parts, presumably to avoid
# tripping naive "Arkose" string scanners -- TODO confirm. Falls back to the
# variant spelled with a zero ('ARK0SE_TOKEN_URL').
ark0se_token_url = os.getenv('ARK' + 'OSE_TOKEN_URL', '').replace(' ', '')
if not ark0se_token_url:
    ark0se_token_url = os.getenv('ARK0SE_TOKEN_URL', None)
proxy_url = os.getenv('PROXY_URL', '').replace(' ', '')
export_proxy_url = os.getenv('EXPORT_PROXY_URL', None)
impersonate_list_str = os.getenv('IMPERSONATE', '[]')  # Python list literal in env
user_agents_list_str = os.getenv('USER_AGENTS', '[]')  # Python list literal in env

cf_file_url = os.getenv('CF_FILE_URL', None)
turnstile_solver_url = os.getenv('TURNSTILE_SOLVER_URL', None)

# ----- Feature flags / tunables -----
history_disabled = is_true(os.getenv('HISTORY_DISABLED', True))
pow_difficulty = os.getenv('POW_DIFFICULTY', '000032')
retry_times = int(os.getenv('RETRY_TIMES', 3))
conversation_only = is_true(os.getenv('CONVERSATION_ONLY', False))
enable_limit = is_true(os.getenv('ENABLE_LIMIT', True))
upload_by_url = is_true(os.getenv('UPLOAD_BY_URL', False))
check_model = is_true(os.getenv('CHECK_MODEL', False))
scheduled_refresh = is_true(os.getenv('SCHEDULED_REFRESH', False))
random_token = is_true(os.getenv('RANDOM_TOKEN', True))
oai_language = os.getenv('OAI_LANGUAGE', 'en-US')

# Derived comma-separated lists (empty env value -> empty list).
authorization_list = authorization.split(',') if authorization else []
chatgpt_base_url_list = chatgpt_base_url.split(',') if chatgpt_base_url else []
ark0se_token_url_list = ark0se_token_url.split(',') if ark0se_token_url else []
proxy_url_list = proxy_url.split(',') if proxy_url else []
# literal_eval raises on malformed input, so a bad IMPERSONATE/USER_AGENTS
# value aborts startup -- presumably intentional fail-fast; confirm.
impersonate_list = ast.literal_eval(impersonate_list_str)
user_agents_list = ast.literal_eval(user_agents_list_str)

enable_gateway = is_true(os.getenv('ENABLE_GATEWAY', False))
auto_seed = is_true(os.getenv('AUTO_SEED', True))
no_sentinel = is_true(os.getenv('NO_SENTINEL', False))

# Version string shipped with the repo, read relative to the working directory.
with open('version.txt') as f:
    version = f.read().strip()

# Startup banner. NOTE(review): AUTHORIZATION and AUTH_KEY are secrets and are
# written to the log in clear text -- confirm this is acceptable.
logger.info("-" * 60)
logger.info(f"Chat2Api {version} | https://github.com/Niansuh/chat2api")
logger.info("-" * 60)
logger.info("Environment variables:")
logger.info("------------------------- Security -------------------------")
logger.info("API_PREFIX: " + str(api_prefix))
logger.info("AUTHORIZATION: " + str(authorization_list))
logger.info("AUTH_KEY: " + str(auth_key))
logger.info("------------------------- Request --------------------------")
logger.info("CHATGPT_BASE_URL: " + str(chatgpt_base_url_list))
logger.info("PROXY_URL: " + str(proxy_url_list))
logger.info("EXPORT_PROXY_URL: " + str(export_proxy_url))
logger.info("IMPERSONATE: " + str(impersonate_list))
logger.info("USER_AGENTS: " + str(user_agents_list))
logger.info("---------------------- Functionality -----------------------")
logger.info("HISTORY_DISABLED: " + str(history_disabled))
logger.info("POW_DIFFICULTY: " + str(pow_difficulty))
logger.info("RETRY_TIMES: " + str(retry_times))
logger.info("CONVERSATION_ONLY: " + str(conversation_only))
logger.info("ENABLE_LIMIT: " + str(enable_limit))
logger.info("UPLOAD_BY_URL: " + str(upload_by_url))
logger.info("CHECK_MODEL: " + str(check_model))
logger.info("SCHEDULED_REFRESH: " + str(scheduled_refresh))
logger.info("RANDOM_TOKEN: " + str(random_token))
logger.info("OAI_LANGUAGE: " + str(oai_language))
logger.info("------------------------- Gateway --------------------------")
logger.info("ENABLE_GATEWAY: " + str(enable_gateway))
logger.info("AUTO_SEED: " + str(auto_seed))
logger.info("-" * 60)
93 |
--------------------------------------------------------------------------------
/templates/tokens.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Tokens Management
7 |
8 |
54 |
55 |
56 |
57 |
58 |
Current number of available tokens:{{ tokens_count }}
59 |
64 |
65 |
Click Clear to clear uploaded and error tokens.
66 |
69 |
70 |
71 |
72 |
73 |
Error Tokens
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
--------------------------------------------------------------------------------
/chatgpt/authorization.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import random
4 | import uuid
5 |
6 | import ua_generator
7 | from ua_generator.options import Options
8 | from ua_generator.data.version import VersionRange
9 | from fastapi import HTTPException
10 |
11 | import utils.configs as configs
12 | import utils.globals as globals
13 | from chatgpt.refreshToken import rt2ac
14 | from utils.Logger import logger
15 |
16 |
def get_req_token(req_token, seed=None):
    """Resolve the backend token to use for an upstream request.

    With AUTO_SEED enabled: a *seed* is bound to a random usable backend
    token (persisted in seed_map); an authorized *req_token* is swapped for
    a pooled token (random or round-robin); anything else passes through.
    With AUTO_SEED disabled: *req_token* itself is treated as a seed and
    must already exist in seed_map, otherwise 401.
    """
    if configs.auto_seed:
        # Usable pool = uploaded tokens minus known-bad ones.
        available_token_list = list(set(globals.token_list) - set(globals.error_token_list))
        length = len(available_token_list)
        if seed and length > 0:
            if seed not in globals.seed_map:
                # Bind a random backend token to the new seed and persist it.
                globals.seed_map[seed] = {"token": random.choice(available_token_list), "conversations": []}
                with open(globals.SEED_MAP_FILE, "w") as f:
                    json.dump(globals.seed_map, f, indent=4)
            # Bug fix: the original returned the caller-supplied req_token on
            # the very first request for a new seed instead of the token just
            # bound to it, so the first call bypassed the seed binding.
            return globals.seed_map[seed]["token"]

        if req_token in configs.authorization_list:
            if length > 0:
                if configs.random_token:
                    return random.choice(available_token_list)
                # Round-robin: advance the shared cursor, wrap at pool size.
                globals.count += 1
                globals.count %= length
                return available_token_list[globals.count]
            # Authorized caller but no usable backend tokens.
            return None
        # Unknown key: assume the caller passed a raw AccessToken/RefreshToken.
        return req_token
    else:
        # AUTO_SEED off: the bearer value is itself a seed and must be known.
        seed = req_token
        if seed not in globals.seed_map:
            raise HTTPException(status_code=401, detail={"error": "Invalid Seed"})
        return globals.seed_map[seed]["token"]
48 |
49 |
def get_fp(req_token):
    """Return the browser fingerprint bound to *req_token*.

    A fingerprint bundles the request headers (user-agent, sec-ch-ua-*),
    the curl impersonation target, an egress proxy URL, and an oai-device-id.
    A stored fingerprint is reused (with stale fields re-randomized and
    persisted); otherwise a fresh one is generated, and persisted only when
    *req_token* is non-empty.
    """
    fp = globals.fp_map.get(req_token, {})
    if fp and fp.get("user-agent") and fp.get("impersonate"):
        # NOTE(review): this only re-picks a proxy when the stored value is
        # None *and* None is absent from the configured proxy list; a stale
        # non-None proxy_url is never re-validated -- confirm this is intended
        # (an `is not None` check may have been meant).
        if "proxy_url" in fp.keys() and fp["proxy_url"] is None and fp["proxy_url"] not in configs.proxy_url_list:
            fp["proxy_url"] = random.choice(configs.proxy_url_list) if configs.proxy_url_list else None
            globals.fp_map[req_token] = fp
            with open(globals.FP_FILE, "w", encoding="utf-8") as f:
                json.dump(globals.fp_map, f, indent=4)
        # Re-randomize the impersonation target if it left the allowed list.
        if globals.impersonate_list and "impersonate" in fp.keys() and fp["impersonate"] not in globals.impersonate_list:
            fp["impersonate"] = random.choice(globals.impersonate_list)
            globals.fp_map[req_token] = fp
            with open(globals.FP_FILE, "w", encoding="utf-8") as f:
                json.dump(globals.fp_map, f, indent=4)
        # Same for the user-agent when a USER_AGENTS allow-list is configured.
        if configs.user_agents_list and "user-agent" in fp.keys() and fp["user-agent"] not in configs.user_agents_list:
            fp["user-agent"] = random.choice(configs.user_agents_list)
            globals.fp_map[req_token] = fp
            with open(globals.FP_FILE, "w", encoding="utf-8") as f:
                json.dump(globals.fp_map, f, indent=4)
        # Normalize header keys to lower case before returning.
        fp = {k.lower(): v for k, v in fp.items()}
        return fp
    else:
        # Generate a fresh fingerprint from a random desktop browser profile.
        options = Options(version_ranges={
            'chrome': VersionRange(min_version=124),
            'edge': VersionRange(min_version=124),
        })
        ua = ua_generator.generate(device='desktop', browser=('chrome', 'edge', 'firefox', 'safari'),
                                   platform=('windows', 'macos'), options=options)
        fp = {
            "user-agent": ua.text if not configs.user_agents_list else random.choice(configs.user_agents_list),
            "sec-ch-ua-platform": ua.ch.platform,
            "sec-ch-ua": ua.ch.brands,
            "sec-ch-ua-mobile": ua.ch.mobile,
            "impersonate": random.choice(globals.impersonate_list),
            "proxy_url": random.choice(configs.proxy_url_list) if configs.proxy_url_list else None,
            "oai-device-id": str(uuid.uuid4())
        }
        if not req_token:
            # Anonymous request: use the fingerprint once, don't persist it.
            return fp
        else:
            globals.fp_map[req_token] = fp
            with open(globals.FP_FILE, "w", encoding="utf-8") as f:
                json.dump(globals.fp_map, f, indent=4)
            return fp
93 |
94 |
async def verify_token(req_token):
    """Validate *req_token* and resolve it to an access token.

    Empty tokens are rejected (401) only when an AUTHORIZATION allow-list is
    configured. JWT access tokens ('eyJhbGciOi...') and share tokens ('fk-')
    pass through unchanged; 45-character values are treated as refresh tokens
    and exchanged via rt2ac; anything else is returned as-is.
    """
    if not req_token:
        if configs.authorization_list:
            logger.error("Unauthorized with empty token.")
            raise HTTPException(status_code=401)
        return None

    if req_token.startswith("eyJhbGciOi") or req_token.startswith("fk-"):
        # Already an access/share token: nothing to exchange.
        return req_token

    if len(req_token) == 45:
        try:
            if req_token in globals.error_token_list:
                raise HTTPException(status_code=401, detail="Error RefreshToken")
            # Exchange the refresh token for a (cached) access token.
            return await rt2ac(req_token, force_refresh=False)
        except HTTPException as e:
            raise HTTPException(status_code=e.status_code, detail=e.detail)

    return req_token
117 |
118 |
async def refresh_all_tokens(force_refresh=False):
    """Refresh the access token of every usable refresh token.

    Only 45-character entries (refresh tokens) are processed; tokens already
    flagged as erroring are skipped. Failures on individual tokens are
    ignored so one bad token cannot abort the sweep.
    """
    usable_tokens = set(globals.token_list) - set(globals.error_token_list)
    for token in usable_tokens:
        if len(token) != 45:
            continue
        try:
            # Small delay to avoid hammering the refresh endpoint.
            await asyncio.sleep(0.5)
            await rt2ac(token, force_refresh=force_refresh)
        except HTTPException:
            pass
    logger.info("All tokens refreshed.")
128 |
--------------------------------------------------------------------------------
/api/chat2api.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import types
3 |
4 | from apscheduler.schedulers.asyncio import AsyncIOScheduler
5 | from fastapi import Request, HTTPException, Form, Security
6 | from fastapi.responses import HTMLResponse, StreamingResponse, JSONResponse
7 | from fastapi.security import HTTPAuthorizationCredentials
8 | from starlette.background import BackgroundTask
9 |
10 | import utils.globals as globals
11 | from app import app, templates, security_scheme
12 | from chatgpt.ChatService import ChatService
13 | from chatgpt.authorization import refresh_all_tokens
14 | from utils.Logger import logger
15 | from utils.configs import api_prefix, scheduled_refresh
16 | from utils.retry import async_retry
17 |
# Shared background scheduler for token maintenance jobs.
scheduler = AsyncIOScheduler()


@app.on_event("startup")
async def app_start():
    # When SCHEDULED_REFRESH is on, force-refresh every token at 03:00 on
    # every second day of the month (cron day='*/2').
    if scheduled_refresh:
        scheduler.add_job(id='refresh', func=refresh_all_tokens, trigger='cron', hour=3, minute=0, day='*/2',
                          kwargs={'force_refresh': True})
        scheduler.start()
        # Also run a non-forced refresh immediately after startup.
        asyncio.get_event_loop().call_later(0, lambda: asyncio.create_task(refresh_all_tokens(force_refresh=False)))
28 |
29 |
async def to_send_conversation(request_data, req_token):
    """Build a ChatService primed with request data and chat requirements.

    The service's HTTP client is closed before any error is re-raised as an
    HTTPException, so no client leaks on failure.
    """
    chat_service = ChatService(req_token)
    try:
        await chat_service.set_dynamic_data(request_data)
        await chat_service.get_chat_requirements()
    except HTTPException as e:
        await chat_service.close_client()
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        await chat_service.close_client()
        logger.error(f"Server error, {str(e)}")
        raise HTTPException(status_code=500, detail="Server error")
    return chat_service
43 |
44 |
async def process(request_data, req_token):
    """Full send pipeline: prepare a ChatService and fire the conversation."""
    service = await to_send_conversation(request_data, req_token)
    await service.prepare_send_conversation()
    response = await service.send_conversation()
    return service, response
50 |
51 |
@app.post(f"/{api_prefix}/v1/chat/completions" if api_prefix else "/v1/chat/completions")
async def send_conversation(request: Request, credentials: HTTPAuthorizationCredentials = Security(security_scheme)):
    """OpenAI-compatible chat completions endpoint.

    Streams SSE when the service yields an async generator, otherwise
    returns a plain JSON completion. The service's HTTP client is closed
    via a background task after the response is sent.
    """
    req_token = credentials.credentials
    try:
        request_data = await request.json()
    except Exception:
        raise HTTPException(status_code=400, detail={"error": "Invalid JSON body"})
    # async_retry re-runs the whole pipeline RETRY_TIMES times on failure.
    chat_service, res = await async_retry(process, request_data, req_token)
    try:
        if isinstance(res, types.AsyncGeneratorType):
            # Streaming: close the client only after the stream is consumed.
            background = BackgroundTask(chat_service.close_client)
            return StreamingResponse(res, media_type="text/event-stream", background=background)
        else:
            background = BackgroundTask(chat_service.close_client)
            return JSONResponse(res, media_type="application/json", background=background)
    # NOTE(review): constructing the response objects rarely raises, so these
    # handlers are mostly defensive -- confirm they are still wanted.
    except HTTPException as e:
        await chat_service.close_client()
        if e.status_code == 500:
            logger.error(f"Server error, {str(e)}")
            raise HTTPException(status_code=500, detail="Server error")
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        await chat_service.close_client()
        logger.error(f"Server error, {str(e)}")
        raise HTTPException(status_code=500, detail="Server error")
77 |
78 |
@app.get(f"/{api_prefix}/tokens" if api_prefix else "/tokens", response_class=HTMLResponse)
async def upload_html(request: Request):
    """Render the token-management page with the count of usable tokens."""
    usable = set(globals.token_list) - set(globals.error_token_list)
    context = {"request": request, "api_prefix": api_prefix, "tokens_count": len(usable)}
    return templates.TemplateResponse("tokens.html", context)
84 |
85 |
@app.post(f"/{api_prefix}/tokens/upload" if api_prefix else "/tokens/upload")
async def upload_post(text: str = Form(...)):
    """Append pasted tokens (one per line) to the in-memory pool and file.

    Blank lines and lines starting with '#' are skipped.
    """
    for raw in text.split("\n"):
        token = raw.strip()
        if not token or raw.startswith("#"):
            continue
        globals.token_list.append(token)
        with open(globals.TOKENS_FILE, "a", encoding="utf-8") as f:
            f.write(token + "\n")
    logger.info(f"Token count: {len(globals.token_list)}, Error token count: {len(globals.error_token_list)}")
    tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
    return {"status": "success", "tokens_count": tokens_count}
97 |
98 |
@app.post(f"/{api_prefix}/tokens/clear" if api_prefix else "/tokens/clear")
async def clear_tokens():
    """Clear all uploaded and error tokens and truncate the token file.

    Renamed from ``upload_post``: the duplicate function name shadowed the
    upload handler at module level (routing itself was unaffected because
    FastAPI registers handlers at decoration time).
    """
    globals.token_list.clear()
    globals.error_token_list.clear()
    # Truncate the persisted token file.
    # NOTE(review): the error-token file is not truncated even though the
    # in-memory error list is cleared -- confirm whether that is intended.
    with open(globals.TOKENS_FILE, "w", encoding="utf-8"):
        pass
    logger.info(f"Token count: {len(globals.token_list)}, Error token count: {len(globals.error_token_list)}")
    tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
    return {"status": "success", "tokens_count": tokens_count}
108 |
109 |
@app.post(f"/{api_prefix}/tokens/error" if api_prefix else "/tokens/error")
async def error_tokens():
    """Return the deduplicated list of tokens that have been flagged as failing."""
    return {"status": "success", "error_tokens": list(set(globals.error_token_list))}
114 |
115 |
@app.get(f"/{api_prefix}/tokens/add/{{token}}" if api_prefix else "/tokens/add/{token}")
async def add_token(token: str):
    """Add a single token supplied as a URL path segment.

    Blank tokens and tokens starting with '#' are silently ignored.
    """
    cleaned = token.strip()
    if cleaned and not token.startswith("#"):
        globals.token_list.append(cleaned)
        with open(globals.TOKENS_FILE, "a", encoding="utf-8") as f:
            f.write(cleaned + "\n")
    logger.info(f"Token count: {len(globals.token_list)}, Error token count: {len(globals.error_token_list)}")
    tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
    return {"status": "success", "tokens_count": tokens_count}
125 |
--------------------------------------------------------------------------------
/chat2api.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import types
3 | import warnings
4 |
5 | from apscheduler.schedulers.asyncio import AsyncIOScheduler
6 | from fastapi import FastAPI, Request, Depends, HTTPException, Form
7 | from fastapi.middleware.cors import CORSMiddleware
8 | from fastapi.responses import HTMLResponse
9 | from fastapi.responses import StreamingResponse, JSONResponse
10 | from fastapi.security import OAuth2PasswordBearer
11 | from fastapi.templating import Jinja2Templates
12 | from starlette.background import BackgroundTask
13 |
14 | from chatgpt.ChatService import ChatService
15 | from chatgpt.authorization import refresh_all_tokens
16 | import chatgpt.globals as globals
17 | from chatgpt.reverseProxy import chatgpt_reverse_proxy
18 | from utils.Logger import logger
19 | from utils.config import api_prefix, scheduled_refresh
20 | from utils.retry import async_retry
21 |
# Silence all library warnings for a cleaner console.
warnings.filterwarnings("ignore")

# Application singletons: ASGI app, background scheduler, Jinja templates,
# and a bearer-token extractor (auto_error=False so a missing Authorization
# header yields None instead of a framework-generated 401).
app = FastAPI()
scheduler = AsyncIOScheduler()
templates = Jinja2Templates(directory="templates")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token", auto_error=False)

# CORS is fully open (any origin/method/header, credentials allowed).
# NOTE(review): confirm this is intentional for a publicly exposed proxy.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
36 |
37 |
@app.on_event("startup")
async def app_start():
    # When SCHEDULED_REFRESH is on, force-refresh every token at 03:00 on
    # every fourth day of the month (cron day='*/4').
    if scheduled_refresh:
        scheduler.add_job(id='refresh', func=refresh_all_tokens, trigger='cron', hour=3, minute=0, day='*/4', kwargs={'force_refresh': True})
        scheduler.start()
        # Also run a non-forced refresh immediately after startup.
        asyncio.get_event_loop().call_later(0, lambda: asyncio.create_task(refresh_all_tokens(force_refresh=False)))
44 |
45 |
async def to_send_conversation(request_data, req_token):
    """Create a ChatService loaded with dynamic data and chat requirements.

    On any failure the underlying HTTP client is closed before the error is
    re-raised as an HTTPException, so no client leaks.
    """
    service = ChatService(req_token)
    try:
        await service.set_dynamic_data(request_data)
        await service.get_chat_requirements()
        return service
    except HTTPException as e:
        await service.close_client()
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        await service.close_client()
        logger.error(f"Server error, {str(e)}")
        raise HTTPException(status_code=500, detail="Server error")
59 |
60 |
async def process(request_data, req_token):
    """Run the send pipeline and return the (service, response) pair."""
    service = await to_send_conversation(request_data, req_token)
    await service.prepare_send_conversation()
    return service, await service.send_conversation()
66 |
67 |
@app.post(f"/{api_prefix}/v1/chat/completions" if api_prefix else "/v1/chat/completions")
async def send_conversation(request: Request, req_token: str = Depends(oauth2_scheme)):
    """OpenAI-compatible chat completions endpoint.

    Streams SSE when the service yields an async generator, otherwise
    returns a plain JSON completion. The service's HTTP client is closed
    via a background task after the response is sent.
    """
    try:
        request_data = await request.json()
    except Exception:
        raise HTTPException(status_code=400, detail={"error": "Invalid JSON body"})
    # async_retry re-runs the whole pipeline RETRY_TIMES times on failure.
    chat_service, res = await async_retry(process, request_data, req_token)
    try:
        if isinstance(res, types.AsyncGeneratorType):
            # Streaming: close the client only after the stream is consumed.
            background = BackgroundTask(chat_service.close_client)
            return StreamingResponse(res, media_type="text/event-stream", background=background)
        else:
            background = BackgroundTask(chat_service.close_client)
            return JSONResponse(res, media_type="application/json", background=background)
    # NOTE(review): constructing the response objects rarely raises, so these
    # handlers are mostly defensive -- confirm they are still wanted.
    except HTTPException as e:
        await chat_service.close_client()
        if e.status_code == 500:
            logger.error(f"Server error, {str(e)}")
            raise HTTPException(status_code=500, detail="Server error")
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        await chat_service.close_client()
        logger.error(f"Server error, {str(e)}")
        raise HTTPException(status_code=500, detail="Server error")
92 |
93 |
@app.get(f"/{api_prefix}/tokens" if api_prefix else "/tokens", response_class=HTMLResponse)
async def upload_html(request: Request):
    # Render the token-management page; the count excludes known-bad tokens.
    tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
    return templates.TemplateResponse("tokens.html",
                                      {"request": request, "api_prefix": api_prefix, "tokens_count": tokens_count})
99 |
100 |
@app.post(f"/{api_prefix}/tokens/upload" if api_prefix else "/tokens/upload")
async def upload_post(text: str = Form(...)):
    """Append pasted tokens (one per line) to the in-memory pool and file.

    Blank lines and lines starting with '#' are skipped.
    """
    for raw in text.split("\n"):
        token = raw.strip()
        if not token or raw.startswith("#"):
            continue
        globals.token_list.append(token)
        with open("data/token.txt", "a", encoding="utf-8") as f:
            f.write(token + "\n")
    logger.info(f"Token count: {len(globals.token_list)}, Error token count: {len(globals.error_token_list)}")
    tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
    return {"status": "success", "tokens_count": tokens_count}
112 |
113 |
@app.post(f"/{api_prefix}/tokens/clear" if api_prefix else "/tokens/clear")
async def clear_tokens():
    """Clear all uploaded and error tokens and truncate the token file.

    Renamed from ``upload_post``: the duplicate function name shadowed the
    upload handler at module level (routing itself was unaffected because
    FastAPI registers handlers at decoration time).
    """
    globals.token_list.clear()
    globals.error_token_list.clear()
    # Truncate the persisted token file.
    # NOTE(review): data/error_token.txt is not truncated even though the
    # in-memory error list is cleared -- confirm whether that is intended.
    with open("data/token.txt", "w", encoding="utf-8"):
        pass
    logger.info(f"Token count: {len(globals.token_list)}, Error token count: {len(globals.error_token_list)}")
    tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
    return {"status": "success", "tokens_count": tokens_count}
123 |
124 |
@app.post(f"/{api_prefix}/tokens/error" if api_prefix else "/tokens/error")
async def error_tokens():
    """Return the deduplicated list of tokens flagged as failing."""
    return {"status": "success", "error_tokens": list(set(globals.error_token_list))}
129 |
130 |
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH", "TRACE"])
async def reverse_proxy(request: Request, path: str):
    # Catch-all route: any path not matched above is forwarded to the
    # ChatGPT reverse proxy (gateway/WebUI mode).
    return await chatgpt_reverse_proxy(request, path)
134 |
--------------------------------------------------------------------------------
/api/files.py:
--------------------------------------------------------------------------------
1 | import io
2 |
3 | import pybase64
4 | from PIL import Image
5 |
6 | from utils.Client import Client
7 | from utils.configs import export_proxy_url, cf_file_url
8 |
9 |
async def get_file_content(url):
    """Fetch file bytes and MIME type from *url*.

    Supports data: URLs (decoded locally) and remote URLs (downloaded via
    the optional CF worker, or directly through the export proxy).
    Returns ``(content, mime_type)``; ``(None, None)`` on a non-200 response.
    """
    if url.startswith("data:"):
        # Assumes the canonical "data:<mime>;base64,<payload>" form -- a
        # non-base64 data URL would decode incorrectly; TODO confirm callers
        # only send base64 data URLs.
        mime_type, base64_data = url.split(';')[0].split(':')[1], url.split(',')[1]
        file_content = pybase64.b64decode(base64_data)
        return file_content, mime_type
    else:
        client = Client()
        try:
            if cf_file_url:
                # Delegate the download to the configured CF worker endpoint.
                body = {"file_url": url}
                r = await client.post(cf_file_url, timeout=60, json=body)
            else:
                r = await client.get(url, proxy=export_proxy_url, timeout=60)
            if r.status_code != 200:
                return None, None
            file_content = r.content
            # Strip any "; charset=..." suffix from the Content-Type header.
            mime_type = r.headers.get('Content-Type', '').split(';')[0].strip()
            return file_content, mime_type
        finally:
            await client.close()
            del client  # NOTE(review): redundant -- the local goes out of scope anyway
31 |
32 |
async def determine_file_use_case(mime_type):
    """Classify *mime_type* into an upload use case.

    Returns "multimodal" for image types, "my_files" for document types,
    and "ace_upload" for everything else.
    """
    multimodal_types = ["image/jpeg", "image/webp", "image/png", "image/gif"]
    my_files_types = ["text/x-php", "application/msword", "text/x-c", "text/html",
                      "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
                      "application/json", "text/javascript", "application/pdf",
                      "text/x-java", "text/x-tex", "text/x-typescript", "text/x-sh",
                      "text/x-csharp", "application/vnd.openxmlformats-officedocument.presentationml.presentation",
                      # Bug fix: was "application/x-latext" (typo), which could
                      # never match; get_file_extension uses "application/x-latex".
                      "text/x-c++", "application/x-latex", "text/markdown", "text/plain",
                      "text/x-ruby", "text/x-script.python"]

    if mime_type in multimodal_types:
        return "multimodal"
    elif mime_type in my_files_types:
        return "my_files"
    else:
        return "ace_upload"
49 |
50 |
async def get_image_size(file_content):
    """Return the (width, height) of the image encoded in *file_content* bytes."""
    buffer = io.BytesIO(file_content)
    with Image.open(buffer) as img:
        return img.size
54 |
55 |
# MIME type -> file extension lookup. Hoisted to module level so the mapping
# is built once instead of being re-created on every call.
_EXTENSION_BY_MIME = {
    "image/jpeg": ".jpg",
    "image/png": ".png",
    "image/gif": ".gif",
    "image/webp": ".webp",
    "text/x-php": ".php",
    "application/msword": ".doc",
    "text/x-c": ".c",
    "text/html": ".html",
    "application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
    "application/json": ".json",
    "text/javascript": ".js",
    "application/pdf": ".pdf",
    "text/x-java": ".java",
    "text/x-tex": ".tex",
    "text/x-typescript": ".ts",
    "text/x-sh": ".sh",
    "text/x-csharp": ".cs",
    "application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
    "text/x-c++": ".cpp",
    "application/x-latex": ".latex",
    "text/markdown": ".md",
    "text/plain": ".txt",
    "text/x-ruby": ".rb",
    "text/x-script.python": ".py",
    "application/zip": ".zip",
    "application/x-zip-compressed": ".zip",
    "application/x-tar": ".tar",
    "application/x-compressed-tar": ".tar.gz",
    "application/vnd.rar": ".rar",
    "application/x-rar-compressed": ".rar",
    "application/x-7z-compressed": ".7z",
    "application/octet-stream": ".bin",
    "audio/mpeg": ".mp3",
    "audio/wav": ".wav",
    "audio/ogg": ".ogg",
    "audio/aac": ".aac",
    "video/mp4": ".mp4",
    "video/x-msvideo": ".avi",
    "video/x-matroska": ".mkv",
    "video/webm": ".webm",
    "application/rtf": ".rtf",
    "application/vnd.ms-excel": ".xls",
    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
    "text/css": ".css",
    "text/xml": ".xml",
    "application/xml": ".xml",
    "application/vnd.android.package-archive": ".apk",
    "application/vnd.apple.installer+xml": ".mpkg",
    "application/x-bzip": ".bz",
    "application/x-bzip2": ".bz2",
    "application/x-csh": ".csh",
    "application/x-debian-package": ".deb",
    "application/x-dvi": ".dvi",
    "application/java-archive": ".jar",
    "application/x-java-jnlp-file": ".jnlp",
    "application/vnd.mozilla.xul+xml": ".xul",
    "application/vnd.ms-fontobject": ".eot",
    "application/ogg": ".ogx",
    "application/x-font-ttf": ".ttf",
    "application/font-woff": ".woff",
    "application/x-shockwave-flash": ".swf",
    "application/vnd.visio": ".vsd",
    "application/xhtml+xml": ".xhtml",
    "application/vnd.ms-powerpoint": ".ppt",
    "application/vnd.oasis.opendocument.text": ".odt",
    "application/vnd.oasis.opendocument.spreadsheet": ".ods",
    "application/x-xpinstall": ".xpi",
    "application/vnd.google-earth.kml+xml": ".kml",
    "application/vnd.google-earth.kmz": ".kmz",
    "application/x-font-otf": ".otf",
    "application/vnd.ms-excel.addin.macroEnabled.12": ".xlam",
    "application/vnd.ms-excel.sheet.binary.macroEnabled.12": ".xlsb",
    "application/vnd.ms-excel.template.macroEnabled.12": ".xltm",
    "application/vnd.ms-powerpoint.addin.macroEnabled.12": ".ppam",
    "application/vnd.ms-powerpoint.presentation.macroEnabled.12": ".pptm",
    "application/vnd.ms-powerpoint.slideshow.macroEnabled.12": ".ppsm",
    "application/vnd.ms-powerpoint.template.macroEnabled.12": ".potm",
    "application/vnd.ms-word.document.macroEnabled.12": ".docm",
    "application/vnd.ms-word.template.macroEnabled.12": ".dotm",
    "application/x-ms-application": ".application",
    "application/x-ms-wmd": ".wmd",
    "application/x-ms-wmz": ".wmz",
    "application/x-ms-xbap": ".xbap",
    "application/vnd.ms-xpsdocument": ".xps",
    "application/x-silverlight-app": ".xap"
}


async def get_file_extension(mime_type):
    """Return the file extension (dot included) for *mime_type*, or "" if unknown."""
    return _EXTENSION_BY_MIME.get(mime_type, "")
145 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Original Author: https://github.com/lanqian528/chat2api (CN Version)
2 |
3 | # CHAT2API
4 |
5 | 🤖 A simple ChatGPT TO API proxy
6 |
7 | 🌟 Free, unlimited `GPT-3.5` available without an account
8 |
9 | 💥 Support AccessToken account, support `GPT-4`, `GPT-4o`, `GPTs`
10 |
11 | 🔍 The reply format is completely consistent with the real API and adapts to almost all clients.
12 |
13 | ## Function
14 |
15 | > completed
16 | > - [x] Streaming, non-streaming
17 | > - [x] Login-free GPT3.5 conversation
18 | > - [x] GPT-3.5 dialogue (the incoming model name does not contain gpt-4, then gpt-3.5 will be used by default, which is text-davinci-002-render-sha)
19 | > - [x] GPT-4 dialogue (incoming model names include gpt-4, gpt-4o, and gpt-4-mobile to use the corresponding model; you need to pass in an AccessToken)
20 | > - [x] GPT-4 drawing, coding, networking
21 | > - [x] Support GPTs (incoming model name: gpt-4-gizmo-g-*)
22 | > - [x] Upload images and files (the format is API corresponding format, supports url and base64)
23 | > - [x] WebUI (http://127.0.0.1:5005, does not support login)
24 | > - [x] Can be used as a gateway and can be deployed on multiple machines
25 | > - [x] Multi-account polling, supporting AccessToken and RefreshToken at the same time
26 | > - [x] Tokens management, supports uploading and clearing
27 |
28 | > TODO
29 | > - [ ] None yet, welcome to submit an issue
30 | >
31 |
32 | ## Tokens management
33 |
34 | First configure the environment variable `AUTHORIZATION`, and then run the program
35 |
36 | Visit `/tokens` or `/api_prefix/tokens` to view the number of existing Tokens, upload new Tokens, or clear Tokens
37 |
38 | When requesting, pass in the value you configured in `AUTHORIZATION` to poll multiple accounts. `AUTHORIZATION` can be configured with multiple values separated by commas.
39 |
40 | 
41 |
42 | ## Environment variables
43 |
44 | Each environment variable has a default value. If you do not understand the meaning of the environment variable, please do not set it, let alone pass an empty value.
45 |
46 | ```
47 | # Security related
48 | API_PREFIX=your_prefix // API prefix that acts as a password. If not set, the API is easily accessed by others; after setting it, request /your_prefix/v1/chat/completions
49 | AUTHORIZATION=sk-xxxxxxxx,sk-yyyyyyyy // Go first /tokens Upload ac or rt, pass it in when requesting AUTHORIZATION Multiple accounts can be polled
50 | # Request related
51 | CHATGPT_BASE_URL=https://chatgpt.com // ChatGPT gateway address. After setting, the requested website will be changed. Multiple gateways are separated by commas.
52 | PROXY_URL=your_first_proxy, your_second_proxy //Proxy URL, multiple proxies are separated by commas
53 | ARKOSE_TOKEN_URL=https://arkose.example.com/token // Get the address of Arkose token, instructions are provided above
54 | # Function related
55 | HISTORY_DISABLED=true // Whether not to save the chat record and return conversation_id, true means not to save and not return
56 | POW_DIFFICULTY=000032 // The difficulty of proof of work to be solved. The smaller the string, the longer the calculation time. It is recommended to 000032
57 | RETRY_TIMES=3 //Number of retries on error
58 | ENABLE_GATEWAY=true // Whether to enable gateway mode (WEBUI), true means enabled
CONVERSATION_ONLY=false // Whether to call the conversation interface directly. Enable this only if the gateway you use handles PoW and Arkose on the server side.
60 | ```
61 |
62 | ## Deploy
63 |
64 | ### Deploy directly
65 |
66 | ```bash
67 | git clone https://github.com/Niansuh/chat2api
68 | cd chat2api
69 | pip install -r requirements.txt
70 | python app.py
71 | ```
72 |
73 | ### Docker deployment
74 |
75 | You need to install Docker and Docker Compose.
76 |
77 | ```bash
78 | docker run -d \
79 | --name chat2api \
80 | -p 5005:5005 \
81 | niansuh/chat2api:latest
82 | ```
83 |
84 | ### (Recommended, PLUS account available) Docker Compose deployment
85 |
86 | Create a new directory, such as chat2api, and enter that directory:
87 |
88 | ```bash
89 | mkdir chat2api
90 | cd chat2api
91 | ```
92 |
Download the docker-compose.yml file from this repository into that directory:
94 |
95 | ```bash
96 | wget https://raw.githubusercontent.com/Niansuh/chat2api/main/docker-compose.yml
97 | ```
98 |
99 | Modify the environment variables in the docker-compose.yml file and save:
100 |
101 | ```bash
102 | docker-compose up -d
103 | ```
104 |
105 | ## Use
106 |
1. To use it on the web, directly access the following address, which only supports login-free GPT-3.5:
108 |
109 | ```
110 | http://127.0.0.1:5005
111 | ```
112 |
113 | 2. Using API, supports passing AccessToken or RefreshToken, available GPT-4, GPT-4o, GPTs:
114 |
115 | ```bash
116 | curl --location 'http://127.0.0.1:5005/v1/chat/completions' \
117 | --header 'Content-Type: application/json' \
118 | --header 'Authorization: Bearer {{OpenAI APIKEY}}' \
119 | --data '{
120 | "model": "gpt-3.5-turbo",
121 | "messages": [{"role": "user", "content": "Say this is a test!"}],
122 | "stream": true
123 | }'
124 | ```
125 | Pass in your account's `AccessToken` or `RefreshToken` as `OpenAI APIKEY`
126 |
127 | If the `AUTHORIZATION` environment variable is set, the set value can be passed in as `OpenAI APIKEY` for multi-Tokens polling
128 |
129 | > - Obtain `AccessToken`: After logging in to chatgpt official website, open https://chatgpt.com/api/auth/session to obtain the value of `accessToken`
130 | > - `RefreshToken` acquisition: No acquisition method is provided here.
131 | > - Login-free gpt3.5 No need to pass in Token
132 |
133 |
134 | ## ArkoseToken
135 |
136 | > #### Currently supports external services providing ArkoseToken
137 | >
138 | > #### It is recommended to use docker-compose for deployment, which has built-in Arkose service
139 |
140 | 1. Set the environment variable ARKOSE_TOKEN_URL
141 |
142 | 2. When `ArkoseToken` is needed, `chat2api` will send a `POST` request to `ARKOSE_TOKEN_URL`
143 |
144 | 3. Please provide external services in the following format:
145 |
146 | - Request body:
147 |
148 | ```request body
149 | {"blob": "rFYaxQNEApDlx/Db.KyrE79pAAFBs70CYtbM4pMNUsc7jIkLGdiDs7vziHRGe78bqWXDo0AYyq2A10qIlcTt89lBYXJqCbONC/nD8C199pEZ/c9ocVKKtM27jZQ7fyOpWd9p5qjKeXT4xEGBFpoE3Re1DwdQeijYp7VMJQyw7RYN+IDB1QEx3aKSO6aTI+ivnhw9ztfn/p1SkvAyyOhur/ArF08WQ+rXQpxpttaSQlzMsIwlYbuUUuYE2f9JrQaYG7qip1DKvju111P6wTNy4QVlMXG32VrzaOWh4nmQ0lOcZ1DmN6u2aeJZotffHV2zOOQAqqnParidTbN+qFre2t77ZwBuGKGqLyT8LeOp02GdFwcyw0kkeX+L7vwYAzBpjA5ky0r0X+i8HpzWt8QCyWzEW9kHn9LLCTwg2MOumzjb66Ad4WDe+C1bAcOKuEyXiYh+a1cWZAOdzEuxEg90yCfI7DZR94BsoDR85gEC/Og88i098u5HV7hZZEOQ6J8fmi68FSyPkN7oLCmBsZCMAZqzapNP/MkeIMExrdw7Jf/PtMrZN4bwM56mWfyIJf5h/zXu8PUajVwE9Pj/M5VtB0spZg49JNeHExosVCAB0C0JW+T8vEIwoqiY4pRQ0lbMHTQZFpU2xURTgcgh+m6g1SEYR1FY3de1XnzfiTQq1RTNJPydj5xpt6r6okr8yIJdRhmVXlQI+pS7vi3+Lls2hnpr7L+l1mcUIMPZNBCs3AUFJNpp6SwQjZkPvKggg1p+uS6PdvKRizM9O9+FKc103AhuSia8KTrvU8tWhBhCzIHCD4LNfnkjuBWSdbDttva4AEXUoPuKkQCWaBzq4lQPUIHFOM9HmNe738vVkNdAuOYffxDNegcpIxLVgZGfbgLQ="}
150 | ```
151 |
152 | - Response body:
153 |
154 | ```response body
155 | {"token": "45017c7bb17115f36.7290869304|r=ap-southeast-1|meta=3|metabgclr=transparent|metaiconclr=%23757575|guitextcolor=%23000000|pk=0A1D34FC-659D-4E23-B17B-694DCFCF6A6C|at=40|sup=1|rid=3|ag=101|cdn_url=https%3A%2F%2Ftcr9i.openai.com%2Fcdn%2Ffc|lurl=https%3A%2F%2Faudio-ap-southeast-1.arkoselabs.com|surl=https%3A%2F%2Ftcr9i.openai.com|smurl=https%3A%2F%2Ftcr9i.openai.com%2Fcdn%2Ffc%2Fassets%2Fstyle-manager"}
156 | ```
157 |
158 | ## Common problem
159 |
160 | > - Error code:
161 | > - `401`: The current IP does not support login-free, please try changing the IP address, or setting a proxy in the environment variable `PROXY_URL`, or your authentication fails.
162 | > - `403`: Please check the specific error information in the log
163 | > - `429`: The current IP request has exceeded the limit within 1 hour. Please try again later or change the IP.
164 | > - `500`: Server internal error, request failed.
165 | > - `502`: The server gateway is wrong, or the network is unavailable. Please try changing the network environment.
166 |
167 | > - What is known:
> - Many Japanese IPs do not support login-free GPT-3.5. It is recommended to use American IPs for login-free GPT-3.5.
169 | > - 99% of accounts support free `GPT-4o`, but it is opened according to the IP region. Currently, Japan and Singapore IP are known to have a higher probability of being opened.
170 | > - What is the environment variable `AUTHORIZATION`?
171 | > - It is an authentication that you set for chat2api. After setting it, you can use the saved Tokens for polling. When requesting, it is passed in as `APIKEY`
172 |
173 | > - How to obtain AccessToken?
174 | > - After logging in to the chatgpt official website, open https://chatgpt.com/api/auth/session to obtain the value of `accessToken`
175 | > - PLUS account reports error `403`?
176 | > - PLUS account needs to configure `ArkoseToken`, please configure it according to the above
177 | > - ArkoseToken What is it and how to get it?
178 | > - Please refer to the instructions above. For more information, please refer to https://www.arkoselabs.com/
179 |
180 |
181 | ## Sponsor
182 |
183 | [](https://capsolver.com)
184 |
185 |
186 | ## License
187 |
188 | MIT License
189 |
--------------------------------------------------------------------------------
/gateway/share.py:
--------------------------------------------------------------------------------
1 | import json
2 | import random
3 | import time
4 |
5 | import jwt
6 | from fastapi import Request, HTTPException, Security
7 | from fastapi.responses import Response
8 | from fastapi.security import HTTPAuthorizationCredentials
9 |
10 | import utils.globals as globals
11 | from app import app, security_scheme
12 | from chatgpt.authorization import get_fp, verify_token
13 | from gateway.reverseProxy import get_real_req_token
14 | from utils.Client import Client
15 | from utils.Logger import logger
16 | from utils.configs import proxy_url_list, chatgpt_base_url_list, authorization_list
17 |
# Browser-like default headers sent with every upstream ChatGPT request.
# Callers copy this dict and layer auth / fingerprint headers on top.
base_headers = {
    'accept': '*/*',
    'accept-encoding': 'gzip, deflate, br, zstd',
    'accept-language': 'en-US,en;q=0.9',
    'content-type': 'application/json',
    'oai-language': 'en-US',
    'priority': 'u=1, i',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
}
29 |
30 |
def verify_authorization(bearer_token):
    """Validate a bearer token against the configured authorization list.

    Raises:
        HTTPException: 401 when the token is missing or not configured.
    """
    if not bearer_token:
        raise HTTPException(status_code=401, detail="Authorization header is missing")
    if bearer_token in authorization_list:
        return
    raise HTTPException(status_code=401, detail="Invalid authorization")
36 |
37 |
@app.get("/seedtoken")
async def get_seedtoken(request: Request, credentials: HTTPAuthorizationCredentials = Security(security_scheme)):
    """Return the token for a given seed, or all seed→token pairs.

    Query params:
        seed: optional; when present, only that seed's token is returned.

    Raises:
        HTTPException: 401 on bad authorization, 404 for an unknown seed,
        500 for any unexpected failure.
    """
    verify_authorization(credentials.credentials)
    try:
        params = request.query_params
        seed = params.get("seed")

        if seed:
            if seed not in globals.seed_map:
                raise HTTPException(status_code=404, detail=f"Seed '{seed}' not found")
            return {
                "status": "success",
                "data": {
                    "seed": seed,
                    "token": globals.seed_map[seed]["token"]
                }
            }

        token_map = {
            seed: data["token"]
            for seed, data in globals.seed_map.items()
        }
        return {"status": "success", "data": token_map}

    except HTTPException:
        # Propagate deliberate HTTP errors (e.g. the 404 above) instead of
        # letting the generic handler below rewrite them as 500s.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
64 |
65 |
@app.post("/seedtoken")
async def set_seedtoken(request: Request, credentials: HTTPAuthorizationCredentials = Security(security_scheme)):
    """Create or update the token stored for a seed, then persist the map.

    Body: JSON with required fields `seed` and `token`.

    Raises:
        HTTPException: 401 on bad authorization, 400 when either field is missing.
    """
    verify_authorization(credentials.credentials)
    data = await request.json()

    seed = data.get("seed")
    token = data.get("token")
    # Reject incomplete payloads early; previously a missing seed was
    # persisted as the JSON key "null" in the seed map file.
    if not seed or not token:
        raise HTTPException(status_code=400, detail="Missing required fields: seed and token")

    if seed not in globals.seed_map:
        globals.seed_map[seed] = {
            "token": token,
            "conversations": []
        }
    else:
        globals.seed_map[seed]["token"] = token

    with open(globals.SEED_MAP_FILE, "w", encoding="utf-8") as f:
        json.dump(globals.seed_map, f, indent=4)

    return {"status": "success", "message": "Token updated successfully"}
86 |
87 |
@app.delete("/seedtoken")
async def delete_seedtoken(request: Request, credentials: HTTPAuthorizationCredentials = Security(security_scheme)):
    """Delete one seed, or every seed when the body is {"seed": "clear"}.

    Raises:
        HTTPException: 401 on bad authorization, 400 on bad/missing input,
        404 for an unknown seed, 500 for anything unexpected.
    """
    verify_authorization(credentials.credentials)

    try:
        data = await request.json()
        seed = data.get("seed")

        if seed == "clear":
            globals.seed_map.clear()
            with open(globals.SEED_MAP_FILE, "w", encoding="utf-8") as f:
                json.dump(globals.seed_map, f, indent=4)
            return {"status": "success", "message": "All seeds deleted successfully"}

        if not seed:
            raise HTTPException(status_code=400, detail="Missing required field: seed")

        if seed not in globals.seed_map:
            raise HTTPException(status_code=404, detail=f"Seed '{seed}' not found")
        del globals.seed_map[seed]

        with open(globals.SEED_MAP_FILE, "w", encoding="utf-8") as f:
            json.dump(globals.seed_map, f, indent=4)

        return {
            "status": "success",
            "message": f"Seed '{seed}' deleted successfully"
        }

    except HTTPException:
        # Keep the intended 400/404 status codes instead of collapsing them
        # into the generic 500 below.
        raise
    except json.JSONDecodeError:
        raise HTTPException(status_code=400, detail="Invalid JSON data")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
121 |
122 |
async def chatgpt_account_check(access_token):
    """Fetch model and account info for a ChatGPT access token.

    Returns a dict with `models`, `accounts_info` and an `accountCheckInfo`
    summary (deactivation flag, preferred plan type, team account ids),
    or {} on any failure.
    """
    auth_info = {}
    client = Client(proxy=random.choice(proxy_url_list) if proxy_url_list else None)
    try:
        host_url = random.choice(chatgpt_base_url_list) if chatgpt_base_url_list else "https://chatgpt.com"
        req_token = await get_real_req_token(access_token)
        access_token = await verify_token(req_token)
        fp = get_fp(req_token)
        proxy_url = fp.pop("proxy_url", None)
        impersonate = fp.pop("impersonate", "safari15_3")

        headers = base_headers.copy()
        headers.update({"authorization": f"Bearer {access_token}"})
        headers.update(fp)

        # Replace the provisional client with one using the fingerprint's
        # proxy/impersonation; close the old one first so it doesn't leak.
        await client.close()
        client = Client(proxy=proxy_url, impersonate=impersonate)
        r = await client.get(f"{host_url}/backend-api/models?history_and_training_disabled=false", headers=headers,
                             timeout=10)
        if r.status_code != 200:
            raise HTTPException(status_code=r.status_code, detail=r.text)
        models = r.json()
        r = await client.get(f"{host_url}/backend-api/accounts/check/v4-2023-04-27", headers=headers, timeout=10)
        if r.status_code != 200:
            raise HTTPException(status_code=r.status_code, detail=r.text)
        accounts_info = r.json()

        auth_info.update({"models": models["models"]})
        auth_info.update({"accounts_info": accounts_info})

        account_ordering = accounts_info.get("account_ordering", [])
        is_deactivated = None
        plan_type = None
        team_ids = []
        for account in account_ordering:
            account_detail = accounts_info['accounts'].get(account, {}).get("account", {})
            this_is_deactivated = account_detail.get("is_deactivated", False)
            this_plan_type = account_detail.get("plan_type", "free")

            # Deactivated if ANY listed account is deactivated. (The previous
            # logic reset the flag back to False whenever a later, active
            # account followed a deactivated one.)
            if this_is_deactivated:
                is_deactivated = True
            elif is_deactivated is None:
                is_deactivated = False

            # Prefer a team plan; remember every team account id.
            if "team" in this_plan_type:
                plan_type = this_plan_type
                team_ids.append(account)
            elif plan_type is None:
                plan_type = this_plan_type

        auth_info.update({"accountCheckInfo": {
            "is_deactivated": is_deactivated,
            "plan_type": plan_type,
            "team_ids": team_ids
        }})

        return auth_info
    except Exception as e:
        logger.error(f"chatgpt_account_check: {e}")
        return {}
    finally:
        await client.close()
184 |
185 |
async def chatgpt_refresh(refresh_token):
    """Exchange a refresh token for a fresh access token via auth0.

    Returns the auth0 token response augmented with `refresh_token` and
    `accessToken` keys, or {} on any failure.
    """
    client = Client(proxy=random.choice(proxy_url_list) if proxy_url_list else None)
    try:
        payload = {
            "client_id": "pdlLIX2Y72MIl2rhLhTE9VV9bN905kBh",
            "grant_type": "refresh_token",
            "redirect_uri": "com.openai.chat://auth0.openai.com/ios/com.openai.chat/callback",
            "refresh_token": refresh_token,
        }
        resp = await client.post("https://auth0.openai.com/oauth/token", json=payload, timeout=10)
        if resp.status_code != 200:
            raise HTTPException(status_code=resp.status_code, detail=resp.text)
        body = resp.json()
        auth_info = dict(body)
        auth_info["refresh_token"] = refresh_token
        auth_info["accessToken"] = body.get("access_token", "")
        return auth_info
    except Exception as e:
        logger.error(f"chatgpt_refresh: {e}")
        return {}
    finally:
        await client.close()
209 |
210 |
@app.post("/auth/refresh")
async def refresh(request: Request):
    """Refresh/validate ChatGPT credentials supplied as form data.

    Accepts `access_token` (or `accessToken`) and/or `refresh_token`.
    Skips the auth0 refresh when the access token is still valid for more
    than five days, then enriches the response with account-check info.

    Raises:
        HTTPException: 401 when no usable credential is supplied or when
        every refresh/check path fails.
    """
    auth_info = {}
    form_data = await request.form()

    auth_info.update(form_data)

    access_token = auth_info.get("access_token", auth_info.get("accessToken", ""))
    refresh_token = auth_info.get("refresh_token", "")

    if not refresh_token and not access_token:
        raise HTTPException(status_code=401, detail="refresh_token or access_token is required")

    need_refresh = True
    if access_token:
        try:
            # Decode without signature verification — only the expiry is read.
            access_token_info = jwt.decode(access_token, options={"verify_signature": False})
            exp = access_token_info.get("exp", 0)
            # Keep the current token while it remains valid for > 5 days.
            if exp > int(time.time()) + 60 * 60 * 24 * 5:
                need_refresh = False
        except Exception as e:
            logger.error(f"access_token: {e}")

    if refresh_token and need_refresh:
        chatgpt_refresh_info = await chatgpt_refresh(refresh_token)
        if chatgpt_refresh_info:
            auth_info.update(chatgpt_refresh_info)
            access_token = auth_info.get("accessToken", "")
            account_check_info = await chatgpt_account_check(access_token)
            if account_check_info:
                auth_info.update(account_check_info)
                auth_info.update({"accessToken": access_token})
                return Response(content=json.dumps(auth_info), media_type="application/json")
    elif access_token:
        account_check_info = await chatgpt_account_check(access_token)
        if account_check_info:
            auth_info.update(account_check_info)
            auth_info.update({"accessToken": access_token})
            return Response(content=json.dumps(auth_info), media_type="application/json")

    raise HTTPException(status_code=401, detail="Unauthorized")
252 |
253 |
--------------------------------------------------------------------------------
/gateway/reverseProxy.py:
--------------------------------------------------------------------------------
1 | import json
2 | import random
3 | import time
4 | import uuid
5 | from datetime import datetime, timezone
6 |
7 | from fastapi import Request, HTTPException
8 | from fastapi.responses import StreamingResponse, Response
9 | from starlette.background import BackgroundTask
10 |
11 | import utils.globals as globals
12 | from chatgpt.authorization import verify_token, get_req_token, get_fp
13 | from utils.Client import Client
14 | from utils.Logger import logger
15 | from utils.configs import chatgpt_base_url_list
16 |
17 |
def generate_current_time():
    """Return the current UTC time as an ISO-8601 string with a 'Z' suffix."""
    now_utc = datetime.now(timezone.utc)
    return now_utc.isoformat(timespec='microseconds').replace('+00:00', 'Z')
22 |
23 |
# Hop-by-hop, CDN, and infrastructure headers that must never be forwarded
# upstream. Deduplicated: the original list repeated 13 entries, which was
# harmless for membership tests but obscured the actual set.
headers_reject_list = [
    "x-real-ip",
    "x-forwarded-for",
    "x-forwarded-proto",
    "x-forwarded-port",
    "x-forwarded-host",
    "x-forwarded-server",
    "cf-warp-tag-id",
    "cf-visitor",
    "cf-ray",
    "cf-connecting-ip",
    "cf-ipcountry",
    "cdn-loop",
    "remote-host",
    "x-frame-options",
    "x-xss-protection",
    "x-content-type-options",
    "content-security-policy",
    "host",
    "cookie",
    "connection",
    "content-length",
    "content-encoding",
    "x-middleware-prefetch",
    "x-nextjs-data",
    "purpose",
    "x-forwarded-uri",
    "x-forwarded-path",
    "x-forwarded-method",
    "x-forwarded-protocol",
    "x-forwarded-scheme",
    "cf-request-id",
    "cf-worker",
    "cf-access-client-id",
    "cf-access-client-device-type",
    "cf-access-client-device-model",
    "cf-access-client-device-name",
    "cf-access-client-device-brand",
]
74 |
75 |
async def get_real_req_token(token):
    """Resolve *token* to a usable upstream request token.

    A 45-character token or a JWT ("eyJhbGciOi" prefix) from the first
    lookup is used directly; anything else falls back to the seed-based
    lookup form of get_req_token.
    """
    candidate = get_req_token(token)
    looks_real = len(candidate) == 45 or candidate.startswith("eyJhbGciOi")
    if not looks_real:
        candidate = get_req_token(None, token)
    return candidate
83 |
84 |
def save_conversation(token, conversation_id, title=None):
    """Record/refresh a conversation in the global maps and persist them.

    Creates or updates the conversation_map entry (id, title, update_time)
    and moves the conversation to the front of the token's list in seed_map
    (most-recently-used ordering), then rewrites both JSON files.
    NOTE(review): assumes `token` already exists as a key in
    globals.seed_map — confirm callers guarantee this.
    """
    if conversation_id not in globals.conversation_map:
        conversation_detail = {
            "id": conversation_id,
            "title": title,
            "update_time": generate_current_time()
        }
        globals.conversation_map[conversation_id] = conversation_detail
    else:
        globals.conversation_map[conversation_id]["update_time"] = generate_current_time()
        if title:
            globals.conversation_map[conversation_id]["title"] = title
    # Keep the newest conversation at the front of the token's list.
    if conversation_id not in globals.seed_map[token]["conversations"]:
        globals.seed_map[token]["conversations"].insert(0, conversation_id)
    else:
        globals.seed_map[token]["conversations"].remove(conversation_id)
        globals.seed_map[token]["conversations"].insert(0, conversation_id)
    with open(globals.CONVERSATION_MAP_FILE, "w", encoding="utf-8") as f:
        json.dump(globals.conversation_map, f, indent=4)
    with open(globals.SEED_MAP_FILE, "w", encoding="utf-8") as f:
        json.dump(globals.seed_map, f, indent=4)
    if title:
        logger.info(f"Conversation ID: {conversation_id}, Title: {title}")
108 |
109 |
async def content_generator(r, token):
    """Re-yield upstream SSE chunks while sniffing conversation metadata.

    For seed-based tokens (anything that is not a 45-char token or a JWT),
    early chunks are parsed to discover the conversation_id and title so
    they can be recorded via save_conversation(). Every chunk is passed
    through to the client unchanged.
    """
    conversation_id = None
    title = None
    async for chunk in r.aiter_content():
        try:
            # Only sniff for seed tokens, and stop once both id and title are known.
            if (len(token) != 45 and not token.startswith("eyJhbGciOi")) and (not conversation_id or not title):
                chat_chunk = chunk.decode('utf-8')
                if chat_chunk.startswith("data: {"):
                    # Isolate the first JSON payload of the SSE frame;
                    # [6:] strips the leading "data: " prefix.
                    if "\n\nevent: delta" in chat_chunk:
                        index = chat_chunk.find("\n\nevent: delta")
                        chunk_data = chat_chunk[6:index]
                    elif "\n\ndata: {" in chat_chunk:
                        index = chat_chunk.find("\n\ndata: {")
                        chunk_data = chat_chunk[6:index]
                    else:
                        chunk_data = chat_chunk[6:]
                    chunk_data = chunk_data.strip()
                    if conversation_id is None:
                        conversation_id = json.loads(chunk_data).get("conversation_id")
                        save_conversation(token, conversation_id)
                        title = globals.conversation_map[conversation_id].get("title")
                    if title is None:
                        if "title" in chunk_data:
                            pass  # no-op retained from original (likely a leftover debug hook)
                        title = json.loads(chunk_data).get("title")
                        if title:
                            save_conversation(token, conversation_id, title)
        except Exception as e:
            # Partial/malformed chunks are expected mid-stream; ignore and pass through.
            # logger.error(e)
            # logger.error(chunk.decode('utf-8'))
            pass
        yield chunk
142 |
143 |
async def chatgpt_reverse_proxy(request: Request, path: str):
    """Proxy a request under *path* to ChatGPT (or its asset/file CDNs).

    Injects fingerprint and auth headers, rewrites scheme/host in redirects
    and HTML/JSON bodies back to the gateway's own origin, and streams
    event-stream responses through content_generator so conversations get
    recorded.

    Raises:
        HTTPException: re-raised as-is when deliberate; 500 for any
        unexpected failure.
    """
    try:
        origin_host = request.url.netloc
        # Determine the external scheme; proxy headers win over the socket.
        if request.url.is_secure:
            petrol = "https"
        else:
            petrol = "http"
        if "x-forwarded-proto" in request.headers:
            petrol = request.headers["x-forwarded-proto"]
        if "cf-visitor" in request.headers:
            cf_visitor = json.loads(request.headers["cf-visitor"])
            petrol = cf_visitor.get("scheme", petrol)

        params = dict(request.query_params)
        request_cookies = dict(request.cookies)

        # Strip hop-by-hop / infrastructure headers before forwarding.
        headers = {
            key: value for key, value in request.headers.items()
            if (key.lower() not in ["host", "origin", "referer", "priority",
                                    "oai-device-id"] and key.lower() not in headers_reject_list)
        }

        # Route static assets, user files, and the ab.* API to their hosts.
        base_url = random.choice(chatgpt_base_url_list) if chatgpt_base_url_list else "https://chatgpt.com"
        if "assets/" in path:
            base_url = "https://cdn.oaistatic.com"
        if "file-" in path and "backend-api" not in path:
            base_url = "https://files.oaiusercontent.com"
        if "v1/" in path:
            base_url = "https://ab.chatgpt.com"

        token = request.cookies.get("token", "")
        req_token = await get_real_req_token(token)
        fp = get_fp(req_token)
        proxy_url = fp.pop("proxy_url", None)
        impersonate = fp.pop("impersonate", "safari15_3")
        user_agent = fp.get("user-agent")
        headers.update(fp)

        headers.update({
            "accept-language": "en-US,en;q=0.9",
            "host": base_url.replace("https://", "").replace("http://", ""),
            "origin": base_url,
            "referer": f"{base_url}/"
        })
        if "ab.chatgpt.com" in base_url:
            if "statsig-api-key" not in headers:
                headers.update({
                    "statsig-sdk-type": "js-client",
                    "statsig-api-key": "client-tnE5GCU2F2cTxRiMbvTczMDT1jpwIigZHsZSdqiy4u",
                    "statsig-sdk-version": "5.1.0",
                    "statsig-client-time": int(time.time() * 1000)
                })

        token = headers.get("authorization", "").replace("Bearer ", "")
        if token:
            req_token = await get_real_req_token(token)
            access_token = await verify_token(req_token)
            headers.update({
                "authorization": f"Bearer {access_token}",
                "oai-device-id": fp.get("oai-device-id", str(uuid.uuid4()))
            })

        data = await request.body()

        client = Client(proxy=proxy_url, impersonate=impersonate)
        try:
            background = BackgroundTask(client.close)
            r = await client.request(request.method, f"{base_url}/{path}", params=params, headers=headers,
                                     cookies=request_cookies, data=data, stream=True, allow_redirects=False)
            if r.status_code == 307 or r.status_code == 302 or r.status_code == 301:
                # Rewrite redirect targets back to this gateway's origin/scheme.
                return Response(status_code=307,
                                headers={"Location": r.headers.get("Location")
                                .replace("ab.chatgpt.com", origin_host)
                                .replace("chatgpt.com", origin_host)
                                .replace("cdn.oaistatic.com", origin_host)
                                .replace("https", petrol)}, background=background)
            elif 'stream' in r.headers.get("content-type", ""):
                logger.info(f"Request token: {req_token}")
                logger.info(f"Request proxy: {proxy_url}")
                logger.info(f"Request UA: {user_agent}")
                logger.info(f"Request impersonate: {impersonate}")
                return StreamingResponse(content_generator(r, token), media_type=r.headers.get("content-type", ""),
                                         background=background)
            else:
                if "/backend-api/conversation" in path or "/register-websocket" in path:
                    # Pass conversation/websocket payloads through untouched.
                    response = Response(content=(await r.atext()), media_type=r.headers.get("content-type"),
                                        status_code=r.status_code, background=background)
                else:
                    content = await r.atext()
                    content = (content
                               .replace("ab.chatgpt.com", origin_host)
                               .replace("cdn.oaistatic.com", origin_host)
                               .replace("chatgpt.com", origin_host)
                               .replace("https", petrol))
                    rheaders = dict(r.headers)
                    # Forward only the cache-relevant headers.
                    rheaders = {
                        "cache-control": rheaders.get("cache-control", ""),
                        "content-type": rheaders.get("content-type", ""),
                        "expires": rheaders.get("expires", "")
                    }
                    response = Response(content=content, headers=rheaders,
                                        status_code=r.status_code, background=background)
                return response
        except Exception:
            # Close the upstream client, then propagate. The original code
            # swallowed the error here and fell through to return None,
            # which FastAPI cannot turn into a response.
            await client.close()
            raise
    except HTTPException as e:
        raise e
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
257 |
--------------------------------------------------------------------------------
/chatgpt/turnstile.py:
--------------------------------------------------------------------------------
1 | import pybase64
2 | import json
3 | import random
4 | import time
5 | from typing import Any, Callable, Dict, List, Union
6 |
7 |
class OrderedMap:
    """A mapping that remembers insertion order and ignores re-inserts.

    Re-adding an existing key is a no-op (the first value wins).
    JSON serialization emits keys in first-insertion order.
    """

    def __init__(self):
        self.keys = []
        self.values = {}

    def add(self, key: str, value: Any):
        """Store *value* under *key* unless the key is already present."""
        if key in self.values:
            return
        self.keys.append(key)
        self.values[key] = value

    def to_json(self):
        """Serialize to a JSON object string preserving insertion order."""
        ordered = {k: self.values[k] for k in self.keys}
        return json.dumps(ordered)
20 |
21 |
# Type aliases describing the decoded Turnstile token script and VM state.
TurnTokenList = List[List[Any]]  # parsed script: list of [opcode, *operands] entries
FloatMap = Dict[float, Any]  # VM "register" map keyed by numeric slot ids
StringMap = Dict[str, Any]
FuncType = Callable[..., Any]
26 |
27 |
def get_turnstile_token(dx: str, p: str) -> Union[str, None]:
    """Base64-decode *dx* and XOR-decrypt it with key *p*.

    Returns the decrypted token script, or None when decoding fails.
    """
    try:
        raw = pybase64.b64decode(dx).decode()
        return process_turnstile_token(raw, p)
    except Exception as e:
        print(f"Error in get_turnstile_token: {e}")
        return None
35 |
36 |
def process_turnstile_token(dx: str, p: str) -> str:
    """XOR each character of *dx* with the repeating key *p*.

    With an empty key the input is returned unchanged. The transform is its
    own inverse: applying it twice with the same key round-trips.
    """
    if not p:
        return ''.join(dx)
    key_len = len(p)
    return ''.join(chr(ord(ch) ^ ord(p[idx % key_len])) for idx, ch in enumerate(dx))
46 |
47 |
def is_slice(input_val: Any) -> bool:
    """Return True when *input_val* is a list or a tuple (a JS array analogue)."""
    return isinstance(input_val, list) or isinstance(input_val, tuple)
50 |
51 |
def is_float(input_val: Any) -> bool:
    """Return True when *input_val* is a float (the VM's number type)."""
    return isinstance(input_val, float)
54 |
55 |
def is_string(input_val: Any) -> bool:
    """Return True when *input_val* is a str."""
    return isinstance(input_val, str)
58 |
59 |
def to_str(input_val: Any) -> str:
    """Mimic JavaScript string coercion for the Turnstile VM.

    None maps to "undefined"; well-known "window.*" paths map to their
    browser string forms; a list of strings joins with commas; everything
    else falls back to str().
    """
    if input_val is None:
        return "undefined"
    if isinstance(input_val, float):
        return str(input_val)
    if isinstance(input_val, str):
        special_cases = {
            "window.Math": "[object Math]",
            "window.Reflect": "[object Reflect]",
            "window.performance": "[object Performance]",
            "window.localStorage": "[object Storage]",
            "window.Object": "function Object() { [native code] }",
            "window.Reflect.set": "function set() { [native code] }",
            "window.performance.now": "function () { [native code] }",
            "window.Object.create": "function create() { [native code] }",
            "window.Object.keys": "function keys() { [native code] }",
            "window.Math.random": "function random() { [native code] }"
        }
        return special_cases.get(input_val, input_val)
    if isinstance(input_val, list) and all(isinstance(item, str) for item in input_val):
        return ','.join(input_val)
    return str(input_val)
83 |
84 |
def get_func_map() -> FloatMap:
    """Build the opcode→handler table for the Turnstile token "VM".

    `process_map` doubles as the register file: float keys hold values,
    handler functions, or emulated browser objects (slot 10 is the string
    "window"). Handlers mutate it in place via closure.
    """
    process_map: FloatMap = {}

    # Opcode 1: XOR-decrypt slot e's string with slot t's string, in place.
    def func_1(e: float, t: float):
        e_str = to_str(process_map[e])
        t_str = to_str(process_map[t])
        res = process_turnstile_token(e_str, t_str)
        process_map[e] = res

    # Opcode 2: store the literal operand t into slot e.
    def func_2(e: float, t: Any):
        process_map[e] = t

    # Opcode 5: list append, or JS-style "+" (string concat / float add;
    # anything else yields "NaN"), folding slot t into slot e.
    def func_5(e: float, t: float):
        n = process_map[e]
        tres = process_map[t]
        if is_slice(n):
            nt = n + [tres]
            process_map[e] = nt
        else:
            if is_string(n) or is_string(tres):
                res = to_str(n) + to_str(tres)
            elif is_float(n) and is_float(tres):
                res = n + tres
            else:
                res = "NaN"
            process_map[e] = res

    # Opcode 6: slot e = "t.n" property path; window.document.location is
    # special-cased to the ChatGPT origin.
    def func_6(e: float, t: float, n: float):
        tv = process_map[t]
        nv = process_map[n]
        if is_string(tv) and is_string(nv):
            res = f"{tv}.{nv}"
            if res == "window.document.location":
                process_map[e] = "https://chatgpt.com/"
            else:
                process_map[e] = res
        else:
            print("func type 6 error")

    # Opcode 24: like opcode 6 but without the location special case.
    def func_24(e: float, t: float, n: float):
        tv = process_map[t]
        nv = process_map[n]
        if is_string(tv) and is_string(nv):
            process_map[e] = f"{tv}.{nv}"
        else:
            print("func type 24 error")

    # Opcode 7: call slot e with resolved args; "window.Reflect.set" is
    # emulated as OrderedMap.add(obj, key, value).
    def func_7(e: float, *args):
        n = [process_map[arg] for arg in args]
        ev = process_map[e]
        if isinstance(ev, str):
            if ev == "window.Reflect.set":
                obj = n[0]
                key_str = str(n[1])
                val = n[2]
                obj.add(key_str, val)
        elif callable(ev):
            ev(*n)

    # Opcode 17: call slot t with resolved args, storing the result in slot
    # e; several browser APIs are emulated inline (performance.now relative
    # to the module-level start_time, Object.create/keys, Math.random).
    def func_17(e: float, t: float, *args):
        i = [process_map[arg] for arg in args]
        tv = process_map[t]
        res = None
        if isinstance(tv, str):
            if tv == "window.performance.now":
                current_time = time.time_ns()
                elapsed_ns = current_time - int(start_time * 1e9)
                res = (elapsed_ns + random.random()) / 1e6
            elif tv == "window.Object.create":
                res = OrderedMap()
            elif tv == "window.Object.keys":
                if isinstance(i[0], str) and i[0] == "window.localStorage":
                    res = ["STATSIG_LOCAL_STORAGE_INTERNAL_STORE_V4", "STATSIG_LOCAL_STORAGE_STABLE_ID",
                           "client-correlated-secret", "oai/apps/capExpiresAt", "oai-did",
                           "STATSIG_LOCAL_STORAGE_LOGGING_REQUEST", "UiState.isNavigationCollapsed.1"]
            elif tv == "window.Math.random":
                res = random.random()
        elif callable(tv):
            res = tv(*i)
        process_map[e] = res

    # Opcode 8: copy slot t into slot e.
    def func_8(e: float, t: float):
        process_map[e] = process_map[t]

    # Opcode 14: JSON-parse the string in slot t into slot e.
    def func_14(e: float, t: float):
        tv = process_map[t]
        if is_string(tv):
            token_list = json.loads(tv)
            process_map[e] = token_list
        else:
            print("func type 14 error")

    # Opcode 15: JSON-serialize slot t into slot e.
    def func_15(e: float, t: float):
        tv = process_map[t]
        process_map[e] = json.dumps(tv)

    # Opcode 18: base64-decode slot e in place.
    def func_18(e: float):
        ev = process_map[e]
        e_str = to_str(ev)
        decoded = pybase64.b64decode(e_str).decode()
        process_map[e] = decoded

    # Opcode 19: base64-encode slot e in place.
    def func_19(e: float):
        ev = process_map[e]
        e_str = to_str(ev)
        encoded = pybase64.b64encode(e_str.encode()).decode()
        process_map[e] = encoded

    # Opcode 20: if slots e and t are equal, call slot n with resolved args.
    def func_20(e: float, t: float, n: float, *args):
        o = [process_map[arg] for arg in args]
        ev = process_map[e]
        tv = process_map[t]
        if ev == tv:
            nv = process_map[n]
            if callable(nv):
                nv(*o)
        else:
            print("func type 20 error")

    # Opcode 21: deliberate no-op.
    def func_21(*args):
        pass

    # Opcode 23: if slot e is set, call slot t with the RAW (unresolved) args.
    def func_23(e: float, t: float, *args):
        i = list(args)
        ev = process_map[e]
        tv = process_map[t]
        if ev is not None:
            if callable(tv):
                tv(*i)

    # Register handlers under their opcodes; slot 10 seeds the "window" root.
    process_map.update({
        1: func_1, 2: func_2, 5: func_5, 6: func_6, 24: func_24, 7: func_7,
        17: func_17, 8: func_8, 10: "window", 14: func_14, 15: func_15,
        18: func_18, 19: func_19, 20: func_20, 21: func_21, 23: func_23
    })

    return process_map
222 |
# Wall-clock anchor (seconds); set by process_turnstile() and used by the
# opcode-17 handler to emulate window.performance.now relative timings.
start_time = 0
224 |
225 |
def process_turnstile(dx: str, p: str) -> str:
    """Decrypt and execute a Turnstile token script, returning the answer.

    Decrypts *dx* with key *p*, interprets the resulting instruction list
    through the opcode table, and returns the base64 result captured by the
    opcode-3 callback ("" when decryption fails).
    """
    global start_time
    start_time = time.time()

    decrypted = get_turnstile_token(dx, p)
    if decrypted is None:
        return ""

    instructions = json.loads(decrypted)
    res = ""
    process_map = get_func_map()

    # Opcode 3: capture the final answer (base64-encoded) into `res`.
    def func_3(e: str):
        nonlocal res
        res = pybase64.b64encode(e.encode()).decode()

    process_map[3] = func_3
    process_map[9] = instructions
    process_map[16] = p

    for instruction in instructions:
        try:
            opcode, *operands = instruction
            handler = process_map.get(opcode)
            if callable(handler):
                handler(*operands)
            # Unknown opcodes are silently skipped, matching the JS VM.
        except Exception:
            # Individual opcode failures are tolerated and ignored.
            pass

    return res
261 |
262 |
263 | if __name__ == "__main__":
264 | result = process_turnstile(
265 | "PBp5bWF1cHlLe1ttQhRfaTdmXEpidGdEYU5JdGJpR3xfHFVuGHVEY0tZVG18Vh54RWJ5CXpxKXl3SUZ7b2FZAWJaTBl6RGQZURh8BndUcRlQVgoYalAca2QUX24ffQZgdVVbbmBrAH9FV08Rb2oVVgBeQVRrWFp5VGZMYWNyMnoSN0FpaQgFT1l1f3h7c1RtcQUqY1kZbFJ5BQRiZEJXS3RvHGtieh9PaBlHaXhVWnVLRUlKdwsdbUtbKGFaAlN4a0V/emUJe2J2dl9BZkAxZWU/WGocRUBnc3VyT3F4WkJmYSthdBIGf0RwQ2FjAUBnd3ZEelgbVUEIDAJjS1VZbU9sSWFjfk55J2lZFV0HWX1cbVV5dWdAfkFIAVQVbloUXQtYaAR+VXhUF1BZdG4CBHRyK21AG1JaHhBFaBwCWUlocyQGVT4NBzNON2ASFVtXeQRET1kARndjUEBDT2RKeQN7RmJjeVtvZGpDeWJ1EHxafVd+Wk1AbzdLVTpafkd9dWZKeARecGJrS0xcenZIEEJQOmcFa01menFOeVRiSGFZC1JnWUA0SU08QGgeDFFgY34YWXAdZHYaHRhANFRMOV0CZmBfVExTWh9lZlVpSnx6eQURb2poa2RkQVJ0cmF0bwJbQgB6RlRbQHRQaQFKBHtENwVDSWpgHAlbTU1hXEpwdBh2eBlNY3l2UEhnblx7AmpaQ08JDDAzJUVAbn5IA2d8XX5ZFVlrYWhSXWlYQlEdZlQ/QUwuYwJgTG5GZghSRHdCYk1CWWBjclp0aWo3TWMSQmFaaAdge05FbmFhH3hxCFZuIX1BY01WVW5ABx5jfG1ZbjcZEiwwPFYQVm0sdHV8Xnl7alRuemgKZUwICklweW1heHR5Q3UqYVoSR3BCaldIc3Z8SmJOS212CAY5AmMkYmMaRn5UXEthZFsHYFx7ZHRnYV5tcFBZeHocQxUXXU0bYk0VFUZ0ZgFrSWcMRksCAwdJEBBncF12fGUVdnFNQnl4ZQB9WUclYGMRe04TQUZMf0FEbEthW357HEN2aVhAdHAMH0NPdWFicm1YbzNRBSkWMDUAOVdXbBlfRz51ah54YG5iVX9sR2t6RF1pR1RGU20MABBWQy55T3dQfmlUfmFrA35gY2AdDiBWMWVlP1hqHEVAZ3NzfE9/c1pCZWErYXQSB2BKcENjew1baXB9Rm1aG1VBCAkJY01aWW1NbklgZH5Oek1rTX9FFEB7RHNGEG9pKH1eRgFSZGJJdkcMQHUSY0IRQRkzUmFgBG90cklvVwNZThIHQXYABjFJaApCWh1qUEhnWVpiBHxDRDlAHg8kFVcCY1dCUk8VRm9obEN9e21EdnluWxN7eWt8RnFOekRTRXZKXkNPWH40YGMRXHwfRHZ7Z1JKS2R9XG1XR09qCGlaZmZ/QXwnfloWTQxIflxbSVNdSUZgHBRLKCwpQwwmXzB2NFRMOVxUTFNfH3BoRVhfWkcBYghVaSh0ZWMFeG9qBWp5eENNeGNldncHR0wBezVPTjdlSGcOTndjVkAUVl99YQFkRUE2YlNKe3ppeml2V2lvYkhGHjtbNHIALywsMScPEjEFO3Q1MQ0UGDYvK148ETYxIzEcD0gzchNcLSs+LAJxJiEQKBd5MCsXCRclFA0gBRg3axk1HTkBGyoUPRhwCwI2OAIRB2gUBRcjATt6ORQ9JDANOHFlEQITIC8VOS4GAC49GDscBBQMNQ4hDQtQZHYMHmk3BRFHeHZvcXNvd01+WXxPFF9pN2ZaSmR3Z0RkQkl7YmlHbzMsSS8HEy4PPggxGAAYBBcuJREBEQA7LAMANgEiNiZgFR5Mchs0eH83ERFsGCceZTESe2MeEgQSGwgXIgIbb38FFBAWEC1GFC42OQ0CCwcudSIpOwY6MRw7IjwYAgAYD3UbOA8AaHoHPiUkBgQmTA4FUxgAOCoJKxNmVSoANDIzAjdlDxA6ISIOKhQDEhwLPS82IT4CUFIsO
yIwLD4+BBsDAww1AnMqHAIlMiMTGT0oAQlUE3QDQhIUACMxDwhGLxEXHQsSIV0FLgMaAgJ2LgsEHyEPLBcKOBtfUhg9MiAXPT5fHhA1Wg8+BxoPLgYcGS0WRSsELjIZKg8EJw4lFQAoUCcTcxASLS9BOTsZD3ERGRUhOD1YUjJxWBEBdnc9PwkQNytyED0zAQtaG3Y2ACsWXSsoPV4+DBQ2DyQ+bg0MHxVHKhAqNh8QPVkNET5fAis5Jh0uGxACKA8kOyo6IBkHIgkKdx0sAgA8SAQVHCkCLwcoBnQHGRAeAxAXOQAdKxhrNxMLJQYrKwAxHnFcOA4HIlEEAVkVDigqAwMoORQQKFkaOy0pISMoRmYDPyFLCRIqVhwCImITET04Gx8QPTMWWRQDcgstAioLGSkBTjw7ECYLeSgraxFoazw2CQcrJgU1cQ0fAB4YEykpIQMEPgJ0NUY0Lhc8IBEEWQtyNSkeECEmHitRFhsULgUrASkfO3E6XDsqLTAVcg8pFCwUaT8rPiMALzskFQQNJBkfKgUxBwscAj4YWhYHDxoXEBRwHgUUMx4gCxsCGBRJAz5yABsCAxIPFSo2AQILLSs7NS4EAGEnFBANJBgTOV0FLWJSKAUQeRkDKyAjCjYqIwEUBwAUPT5iBgohDzYmBAEBJS4pCSspGgUQBDsuD3wvKFd7HwE/EQ8ZFQgRICYEAgUuRhovHFYdM15eNwIgZBgmBVIoJGBnACRXChIKQR8lDVh2CicfKTIBcxwzNionIg4PEVI0FyMQOTkaABI3JSoAByVTKAItJn1ULjcEOG4gBjoqDnAQDjsGHzA2cF92CTIlAhMdchoJABA6KQEyajcgBAM+IhwyE292OTQ0IzUsAVY8EBcxMRxoKgEhBRQSGTMLfQsgFDp1PDQsCgEFKAkIASA8EhF4IgpjIzMJJC4WcyYcEQkPPSMBHlUSfFkuPCQnKiMaAGYWEC80EQIeex9wJjszCSQMFg4iDDcvVxMEBR17Knw0OnMVRyc4fj9ROQpiABoWFxAscR0Na3gBHWdyPjcOBCMleBQgKR4rLQViBhcLGnEgDDZ4ACoPJhQQIH4nHBoDNhkWCyUWDRgVFx4YAwAzFjAELCUPNScjDQ4hDB54Gwg4K2g3BmMBKjkwGggiFAo0Iwp6BBQeDxYwBz4VKCIzeDQmJjYeXTUmHCZpcygrAQt3NAFrBjsmGhtWJz8uUiR3CjorPy4NJXUuOjYIBDoMDGM4MwxxNiMNGg4SES01GHA1O3EIOSo7LQUXHnEeOgIjPXENLjQSfn4OVSkSAgcFBQIxDQUuajUPOj0MFwwcZhMnVzQOCQMDAWBWZBUPPx4oBAA5YA5qBwcrEwQ+IjppEz47Ji4CE2YNKTEzAUcjBgAoFFwyKHwbCz8pARUrDgIIMgg1H2MXGTUBFx0XAgMdEj0HOQ4MIionOyE2cUcxHAA7Iw0sNTkBDUU9GRsbPgkzOBwNKD9hHBdVJipxVTYRAgMmGAIVKxc2JREoNxgtMysDHggNExYWBh8FHwUfBQ8/KQYONiUrLjkfIwpxHDgYCTw1MDEMMBU2JRErK2crDzZdCy94UjAOC00MMgFCKTJxZw8mdgoSCzQMcAtzDC8hMBw7CHJ/GjQ+Cw4aDAVyMTMwEi8gHhUfNB8sDi4hWTQ0GDdJdSEVNggXAhY7Knd3MQ4KGhoZDm11DysqLxI8NXYZCXMDMngaMQg5PSsYKjYxJRJzdx8jOzQlIwklEwgtDhEMdwskLAs3Izg7LQscJi4IeyE3GiAbDAYrHzEzEjcxKicAdSteCTMqJHsUMSEXMT0kJD4Ga3V2Kk4rMSUZHS8qMAsqHTsEPR8RXzArXzc2OgYQOy4oPXc1AQM+DhpuMDFRFTMrBn8pCQkCdCE/MDILKG8uGllRNRlGRy0NGjsyFGoTKSUsOiwkAi8sNRJUNgQ0czEuFgUNMShjBAsBDDErbywzKBoKKzkeOncPDR42HCskN
Gg7BjEMVgAvOyApLQ5WPgAVHiM+Jz8eOA8BOSI7Xwo4JGIJNjYdCz0MFmAuPhEbLzc3VjUQAGwoHjATcSAGdwUVCjIqMDA1OyQNUB5gGRw6UwpkNS0eECoqbCt2KzQEdD1jBzEZOxQdIjBoMxVqCyoEBToSDB5xPz44LA9MCDAKMAZhLgZZACwMKAYDPWgHODIGHiwMIDUpZ2YEMA04By8INQl3ClQLLC8wCDIIXG8/PSARMDYQLxQyeh8qFTg7MhhUDzkLKwNzDT8RPQ84JC0dDTAqGDA7KxkoKDAcPzh1KQo9LzkeN3YMIxc4HzsBNxorAj0jQX90CCMlPQ4FMTYPfDgwDA0sMyoJHyw6EigMCwULUBsDcnsAdQUAKRAMFBIqLQwCGCkLLmoOJQIEOSU/JQ0JFQgmDx02LwgrIjMLHQQ9DCw+cgoRJREWZAQkCyoyNgskJip0JDg5cy1BXXIzJAl3GCQCdggwZXEbBmcPNAwwCAV9fAkGDDUUBhBmKTgyKAo0KRklcRc/IxY5KQ8SACIKEgg4FVUuDx0FUVoiK3IuEiQEGQkkYToJDhcPJhVTfA8zMiMhFgxnAystCycgLTweB1A0GAMuACIBVEUKHSYiCR0UJA0ENQsRBwUPCgEpMCcvGyUKdxcvH3U5OAwRegMnCiE1IxYiOgsGEGoOAhg/DxJ9IggHCzESCgMsJgJ9awodFDksDRAyCyA1NwodDCwJOFcWCw0yNwokfTUKLwt3IwolIwwocTcbRRAeCwoMHiUZOWkeCRclHihWMyVVcTcfVQEkJjAyMyReOT0jEFwMC1UPPyMwATQnO1oxHz8DNSIoAScYMBMtDi8iFgwgHwwKMAxnDjsXDQooCx4YHSY4JQYYPgQ0Cz0PVkQEEQYqKCIWPTELLBsxElgUMBcENhMKPQQRbyQVRhJdREdUW0tUYB4MX2BjeAU8bxEfZUVYW1VHTF5OSQV/f1xBMU5Jamd7QX9fbWd4H3p1ZhNuYmRFVHRyZHRnBltCCnxGV1YxeEQcDUp3ZlJAFFhafWEKFUlQQ25cOW9iHm90Yk5teXpaSGdhXHsBYStPTR1fdG5wHUIAZ0ZuZWVTeFQVWWliaFxSGFRQOARhQlRVQFVpBmBObEZmAUlKdU9gW0VFbHJkXW0Ffko6cmVTfEx3CXdvV1x+eWMDE2h1IXlJZ0J1VkNKe1cGBnZkcE1gdFJbbXdsWntMECo=",
266 | "gAAAAACWzMwMzIsIlRodSBKdWwgMTEgMjAyNCAwMzoxMDo0NiBHTVQrMDgwMCAo5Lit5Zu95qCH5YeG5pe26Ze0KSIsNDI5NDcwNTE1MiwxLCJNb3ppbGxhLzUuMCAoV2luZG93cyBOVCAxMC4wOyBXaW42NDsgeDY0KSBBcHBsZVdlYktpdC81MzcuMzYgKEtIVE1MLCBsaWtlIEdlY2tvKSBDaHJvbWUvMTI2LjAuMC4wIFNhZmFyaS81MzcuMzYgRWRnLzEyNi4wLjAuMCIsImh0dHBzOi8vY2RuLm9haXN0YXRpYy5jb20vX25leHQvc3RhdGljL2NodW5rcy9wYWdlcy9fYXBwLWMwOWZmNWY0MjQwMjcwZjguanMiLCJjL1pGWGkxeTNpMnpaS0EzSVQwNzRzMy9fIiwiemgtQ04iLCJ6aC1DTixlbixlbi1HQixlbi1VUyIsMTM1LCJ3ZWJraXRUZW1wb3JhcnlTdG9yYWdl4oiSW29iamVjdCBEZXByZWNhdGVkU3RvcmFnZVF1b3RhXSIsIl9yZWFjdExpc3RlbmluZ3NxZjF0ejFzNmsiLCJmZXRjaCIsMzY1NCwiNWU1NDUzNzItMzcyNy00ZDAyLTkwMDYtMzMwMDRjMWJmYTQ2Il0="
267 | )
268 | print(result)
269 |
--------------------------------------------------------------------------------
/gateway/backend.py:
--------------------------------------------------------------------------------
1 | import json
2 | import random
3 | import re
4 | import time
5 | import uuid
6 |
7 | from fastapi import Request, HTTPException
8 | from fastapi.responses import RedirectResponse, StreamingResponse, Response
9 | from starlette.background import BackgroundTask
10 | from starlette.concurrency import run_in_threadpool
11 |
12 | import utils.globals as globals
13 | from app import app
14 | from chatgpt.authorization import verify_token, get_fp
15 | from chatgpt.proofofWork import get_answer_token, get_config, get_requirements_token
16 | from gateway.chatgpt import chatgpt_html
17 | from gateway.reverseProxy import chatgpt_reverse_proxy, content_generator, get_real_req_token, headers_reject_list
18 | from utils.Client import Client
19 | from utils.Logger import logger
20 | from utils.configs import x_sign, turnstile_solver_url, chatgpt_base_url_list, no_sentinel
21 |
# Paths a shared/seed token must never reach through the proxy (account
# destruction, billing, memory, and cross-account invite endpoints).
# Entries are regex patterns tested with re.match() in reverse_proxy().
banned_paths = [
    "backend-api/accounts/logout_all",
    "backend-api/accounts/deactivate",
    "backend-api/payments/checkout",
    "backend-api/user_system_messages",
    "backend-api/memories",
    "backend-api/settings/clear_account_user_memory",
    # BUG FIX: a missing trailing comma here previously glued this pattern
    # to the next one via implicit string concatenation, so neither the
    # conversation-delete nor the invites endpoint was actually banned.
    "backend-api/conversations/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}",
    "backend-api/accounts/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}/invites",
    "admin",
]
# Paths answered with a redirect to the local login page.
redirect_paths = ["auth/logout"]
# Conversation-page paths served by the local ChatGPT HTML shell.
chatgpt_paths = ["c/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}"]
35 |
36 |
37 | @app.get("/backend-api/accounts/check/v4-2023-04-27")
38 | async def check_account(request: Request):
39 | token = request.headers.get("Authorization").replace("Bearer ", "")
40 | check_account_response = await chatgpt_reverse_proxy(request, "backend-api/accounts/check/v4-2023-04-27")
41 | if len(token) == 45 or token.startswith("eyJhbGciOi"):
42 | return check_account_response
43 | else:
44 | check_account_str = check_account_response.body.decode('utf-8')
45 | check_account_info = json.loads(check_account_str)
46 | for key in check_account_info.get("accounts", {}).keys():
47 | account_id = check_account_info["accounts"][key]["account"]["account_id"]
48 | globals.seed_map[token]["user_id"] = \
49 | check_account_info["accounts"][key]["account"]["account_user_id"].split("__")[0]
50 | check_account_info["accounts"][key]["account"]["account_user_id"] = f"user-chatgpt__{account_id}"
51 | with open(globals.SEED_MAP_FILE, "w", encoding="utf-8") as f:
52 | json.dump(globals.seed_map, f, indent=4)
53 | return check_account_info
54 |
55 |
56 | @app.get("/backend-api/gizmos/bootstrap")
57 | async def get_gizmos_bootstrap(request: Request):
58 | token = request.headers.get("Authorization", "").replace("Bearer ", "")
59 | if len(token) == 45 or token.startswith("eyJhbGciOi"):
60 | return await chatgpt_reverse_proxy(request, "backend-api/gizmos/bootstrap")
61 | else:
62 | return {"gizmos": []}
63 |
64 |
65 | @app.get("/backend-api/gizmos/pinned")
66 | async def get_gizmos_pinned(request: Request):
67 | token = request.headers.get("Authorization", "").replace("Bearer ", "")
68 | if len(token) == 45 or token.startswith("eyJhbGciOi"):
69 | return await chatgpt_reverse_proxy(request, "backend-api/gizmos/pinned")
70 | else:
71 | return {"items": [], "cursor": None}
72 |
73 |
74 | @app.get("/public-api/gizmos/discovery/recent")
75 | async def get_gizmos_discovery_recent(request: Request):
76 | token = request.headers.get("Authorization", "").replace("Bearer ", "")
77 | if len(token) == 45 or token.startswith("eyJhbGciOi"):
78 | return await chatgpt_reverse_proxy(request, "public-api/gizmos/discovery/recent")
79 | else:
80 | return {
81 | "info": {
82 | "id": "recent",
83 | "title": "Recently Used",
84 | },
85 | "list": {
86 | "items": [],
87 | "cursor": None
88 | }
89 | }
90 |
91 |
92 | @app.api_route("/backend-api/conversations", methods=["GET", "PATCH"])
93 | async def get_conversations(request: Request):
94 | token = request.headers.get("Authorization", "").replace("Bearer ", "")
95 | if len(token) == 45 or token.startswith("eyJhbGciOi"):
96 | return await chatgpt_reverse_proxy(request, "backend-api/conversations")
97 | if request.method == "GET":
98 | limit = int(request.query_params.get("limit", 28))
99 | offset = int(request.query_params.get("offset", 0))
100 | is_archived = request.query_params.get("is_archived", None)
101 | items = []
102 | for conversation_id in globals.seed_map.get(token, {}).get("conversations", []):
103 | conversation = globals.conversation_map.get(conversation_id, None)
104 | if conversation:
105 | if is_archived == "true":
106 | if conversation.get("is_archived", False):
107 | items.append(conversation)
108 | else:
109 | if not conversation.get("is_archived", False):
110 | items.append(conversation)
111 | items = items[int(offset):int(offset) + int(limit)]
112 | conversations = {
113 | "items": items,
114 | "total": len(items),
115 | "limit": limit,
116 | "offset": offset,
117 | "has_missing_conversations": False
118 | }
119 | return Response(content=json.dumps(conversations, indent=4), media_type="application/json")
120 | else:
121 | raise HTTPException(status_code=403, detail="Forbidden")
122 |
123 |
124 | @app.get("/backend-api/conversation/{conversation_id}")
125 | async def update_conversation(request: Request, conversation_id: str):
126 | token = request.headers.get("Authorization", "").replace("Bearer ", "")
127 | conversation_details_response = await chatgpt_reverse_proxy(request,
128 | f"backend-api/conversation/{conversation_id}")
129 | if len(token) == 45 or token.startswith("eyJhbGciOi"):
130 | return conversation_details_response
131 | else:
132 | conversation_details_str = conversation_details_response.body.decode('utf-8')
133 | conversation_details = json.loads(conversation_details_str)
134 | if conversation_id in globals.seed_map[token][
135 | "conversations"] and conversation_id in globals.conversation_map:
136 | globals.conversation_map[conversation_id]["title"] = conversation_details.get("title", None)
137 | globals.conversation_map[conversation_id]["is_archived"] = conversation_details.get("is_archived",
138 | False)
139 | globals.conversation_map[conversation_id]["conversation_template_id"] = conversation_details.get(
140 | "conversation_template_id", None)
141 | globals.conversation_map[conversation_id]["gizmo_id"] = conversation_details.get("gizmo_id", None)
142 | globals.conversation_map[conversation_id]["async_status"] = conversation_details.get("async_status",
143 | None)
144 | with open(globals.CONVERSATION_MAP_FILE, "w", encoding="utf-8") as f:
145 | json.dump(globals.conversation_map, f, indent=4)
146 | return conversation_details_response
147 |
148 |
149 | @app.patch("/backend-api/conversation/{conversation_id}")
150 | async def patch_conversation(request: Request, conversation_id: str):
151 | token = request.headers.get("Authorization", "").replace("Bearer ", "")
152 | patch_response = (await chatgpt_reverse_proxy(request, f"backend-api/conversation/{conversation_id}"))
153 | if len(token) == 45 or token.startswith("eyJhbGciOi"):
154 | return patch_response
155 | else:
156 | data = await request.json()
157 | if conversation_id in globals.seed_map[token][
158 | "conversations"] and conversation_id in globals.conversation_map:
159 | if not data.get("is_visible", True):
160 | globals.conversation_map.pop(conversation_id)
161 | globals.seed_map[token]["conversations"].remove(conversation_id)
162 | with open(globals.SEED_MAP_FILE, "w", encoding="utf-8") as f:
163 | json.dump(globals.seed_map, f, indent=4)
164 | else:
165 | globals.conversation_map[conversation_id].update(data)
166 | with open(globals.CONVERSATION_MAP_FILE, "w", encoding="utf-8") as f:
167 | json.dump(globals.conversation_map, f, indent=4)
168 | return patch_response
169 |
170 |
171 | @app.get("/backend-api/me")
172 | async def get_me(request: Request):
173 | token = request.headers.get("Authorization", "").replace("Bearer ", "")
174 | if len(token) == 45 or token.startswith("eyJhbGciOi"):
175 | return await chatgpt_reverse_proxy(request, "backend-api/me")
176 | else:
177 | me = {
178 | "object": "user",
179 | "id": "org-chatgpt",
180 | "email": "chatgpt@openai.com",
181 | "name": "ChatGPT",
182 | "picture": "https://cdn.auth0.com/avatars/ai.png",
183 | "created": int(time.time()),
184 | "phone_number": None,
185 | "mfa_flag_enabled": False,
186 | "amr": [],
187 | "groups": [],
188 | "orgs": {
189 | "object": "list",
190 | "data": [
191 | {
192 | "object": "organization",
193 | "id": "org-chatgpt",
194 | "created": 1715641300,
195 | "title": "Personal",
196 | "name": "user-chatgpt",
197 | "description": "Personal org for chatgpt@openai.com",
198 | "personal": True,
199 | "settings": {
200 | "threads_ui_visibility": "NONE",
201 | "usage_dashboard_visibility": "ANY_ROLE",
202 | "disable_user_api_keys": False
203 | },
204 | "parent_org_id": None,
205 | "is_default": True,
206 | "role": "owner",
207 | "is_scale_tier_authorized_purchaser": None,
208 | "is_scim_managed": False,
209 | "projects": {
210 | "object": "list",
211 | "data": []
212 | },
213 | "groups": [],
214 | "geography": None
215 | }
216 | ]
217 | },
218 | "has_payg_project_spend_limit": True
219 | }
220 | return Response(content=json.dumps(me, indent=4), media_type="application/json")
221 |
222 |
223 | @app.post("/backend-api/edge")
224 | async def edge():
225 | return Response(status_code=204)
226 |
227 |
if no_sentinel:
    @app.post("/backend-api/sentinel/chat-requirements")
    async def sentinel_chat_conversations():
        """Short-circuit the sentinel probe: report that nothing is required.

        Registered only when no_sentinel is enabled — answers the
        chat-requirements handshake locally so no arkose, proof-of-work,
        or turnstile challenge is ever requested.
        """
        no_challenge = {"dx": None, "required": False}
        return {
            "arkose": dict(no_challenge),
            "persona": "chatgpt-paid",
            "proofofwork": {"difficulty": None, "required": False, "seed": None},
            # A fresh opaque token per request, as the real endpoint issues.
            "token": str(uuid.uuid4()),
            "turnstile": dict(no_challenge),
        }
248 |
249 |
250 | @app.post("/backend-api/conversation")
251 | async def chat_conversations(request: Request):
252 | token = request.headers.get("Authorization", "").replace("Bearer ", "")
253 | req_token = await get_real_req_token(token)
254 | access_token = await verify_token(req_token)
255 | fp = get_fp(req_token)
256 | proxy_url = fp.pop("proxy_url", None)
257 | impersonate = fp.pop("impersonate", "safari15_3")
258 | user_agent = fp.get("user-agent",
259 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0")
260 |
261 | host_url = random.choice(chatgpt_base_url_list) if chatgpt_base_url_list else "https://chatgpt.com"
262 | proof_token = None
263 | turnstile_token = None
264 |
265 | headers = {
266 | key: value for key, value in request.headers.items()
267 | if (key.lower() not in ["host", "origin", "referer", "priority",
268 | "oai-device-id"] and key.lower() not in headers_reject_list)
269 | }
270 | headers.update(fp)
271 | headers.update({
272 | "authorization": f"Bearer {access_token}",
273 | "oai-device-id": fp.get("oai-device-id", str(uuid.uuid4()))
274 | })
275 |
276 | client = Client(proxy=proxy_url, impersonate=impersonate)
277 |
278 | config = get_config(user_agent)
279 | p = get_requirements_token(config)
280 | data = {'p': p}
281 | r = await client.post(f'{host_url}/backend-api/sentinel/chat-requirements', headers=headers, json=data,
282 | timeout=10)
283 | resp = r.json()
284 | turnstile = resp.get('turnstile', {})
285 | turnstile_required = turnstile.get('required')
286 | if turnstile_required:
287 | turnstile_dx = turnstile.get("dx")
288 | try:
289 | if turnstile_solver_url:
290 | res = await client.post(turnstile_solver_url,
291 | json={"url": "https://chatgpt.com", "p": p, "dx": turnstile_dx})
292 | turnstile_token = res.json().get("t")
293 | except Exception as e:
294 | logger.info(f"Turnstile ignored: {e}")
295 |
296 | proofofwork = resp.get('proofofwork', {})
297 | proofofwork_required = proofofwork.get('required')
298 | if proofofwork_required:
299 | proofofwork_diff = proofofwork.get("difficulty")
300 | proofofwork_seed = proofofwork.get("seed")
301 | proof_token, solved = await run_in_threadpool(
302 | get_answer_token, proofofwork_seed, proofofwork_diff, config
303 | )
304 | if not solved:
305 | raise HTTPException(status_code=403, detail="Failed to solve proof of work")
306 | chat_token = resp.get('token')
307 | headers.update({
308 | "openai-sentinel-chat-requirements-token": chat_token,
309 | "openai-sentinel-proof-token": proof_token,
310 | "openai-sentinel-turnstile-token": turnstile_token,
311 | })
312 |
313 | params = dict(request.query_params)
314 | data = await request.body()
315 | request_cookies = dict(request.cookies)
316 | background = BackgroundTask(client.close)
317 | r = await client.post_stream(f"{host_url}/backend-api/conversation", params=params, headers=headers,
318 | cookies=request_cookies, data=data, stream=True, allow_redirects=False)
319 | rheaders = r.headers
320 | if x_sign:
321 | rheaders.update({"x-sign": x_sign})
322 | if 'stream' in rheaders.get("content-type", ""):
323 | logger.info(f"Request token: {req_token}")
324 | logger.info(f"Request proxy: {proxy_url}")
325 | logger.info(f"Request UA: {user_agent}")
326 | logger.info(f"Request impersonate: {impersonate}")
327 | return StreamingResponse(content_generator(r, token), headers=rheaders,
328 | media_type=rheaders.get("content-type"), background=background)
329 | else:
330 | return Response(content=(await r.atext()), headers=rheaders, media_type=rheaders.get("content-type"),
331 | status_code=r.status_code, background=background)
332 |
333 |
334 | @app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH", "TRACE"])
335 | async def reverse_proxy(request: Request, path: str):
336 | token = request.headers.get("Authorization", "").replace("Bearer ", "")
337 | if len(token) != 45 and not token.startswith("eyJhbGciOi"):
338 | for banned_path in banned_paths:
339 | if re.match(banned_path, path):
340 | raise HTTPException(status_code=403, detail="Forbidden")
341 |
342 | for chatgpt_path in chatgpt_paths:
343 | if re.match(chatgpt_path, path):
344 | return await chatgpt_html(request)
345 |
346 | for redirect_path in redirect_paths:
347 | if re.match(redirect_path, path):
348 | redirect_url = str(request.base_url)
349 | response = RedirectResponse(url=f"{redirect_url}login", status_code=302)
350 | return response
351 |
352 | return await chatgpt_reverse_proxy(request, path)
353 |
--------------------------------------------------------------------------------
/chatgpt/proofofWork.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | import json
3 | import random
4 | import re
5 | import time
6 | import uuid
7 | from datetime import datetime, timedelta, timezone
8 | from html.parser import HTMLParser
9 |
10 | import pybase64
11 |
12 | from utils.Logger import logger
13 | from utils.configs import conversation_only
14 |
# Candidate hardware values for the forged browser fingerprint
# (presumably sampled when building the PoW config — confirm in get_config()).
cores = [16, 24, 32]
screens = [3000, 4000, 6000]
# strftime layout matching the prefix of JavaScript's Date.toString().
timeLayout = "%a %b %d %Y %H:%M:%S"

# Cache of the chatgpt.com homepage scrape (script srcs + dpl build id);
# get_dpl() refreshes it when older than 15 minutes.
cached_scripts = []
cached_dpl = ""
cached_time = 0
cached_require_proof = ""
23 |
24 | navigator_key = [
25 | "registerProtocolHandler−function registerProtocolHandler() { [native code] }",
26 | "storage−[object StorageManager]",
27 | "locks−[object LockManager]",
28 | "appCodeName−Mozilla",
29 | "permissions−[object Permissions]",
30 | "appVersion−5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
31 | "share−function share() { [native code] }",
32 | "webdriver−false",
33 | "managed−[object NavigatorManagedData]",
34 | "canShare−function canShare() { [native code] }",
35 | "vendor−Google Inc.",
36 | "vendor−Google Inc.",
37 | "mediaDevices−[object MediaDevices]",
38 | "vibrate−function vibrate() { [native code] }",
39 | "storageBuckets−[object StorageBucketManager]",
40 | "mediaCapabilities−[object MediaCapabilities]",
41 | "getGamepads−function getGamepads() { [native code] }",
42 | "bluetooth−[object Bluetooth]",
43 | "share−function share() { [native code] }",
44 | "cookieEnabled−true",
45 | "virtualKeyboard−[object VirtualKeyboard]",
46 | "product−Gecko",
47 | "mediaDevices−[object MediaDevices]",
48 | "canShare−function canShare() { [native code] }",
49 | "getGamepads−function getGamepads() { [native code] }",
50 | "product−Gecko",
51 | "xr−[object XRSystem]",
52 | "clipboard−[object Clipboard]",
53 | "storageBuckets−[object StorageBucketManager]",
54 | "unregisterProtocolHandler−function unregisterProtocolHandler() { [native code] }",
55 | "productSub−20030107",
56 | "login−[object NavigatorLogin]",
57 | "vendorSub−",
58 | "login−[object NavigatorLogin]",
59 | "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
60 | "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
61 | "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
62 | "mediaDevices−[object MediaDevices]",
63 | "locks−[object LockManager]",
64 | "webkitGetUserMedia−function webkitGetUserMedia() { [native code] }",
65 | "vendor−Google Inc.",
66 | "xr−[object XRSystem]",
67 | "mediaDevices−[object MediaDevices]",
68 | "virtualKeyboard−[object VirtualKeyboard]",
69 | "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
70 | "virtualKeyboard−[object VirtualKeyboard]",
71 | "appName−Netscape",
72 | "storageBuckets−[object StorageBucketManager]",
73 | "presentation−[object Presentation]",
74 | "onLine−true",
75 | "mimeTypes−[object MimeTypeArray]",
76 | "credentials−[object CredentialsContainer]",
77 | "presentation−[object Presentation]",
78 | "getGamepads−function getGamepads() { [native code] }",
79 | "vendorSub−",
80 | "virtualKeyboard−[object VirtualKeyboard]",
81 | "serviceWorker−[object ServiceWorkerContainer]",
82 | "xr−[object XRSystem]",
83 | "product−Gecko",
84 | "keyboard−[object Keyboard]",
85 | "gpu−[object GPU]",
86 | "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
87 | "webkitPersistentStorage−[object DeprecatedStorageQuota]",
88 | "doNotTrack",
89 | "clearAppBadge−function clearAppBadge() { [native code] }",
90 | "presentation−[object Presentation]",
91 | "serial−[object Serial]",
92 | "locks−[object LockManager]",
93 | "requestMIDIAccess−function requestMIDIAccess() { [native code] }",
94 | "locks−[object LockManager]",
95 | "requestMediaKeySystemAccess−function requestMediaKeySystemAccess() { [native code] }",
96 | "vendor−Google Inc.",
97 | "pdfViewerEnabled−true",
98 | "language−zh-CN",
99 | "setAppBadge−function setAppBadge() { [native code] }",
100 | "geolocation−[object Geolocation]",
101 | "userAgentData−[object NavigatorUAData]",
102 | "mediaCapabilities−[object MediaCapabilities]",
103 | "requestMIDIAccess−function requestMIDIAccess() { [native code] }",
104 | "getUserMedia−function getUserMedia() { [native code] }",
105 | "mediaDevices−[object MediaDevices]",
106 | "webkitPersistentStorage−[object DeprecatedStorageQuota]",
107 | "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
108 | "sendBeacon−function sendBeacon() { [native code] }",
109 | "hardwareConcurrency−32",
110 | "appVersion−5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
111 | "credentials−[object CredentialsContainer]",
112 | "storage−[object StorageManager]",
113 | "cookieEnabled−true",
114 | "pdfViewerEnabled−true",
115 | "windowControlsOverlay−[object WindowControlsOverlay]",
116 | "scheduling−[object Scheduling]",
117 | "pdfViewerEnabled−true",
118 | "hardwareConcurrency−32",
119 | "xr−[object XRSystem]",
120 | "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
121 | "webdriver−false",
122 | "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
123 | "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
124 | "bluetooth−[object Bluetooth]"
125 | ]
126 | document_key = ['_reactListeningo743lnnpvdg', 'location']
127 | window_key = [
128 | "0",
129 | "window",
130 | "self",
131 | "document",
132 | "name",
133 | "location",
134 | "customElements",
135 | "history",
136 | "navigation",
137 | "locationbar",
138 | "menubar",
139 | "personalbar",
140 | "scrollbars",
141 | "statusbar",
142 | "toolbar",
143 | "status",
144 | "closed",
145 | "frames",
146 | "length",
147 | "top",
148 | "opener",
149 | "parent",
150 | "frameElement",
151 | "navigator",
152 | "origin",
153 | "external",
154 | "screen",
155 | "innerWidth",
156 | "innerHeight",
157 | "scrollX",
158 | "pageXOffset",
159 | "scrollY",
160 | "pageYOffset",
161 | "visualViewport",
162 | "screenX",
163 | "screenY",
164 | "outerWidth",
165 | "outerHeight",
166 | "devicePixelRatio",
167 | "clientInformation",
168 | "screenLeft",
169 | "screenTop",
170 | "styleMedia",
171 | "onsearch",
172 | "isSecureContext",
173 | "trustedTypes",
174 | "performance",
175 | "onappinstalled",
176 | "onbeforeinstallprompt",
177 | "crypto",
178 | "indexedDB",
179 | "sessionStorage",
180 | "localStorage",
181 | "onbeforexrselect",
182 | "onabort",
183 | "onbeforeinput",
184 | "onbeforematch",
185 | "onbeforetoggle",
186 | "onblur",
187 | "oncancel",
188 | "oncanplay",
189 | "oncanplaythrough",
190 | "onchange",
191 | "onclick",
192 | "onclose",
193 | "oncontentvisibilityautostatechange",
194 | "oncontextlost",
195 | "oncontextmenu",
196 | "oncontextrestored",
197 | "oncuechange",
198 | "ondblclick",
199 | "ondrag",
200 | "ondragend",
201 | "ondragenter",
202 | "ondragleave",
203 | "ondragover",
204 | "ondragstart",
205 | "ondrop",
206 | "ondurationchange",
207 | "onemptied",
208 | "onended",
209 | "onerror",
210 | "onfocus",
211 | "onformdata",
212 | "oninput",
213 | "oninvalid",
214 | "onkeydown",
215 | "onkeypress",
216 | "onkeyup",
217 | "onload",
218 | "onloadeddata",
219 | "onloadedmetadata",
220 | "onloadstart",
221 | "onmousedown",
222 | "onmouseenter",
223 | "onmouseleave",
224 | "onmousemove",
225 | "onmouseout",
226 | "onmouseover",
227 | "onmouseup",
228 | "onmousewheel",
229 | "onpause",
230 | "onplay",
231 | "onplaying",
232 | "onprogress",
233 | "onratechange",
234 | "onreset",
235 | "onresize",
236 | "onscroll",
237 | "onsecuritypolicyviolation",
238 | "onseeked",
239 | "onseeking",
240 | "onselect",
241 | "onslotchange",
242 | "onstalled",
243 | "onsubmit",
244 | "onsuspend",
245 | "ontimeupdate",
246 | "ontoggle",
247 | "onvolumechange",
248 | "onwaiting",
249 | "onwebkitanimationend",
250 | "onwebkitanimationiteration",
251 | "onwebkitanimationstart",
252 | "onwebkittransitionend",
253 | "onwheel",
254 | "onauxclick",
255 | "ongotpointercapture",
256 | "onlostpointercapture",
257 | "onpointerdown",
258 | "onpointermove",
259 | "onpointerrawupdate",
260 | "onpointerup",
261 | "onpointercancel",
262 | "onpointerover",
263 | "onpointerout",
264 | "onpointerenter",
265 | "onpointerleave",
266 | "onselectstart",
267 | "onselectionchange",
268 | "onanimationend",
269 | "onanimationiteration",
270 | "onanimationstart",
271 | "ontransitionrun",
272 | "ontransitionstart",
273 | "ontransitionend",
274 | "ontransitioncancel",
275 | "onafterprint",
276 | "onbeforeprint",
277 | "onbeforeunload",
278 | "onhashchange",
279 | "onlanguagechange",
280 | "onmessage",
281 | "onmessageerror",
282 | "onoffline",
283 | "ononline",
284 | "onpagehide",
285 | "onpageshow",
286 | "onpopstate",
287 | "onrejectionhandled",
288 | "onstorage",
289 | "onunhandledrejection",
290 | "onunload",
291 | "crossOriginIsolated",
292 | "scheduler",
293 | "alert",
294 | "atob",
295 | "blur",
296 | "btoa",
297 | "cancelAnimationFrame",
298 | "cancelIdleCallback",
299 | "captureEvents",
300 | "clearInterval",
301 | "clearTimeout",
302 | "close",
303 | "confirm",
304 | "createImageBitmap",
305 | "fetch",
306 | "find",
307 | "focus",
308 | "getComputedStyle",
309 | "getSelection",
310 | "matchMedia",
311 | "moveBy",
312 | "moveTo",
313 | "open",
314 | "postMessage",
315 | "print",
316 | "prompt",
317 | "queueMicrotask",
318 | "releaseEvents",
319 | "reportError",
320 | "requestAnimationFrame",
321 | "requestIdleCallback",
322 | "resizeBy",
323 | "resizeTo",
324 | "scroll",
325 | "scrollBy",
326 | "scrollTo",
327 | "setInterval",
328 | "setTimeout",
329 | "stop",
330 | "structuredClone",
331 | "webkitCancelAnimationFrame",
332 | "webkitRequestAnimationFrame",
333 | "chrome",
334 | "caches",
335 | "cookieStore",
336 | "ondevicemotion",
337 | "ondeviceorientation",
338 | "ondeviceorientationabsolute",
339 | "launchQueue",
340 | "documentPictureInPicture",
341 | "getScreenDetails",
342 | "queryLocalFonts",
343 | "showDirectoryPicker",
344 | "showOpenFilePicker",
345 | "showSaveFilePicker",
346 | "originAgentCluster",
347 | "onpageswap",
348 | "onpagereveal",
349 | "credentialless",
350 | "speechSynthesis",
351 | "onscrollend",
352 | "webkitRequestFileSystem",
353 | "webkitResolveLocalFileSystemURL",
354 | "sendMsgToSolverCS",
355 | "webpackChunk_N_E",
356 | "__next_set_public_path__",
357 | "next",
358 | "__NEXT_DATA__",
359 | "__SSG_MANIFEST_CB",
360 | "__NEXT_P",
361 | "_N_E",
362 | "regeneratorRuntime",
363 | "__REACT_INTL_CONTEXT__",
364 | "DD_RUM",
365 | "_",
366 | "filterCSS",
367 | "filterXSS",
368 | "__SEGMENT_INSPECTOR__",
369 | "__NEXT_PRELOADREADY",
370 | "Intercom",
371 | "__MIDDLEWARE_MATCHERS",
372 | "__STATSIG_SDK__",
373 | "__STATSIG_JS_SDK__",
374 | "__STATSIG_RERENDER_OVERRIDE__",
375 | "_oaiHandleSessionExpired",
376 | "__BUILD_MANIFEST",
377 | "__SSG_MANIFEST",
378 | "__intercomAssignLocation",
379 | "__intercomReloadLocation"
380 | ]
381 |
382 |
class ScriptSrcParser(HTMLParser):
    """Scan HTML for <script src=...> tags, recording every src URL and the
    dpl build marker (``c/<build>/_``) when one appears in a script URL."""

    def handle_starttag(self, tag, attrs):
        global cached_scripts, cached_dpl, cached_time
        if tag != "script":
            return
        attr_map = dict(attrs)
        if "src" not in attr_map:
            return
        src = attr_map["src"]
        cached_scripts.append(src)
        found = re.search(r"c/[^/]*/_", src)
        if found:
            # Remember the build marker and stamp the cache time.
            cached_dpl = found.group(0)
            cached_time = int(time.time())
395 |
396 |
def get_data_build_from_html(html_content):
    """Parse the ChatGPT landing page, populating the module-level caches.

    Feeds the HTML through ScriptSrcParser (which fills ``cached_scripts`` and,
    when a script URL carries a build marker, ``cached_dpl``). Falls back to a
    default sentinel SDK URL when no scripts were found, and to the
    ``data-build`` attribute on the ``<html>`` tag when no dpl was found.
    """
    global cached_scripts, cached_dpl, cached_time
    parser = ScriptSrcParser()
    parser.feed(html_content)
    if not cached_scripts:
        # No <script src=...> tags at all: use the stock sentinel SDK URL.
        cached_scripts.append("https://chatgpt.com/backend-api/sentinel/sdk.js")
    if not cached_dpl:
        # Fallback: read the build id from the <html ... data-build="..."> tag.
        # The previous pattern (']*data-build="([^"]*)"') had lost its
        # '<html[^>' prefix and could match a "data-build=" occurrence
        # anywhere in the document, e.g. inside inline script text.
        match = re.search(r'<html[^>]*data-build="([^"]*)"', html_content)
        if match:
            data_build = match.group(1)
            cached_dpl = data_build
            cached_time = int(time.time())
    logger.info(f"Found dpl: {cached_dpl}")
410 |
411 |
async def get_dpl(service):
    """Ensure the script/dpl caches are fresh, fetching the landing page if stale.

    Returns True when a usable cache exists (or refresh is skipped), False when
    the fetch failed — in which case the failure itself is cached for 15 min.
    """
    global cached_scripts, cached_dpl, cached_time
    # Serve from cache while it is younger than 15 minutes.
    if int(time.time()) - cached_time < 15 * 60:
        return True
    headers = service.base_headers.copy()
    cached_scripts = []
    cached_dpl = ""
    try:
        if conversation_only:
            # conversation-only mode never sends sentinel tokens, so skip.
            return True
        r = await service.s.get(f"{service.host_url}/", headers=headers, timeout=5)
        r.raise_for_status()
        get_data_build_from_html(r.text)
        if cached_dpl:
            return True
        raise Exception("No Cached DPL")
    except Exception as e:
        logger.info(f"Failed to get dpl: {e}")
        # Cache the failure too, so we don't hammer the page on every request.
        cached_dpl = None
        cached_time = int(time.time())
        return False
434 |
435 |
def get_parse_time():
    """Current time formatted like a browser ``Date`` string, pinned to UTC-5."""
    fixed_offset = timezone(timedelta(hours=-5))
    stamp = datetime.now(fixed_offset).strftime(timeLayout)
    return stamp + " GMT-0500 (Eastern Standard Time)"
439 |
440 |
def get_config(user_agent):
    """Build the randomized browser-fingerprint array consumed by the PoW.

    Positions 3 and 9 are zero placeholders that generate_answer overwrites
    with its loop counters; the rest mimic browser environment values.
    """
    picked_core = random.choice(cores)
    picked_screen = random.choice(screens)
    fingerprint = [
        picked_core + picked_screen,
        get_parse_time(),
        4294705152,
        0,
        user_agent,
        random.choice(cached_scripts) if cached_scripts else None,
        cached_dpl,
        "en-US",
        "en-US,es-US,en,es",
        0,
        random.choice(navigator_key),
        random.choice(document_key),
        random.choice(window_key),
        time.perf_counter(),
        str(uuid.uuid4()),
    ]
    return fingerprint
462 |
463 |
def get_answer_token(seed, diff, config):
    """Solve the proof-of-work, log timing, and return the prefixed token.

    Returns ("gAAAAAB" + answer, solved) where ``solved`` is False when the
    search space was exhausted without a hit.
    """
    started = time.time()
    answer, solved = generate_answer(seed, diff, config)
    elapsed_ms = int((time.time() - started) * 1e6) / 1e3
    logger.info(f'diff: {diff}, time: {elapsed_ms}ms, solved: {solved}')
    return "gAAAAAB" + answer, solved
470 |
471 |
def generate_answer(seed, diff, config):
    """Brute-force the sentinel proof-of-work.

    The JSON-encoded config (with slots 3 and 9 replaced by the loop counters
    ``i`` and ``i >> 1``) is base64-encoded and hashed with SHA3-512 together
    with the seed until the leading hash bytes are <= the difficulty target.

    Returns (base64_payload, True) on success, or a fixed "unsolved" marker
    payload with False after 500000 attempts.
    """
    # ``diff`` is a hex string; compare the same number of *bytes* as the
    # decoded target holds. Slicing len(diff) bytes (twice the target length)
    # made the lexicographic bytes comparison reject exact boundary hits,
    # because a longer sequence compares greater than its own prefix.
    diff_len = len(diff) // 2
    seed_encoded = seed.encode()
    # Pre-serialize the invariant config segments once, outside the hot loop.
    static_config_part1 = (json.dumps(config[:3], separators=(',', ':'), ensure_ascii=False)[:-1] + ',').encode()
    static_config_part2 = (',' + json.dumps(config[4:9], separators=(',', ':'), ensure_ascii=False)[1:-1] + ',').encode()
    static_config_part3 = (',' + json.dumps(config[10:], separators=(',', ':'), ensure_ascii=False)[1:]).encode()

    target_diff = bytes.fromhex(diff)

    for i in range(500000):
        dynamic_json_i = str(i).encode()
        dynamic_json_j = str(i >> 1).encode()
        final_json_bytes = static_config_part1 + dynamic_json_i + static_config_part2 + dynamic_json_j + static_config_part3
        base_encode = pybase64.b64encode(final_json_bytes)
        hash_value = hashlib.sha3_512(seed_encoded + base_encode).digest()
        if hash_value[:diff_len] <= target_diff:
            return base_encode.decode(), True

    # Exhausted: return the well-known "failed" marker plus the quoted seed.
    return "wQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + pybase64.b64encode(f'"{seed}"'.encode()).decode(), False
491 |
492 |
def get_requirements_token(config):
    """Produce a 'gAAAAAC'-prefixed requirements token from an easy PoW round.

    Uses a fixed, very permissive difficulty ("0fffff"); the solved flag is
    intentionally ignored.
    """
    token_body, _solved = generate_answer(format(random.random()), "0fffff", config)
    return 'gAAAAAC' + token_body
496 |
497 |
498 | if __name__ == "__main__":
499 | # cached_scripts.append(
500 | # "https://cdn.oaistatic.com/_next/static/cXh69klOLzS0Gy2joLDRS/_ssgManifest.js?dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3")
501 | # cached_dpl = "453ebaec0d44c2decab71692e1bfe39be35a24b3"
502 | # cached_time = int(time.time())
503 | # for i in range(10):
504 | # seed = format(random.random())
505 | # diff = "000032"
506 | # config = get_config("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome")
507 | # answer = get_answer_token(seed, diff, config)
508 | cached_scripts.append(
509 | "https://cdn.oaistatic.com/_next/static/cXh69klOLzS0Gy2joLDRS/_ssgManifest.js?dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3")
510 | cached_dpl = "dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3"
511 | config = get_config("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36")
512 | get_requirements_token(config)
513 |
--------------------------------------------------------------------------------
/chatgpt/chatFormat.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import random
4 | import re
5 | import string
6 | import time
7 | import uuid
8 |
9 | import pybase64
10 | import websockets
11 | from fastapi import HTTPException
12 |
13 | from api.files import get_file_content
14 | from api.models import model_system_fingerprint
15 | from api.tokens import split_tokens_from_content, calculate_image_tokens, num_tokens_from_messages
16 | from utils.Logger import logger
17 |
18 | moderation_message = "I'm sorry, I cannot provide or engage in any content related to pornography, violence, or any unethical material. If you have any other questions or need assistance, please feel free to let me know. I'll do my best to provide support and assistance."
19 |
20 |
async def format_not_stream_response(response, prompt_tokens, max_tokens, model):
    """Drain an OpenAI-format SSE stream and fold it into one chat.completion.

    ``response`` yields ``data: {...}`` strings (as produced by
    stream_response); their delta contents are concatenated, trimmed to
    ``max_tokens``, and wrapped in a non-streaming completion payload.
    Raises HTTPException(403) when no content was produced.
    """
    completion_id = f"chatcmpl-{''.join(random.choice(string.ascii_letters + string.digits) for _ in range(29))}"
    fingerprints = model_system_fingerprint.get(model, None)
    system_fingerprint = random.choice(fingerprints) if fingerprints else None
    created_time = int(time.time())

    pieces = []
    async for chunk in response:
        try:
            if chunk.startswith("data: [DONE]"):
                break
            if not chunk.startswith("data: "):
                continue
            payload = json.loads(chunk[6:])
            if not payload["choices"][0].get("delta"):
                continue
            pieces.append(payload["choices"][0]["delta"]["content"])
        except Exception as e:
            # Malformed chunks are logged and skipped, never fatal.
            logger.error(f"Error: {chunk}, error: {str(e)}")
            continue
    all_text = "".join(pieces)

    content, completion_tokens, finish_reason = await split_tokens_from_content(all_text, max_tokens, model)
    if not content:
        raise HTTPException(status_code=403, detail="No content in the message.")

    result = {
        "id": completion_id,
        "object": "chat.completion",
        "created": created_time,
        "model": model,
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": content,
                },
                "logprobs": None,
                "finish_reason": finish_reason
            }
        ],
        "usage": {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens
        }
    }
    if system_fingerprint:
        result["system_fingerprint"] = system_fingerprint
    return result
72 |
73 |
async def wss_stream_response(websocket, conversation_id):
    """Relay base64-decoded message bodies for one conversation over WebSocket.

    Yields raw byte chunks for frames whose data.conversation_id matches;
    acks every 80th sequenceId so the server keeps streaming. Yields
    b"data: [DONE]\\n\\n" when the socket closes normally (code 1000); stops
    on a 10s receive timeout or once the socket is closed.
    """
    while not websocket.closed:
        try:
            message = await asyncio.wait_for(websocket.recv(), timeout=10)
            if message:
                resultObj = json.loads(message)
                sequenceId = resultObj.get("sequenceId", None)
                if not sequenceId:
                    continue
                data = resultObj.get("data", {})
                # Frames for other conversations share the socket; skip them.
                if conversation_id != data.get("conversation_id", ""):
                    continue
                sequenceId = resultObj.get('sequenceId')
                if sequenceId and sequenceId % 80 == 0:
                    await websocket.send(
                        json.dumps(
                            {"type": "sequenceAck", "sequenceId": sequenceId}
                        )
                    )
                decoded_bytes = pybase64.b64decode(data.get("body", None))
                yield decoded_bytes
            else:
                # Was a bare print(); use the module logger like every other path.
                logger.error("No message received within the specified time.")
        except asyncio.TimeoutError:
            logger.error("Timeout! No message received within the specified time.")
            break
        except websockets.ConnectionClosed as e:
            if e.code == 1000:
                logger.error("WebSocket closed normally with code 1000 (OK)")
                yield b"data: [DONE]\n\n"
            else:
                logger.error(f"WebSocket closed with error code {e.code}")
        except Exception as e:
            logger.error(f"Error: {str(e)}")
            continue
109 |
110 |
async def head_process_response(response):
    """Peek at the first meaningful SSE chunk to decide whether a reply started.

    Skips user/system-authored chunks. Returns (response, True) when a message
    reports status "in_progress"; (response, False) on an error payload, any
    other status, or when the stream ends without a ``data: {`` chunk.
    The partially-consumed ``response`` iterator is handed back to the caller.
    """
    async for chunk in response:
        chunk = chunk.decode("utf-8")
        if chunk.startswith("data: {"):
            chunk_old_data = json.loads(chunk[6:])
            message = chunk_old_data.get("message", {})
            if not message and "error" in chunk_old_data:
                return response, False
            role = message.get('author', {}).get('role')
            if role == 'user' or role == 'system':
                continue

            status = message.get("status")
            if status == "in_progress":
                return response, True
            return response, False
    # Stream exhausted without a data chunk: previously this fell off the end
    # and returned None, crashing callers that unpack a (response, ok) pair.
    return response, False
127 |
128 |
async def stream_response(service, response, model, max_tokens):
    """Translate a ChatGPT backend SSE stream into OpenAI chat.completion.chunk SSE.

    ``response`` yields raw byte chunks from the upstream conversation
    endpoint; this generator yields "data: {...}\n\n" strings in the OpenAI
    streaming format and finally "data: [DONE]\n\n". Stateful: tracks the
    previous role/content-type/message id to emit markdown separators, code
    fences and citation markers at the right transitions.
    """
    chat_id = f"chatcmpl-{''.join(random.choice(string.ascii_letters + string.digits) for _ in range(29))}"
    system_fingerprint_list = model_system_fingerprint.get(model, None)
    system_fingerprint = random.choice(system_fingerprint_list) if system_fingerprint_list else None
    created_time = int(time.time())
    completion_tokens = 0       # counted as one per emitted chunk, not real tokens
    len_last_content = 0        # length of the cumulative part already emitted
    len_last_citation = 0       # number of citations already rendered
    last_message_id = None
    last_role = None
    last_content_type = None
    model_slug = None           # actual upstream model, reported in logs
    end = False                 # set when a terminal chunk was emitted

    chunk_new_data = {
        "id": chat_id,
        "object": "chat.completion.chunk",
        "created": created_time,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"role": "assistant", "content": ""},
                "logprobs": None,
                "finish_reason": None
            }
        ]
    }
    if system_fingerprint:
        chunk_new_data["system_fingerprint"] = system_fingerprint
    # Prime the client with an empty assistant delta.
    yield f"data: {json.dumps(chunk_new_data)}\n\n"

    async for chunk in response:
        chunk = chunk.decode("utf-8")
        if end:
            # The terminal delta went out on the previous iteration.
            logger.info(f"Response Model: {model_slug}")
            yield "data: [DONE]\n\n"
            break
        try:
            if chunk.startswith("data: {"):
                chunk_old_data = json.loads(chunk[6:])
                finish_reason = None
                message = chunk_old_data.get("message", {})
                conversation_id = chunk_old_data.get("conversation_id")
                role = message.get('author', {}).get('role')
                if role == 'user' or role == 'system':
                    # Echoes of the prompt / system messages are not forwarded.
                    continue

                status = message.get("status")
                message_id = message.get("id")
                content = message.get("content", {})
                recipient = message.get("recipient", "")
                meta_data = message.get("metadata", {})
                initial_text = meta_data.get("initial_text", "")
                model_slug = meta_data.get("model_slug", model_slug)

                if not message and chunk_old_data.get("type") == "moderation":
                    # Upstream refused; stream the canned refusal and stop.
                    delta = {"role": "assistant", "content": moderation_message}
                    finish_reason = "stop"
                    end = True
                elif status == "in_progress":
                    outer_content_type = content.get("content_type")
                    if outer_content_type == "text":
                        part = content.get("parts", [])[0]
                        if not part:
                            # Empty part: only role-transition separators.
                            if role == 'assistant' and last_role != 'assistant':
                                if last_role == None:
                                    new_text = ""
                                else:
                                    new_text = f"\n"
                            elif role == 'tool' and last_role != 'tool':
                                new_text = f">{initial_text}\n"
                            else:
                                new_text = ""
                        else:
                            # Ignore parallel variant messages with other ids.
                            if last_message_id and last_message_id != message_id:
                                continue
                            citation = message.get("metadata", {}).get("citations", [])
                            if len(citation) > len_last_citation:
                                # A new citation arrived: render it instead of text.
                                inside_metadata = citation[-1].get("metadata", {})
                                citation_title = inside_metadata.get("title", "")
                                citation_url = inside_metadata.get("url", "")
                                new_text = f' **[[""]]({citation_url} "{citation_title}")** '
                                len_last_citation = len(citation)
                            else:
                                # Parts are cumulative; emit only the unseen suffix.
                                if role == 'assistant' and last_role != 'assistant':
                                    if recipient == 'dalle.text2im':
                                        new_text = f"\n```{recipient}\n{part[len_last_content:]}"
                                    elif last_role == None:
                                        new_text = part[len_last_content:]
                                    else:
                                        new_text = f"\n\n{part[len_last_content:]}"
                                elif role == 'tool' and last_role != 'tool':
                                    new_text = f">{initial_text}\n{part[len_last_content:]}"
                                elif role == 'tool':
                                    new_text = part[len_last_content:].replace("\n\n", "\n")
                                else:
                                    new_text = part[len_last_content:]
                                len_last_content = len(part)
                    else:
                        # Non-text content streams a cumulative "text" field.
                        text = content.get("text", "")
                        if outer_content_type == "code" and last_content_type != "code":
                            language = content.get("language", "")
                            if not language or language == "unknown":
                                language = recipient
                            new_text = "\n```" + language + "\n" + text[len_last_content:]
                        elif outer_content_type == "execution_output" and last_content_type != "execution_output":
                            new_text = "\n```" + "Output" + "\n" + text[len_last_content:]
                        else:
                            new_text = text[len_last_content:]
                        len_last_content = len(text)
                    # Close an open code fence when leaving code/output blocks.
                    if last_content_type == "code" and outer_content_type != "code":
                        new_text = "\n```\n" + new_text
                    elif last_content_type == "execution_output" and outer_content_type != "execution_output":
                        new_text = "\n```\n" + new_text

                    delta = {"content": new_text}
                    last_content_type = outer_content_type
                    if completion_tokens >= max_tokens:
                        # Client-requested cap reached: truncate the stream.
                        delta = {}
                        finish_reason = "length"
                        end = True
                elif status == "finished_successfully":
                    if content.get("content_type") == "multimodal_text":
                        parts = content.get("parts", [])
                        delta = {}
                        for part in parts:
                            if isinstance(part, str):
                                continue
                            inner_content_type = part.get('content_type')
                            if inner_content_type == "image_asset_pointer":
                                last_content_type = "image_asset_pointer"
                                file_id = part.get('asset_pointer').replace('file-service://', '')
                                logger.debug(f"file_id: {file_id}")
                                image_download_url = await service.get_download_url(file_id)
                                logger.debug(f"image_download_url: {image_download_url}")
                                # NOTE(review): the f-strings below carry no
                                # placeholder — the markdown image link text
                                # appears to have been lost; confirm upstream.
                                if image_download_url:
                                    delta = {"content": f"\n```\n\n"}
                                else:
                                    delta = {"content": f"\n```\nFailed to load the image.\n"}
                    elif message.get("end_turn"):
                        part = content.get("parts", [])[0]
                        new_text = part[len_last_content:]
                        if not new_text:
                            # Nothing new: check for sandbox file links to resolve.
                            matches = re.findall(r'\(sandbox:(.*?)\)', part)
                            if matches:
                                file_url_content = ""
                                for i, sandbox_path in enumerate(matches):
                                    file_download_url = await service.get_response_file_url(conversation_id, message_id, sandbox_path)
                                    if file_download_url:
                                        # NOTE(review): placeholder-free f-string — likely
                                        # lost its download-link text; confirm upstream.
                                        file_url_content += f"\n```\n\n\n"
                                delta = {"content": file_url_content}
                            else:
                                delta = {}
                        else:
                            delta = {"content": new_text}
                        finish_reason = "stop"
                        end = True
                    else:
                        # Intermediate tool message finished; reset the cursor.
                        len_last_content = 0
                        if meta_data.get("finished_text"):
                            delta = {"content": f"\n{meta_data.get('finished_text')}\n"}
                        else:
                            continue
                else:
                    continue
                last_message_id = message_id
                last_role = role
                if not end and not delta.get("content"):
                    # Keep-alive: never emit a completely empty delta mid-stream.
                    delta = {"role": "assistant", "content": ""}
                chunk_new_data["choices"][0]["delta"] = delta
                chunk_new_data["choices"][0]["finish_reason"] = finish_reason
                if not service.history_disabled:
                    # Expose upstream ids so clients can continue the conversation.
                    chunk_new_data.update({
                        "message_id": message_id,
                        "conversation_id": conversation_id,
                    })
                completion_tokens += 1
                yield f"data: {json.dumps(chunk_new_data)}\n\n"
            elif chunk.startswith("data: [DONE]"):
                logger.info(f"Response Model: {model_slug}")
                yield "data: [DONE]\n\n"
            else:
                continue
        except Exception as e:
            # An error payload terminates the stream; anything else is skipped.
            if chunk.startswith("data: "):
                chunk_data = json.loads(chunk[6:])
                if chunk_data.get("error"):
                    logger.error(f"Error: {chunk_data.get('error')}")
                    yield "data: [DONE]\n\n"
                    break
            logger.error(f"Error: {chunk}, details: {str(e)}")
            continue
322 |
323 |
def get_url_from_content(content):
    """Split a leading URL off *content*.

    When *content* is a string starting with "http" and its first
    space-separated token matches the URL pattern, returns
    (url, content_without_url_stripped); otherwise returns (None, content).
    """
    if not (isinstance(content, str) and content.startswith('http')):
        return None, content
    leading_token = content.split(' ')[0]
    try:
        # re.match returns None on no match; None[0] raises and we fall through.
        url = re.match(
            r'(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
            leading_token)[0]
    except Exception:
        return None, content
    return url, content.replace(url, '').strip()
335 |
336 |
def format_messages_with_url(content):
    """Extract leading URLs from *content* and rebuild it as multimodal parts.

    Returns the (possibly shortened) string unchanged when no URL is found;
    otherwise a list holding one text part followed by an image_url part per
    extracted URL.
    """
    extracted = []
    while True:
        url, content = get_url_from_content(content)
        if not url:
            break
        extracted.append(url)
        logger.info(f"Found a file_url from messages: {url}")
    if not extracted:
        return content
    multimodal_parts = [
        {
            "type": "text",
            "text": content
        }
    ]
    for url in extracted:
        multimodal_parts.append({
            "type": "image_url",
            "image_url": {
                "url": url
            }
        })
    return multimodal_parts
362 |
363 |
async def api_messages_to_chat(service, api_messages, upload_by_url=False):
    """Convert OpenAI-style messages into ChatGPT backend message objects.

    Uploads image_url entries through *service* and turns them into asset
    pointers / attachments. Returns (chat_messages, prompt_tokens) where
    prompt_tokens = text tokens + an estimate for uploaded files.
    """
    file_tokens = 0
    chat_messages = []
    for api_message in api_messages:
        role = api_message.get('role')
        content = api_message.get('content')
        if upload_by_url:
            # Plain string content may embed leading URLs; promote to parts.
            if isinstance(content, str):
                content = format_messages_with_url(content)
        if isinstance(content, list):
            parts = []
            attachments = []
            content_type = "multimodal_text"
            for i in content:
                if i.get("type") == "text":
                    parts.append(i.get("text"))
                elif i.get("type") == "image_url":
                    image_url = i.get("image_url")
                    url = image_url.get("url")
                    detail = image_url.get("detail", "auto")
                    # Fetch the remote/base64 file, then upload it upstream.
                    file_content, mime_type = await get_file_content(url)
                    file_meta = await service.upload_file(file_content, mime_type)
                    if file_meta:
                        file_id = file_meta["file_id"]
                        file_size = file_meta["size_bytes"]
                        file_name = file_meta["file_name"]
                        mime_type = file_meta["mime_type"]
                        use_case = file_meta["use_case"]
                        if mime_type.startswith("image/"):
                            # Images become inline asset pointers + attachments.
                            width, height = file_meta["width"], file_meta["height"]
                            file_tokens += await calculate_image_tokens(width, height, detail)
                            parts.append({
                                "content_type": "image_asset_pointer",
                                "asset_pointer": f"file-service://{file_id}",
                                "size_bytes": file_size,
                                "width": width,
                                "height": height
                            })
                            attachments.append({
                                "id": file_id,
                                "size": file_size,
                                "name": file_name,
                                "mime_type": mime_type,
                                "width": width,
                                "height": height
                            })
                        else:
                            # Non-image files are attachment-only; poll the
                            # upload state unless already an "ace_upload".
                            if not use_case == "ace_upload":
                                await service.check_upload(file_id)
                            # Rough token estimate: one token per kilobyte.
                            file_tokens += file_size // 1000
                            attachments.append({
                                "id": file_id,
                                "size": file_size,
                                "name": file_name,
                                "mime_type": mime_type,
                            })
            metadata = {
                "attachments": attachments
            }
        else:
            content_type = "text"
            parts = [content]
            metadata = {}
        chat_message = {
            "id": f"{uuid.uuid4()}",
            "author": {"role": role},
            "content": {"content_type": content_type, "parts": parts},
            "metadata": metadata
        }
        chat_messages.append(chat_message)
    text_tokens = await num_tokens_from_messages(api_messages, service.resp_model)
    prompt_tokens = text_tokens + file_tokens
    return chat_messages, prompt_tokens
437 |
--------------------------------------------------------------------------------
/chatgpt/ChatService.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import random
4 | import uuid
5 |
6 | from fastapi import HTTPException
7 | from starlette.concurrency import run_in_threadpool
8 |
9 | from api.files import get_image_size, get_file_extension, determine_file_use_case
10 | from api.models import model_proxy
11 | from chatgpt.authorization import get_req_token, verify_token, get_fp
12 | from chatgpt.chatFormat import api_messages_to_chat, stream_response, format_not_stream_response, head_process_response
13 | from chatgpt.chatLimit import check_is_limit, handle_request_limit
14 | from chatgpt.proofofWork import get_config, get_dpl, get_answer_token, get_requirements_token
15 |
16 | from utils.Client import Client
17 | from utils.Logger import logger
18 | from utils.configs import (
19 | chatgpt_base_url_list,
20 | ark0se_token_url_list,
21 | history_disabled,
22 | pow_difficulty,
23 | conversation_only,
24 | enable_limit,
25 | upload_by_url,
26 | check_model,
27 | auth_key,
28 | turnstile_solver_url,
29 | oai_language,
30 | )
31 |
32 |
33 | class ChatService:
34 | def __init__(self, origin_token=None):
35 | # self.user_agent = random.choice(user_agents_list) if user_agents_list else "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
36 | self.req_token = get_req_token(origin_token)
37 | self.chat_token = "gAAAAAB"
38 | self.s = None
39 | self.ws = None
40 |
    async def set_dynamic_data(self, data):
        """Configure this service instance for one request.

        Resolves the access token / account id from req_token, loads the
        fingerprint (proxy, UA, impersonation), applies rate limits, picks the
        host and ark0se URLs, builds base headers and finally refreshes the
        dpl cache. Raises HTTPException(429) when the request limit is hit.
        """
        if self.req_token:
            # Tokens may be "access_token,account_id" pairs.
            req_len = len(self.req_token.split(","))
            if req_len == 1:
                self.access_token = await verify_token(self.req_token)
                self.account_id = None
            else:
                self.access_token = await verify_token(self.req_token.split(",")[0])
                self.account_id = self.req_token.split(",")[1]
        else:
            logger.info("Request token is empty, use no-auth 3.5")
            self.access_token = None
            self.account_id = None

        # Per-token browser fingerprint: proxy, UA and TLS impersonation.
        self.fp = get_fp(self.req_token)
        self.proxy_url = self.fp.get("proxy_url")
        self.user_agent = self.fp.get("user-agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0")
        self.impersonate = self.fp.get("impersonate", "safari15_3")
        logger.info(f"Request token: {self.req_token}")
        logger.info(f"Request proxy: {self.proxy_url}")
        logger.info(f"Request UA: {self.user_agent}")
        logger.info(f"Request impersonate: {self.impersonate}")

        self.data = data
        await self.set_model()
        if enable_limit and self.req_token:
            limit_response = await handle_request_limit(self.req_token, self.req_model)
            if limit_response:
                raise HTTPException(status_code=429, detail=limit_response)

        # Request body may override the account id and conversation context.
        self.account_id = self.data.get('Chatgpt-Account-Id', self.account_id)
        self.parent_message_id = self.data.get('parent_message_id')
        self.conversation_id = self.data.get('conversation_id')
        self.history_disabled = self.data.get('history_disabled', history_disabled)

        self.api_messages = self.data.get("messages", [])
        self.prompt_tokens = 0
        # Non-int max_tokens falls back to "effectively unlimited" (2**31 - 1).
        self.max_tokens = self.data.get("max_tokens", 2147483647)
        if not isinstance(self.max_tokens, int):
            self.max_tokens = 2147483647

        # self.proxy_url = random.choice(proxy_url_list) if proxy_url_list else None

        self.host_url = random.choice(chatgpt_base_url_list) if chatgpt_base_url_list else "https://chatgpt.com"
        self.ark0se_token_url = random.choice(ark0se_token_url_list) if ark0se_token_url_list else None

        self.s = Client(proxy=self.proxy_url, impersonate=self.impersonate)

        self.oai_device_id = str(uuid.uuid4())
        self.persona = None
        self.ark0se_token = None
        self.proof_token = None
        self.turnstile_token = None

        self.chat_headers = None
        self.chat_request = None

        self.base_headers = {
            'accept': '*/*',
            'accept-encoding': 'gzip, deflate, br, zstd',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'oai-device-id': self.oai_device_id,
            'oai-language': oai_language,
            'origin': self.host_url,
            'priority': 'u=1, i',
            'referer': f'{self.host_url}/',
            'sec-ch-ua': self.fp.get("sec-ch-ua", '"Chromium";v="124", "Microsoft Edge";v="124", "Not-A.Brand";v="99"'),
            'sec-ch-ua-mobile': self.fp.get("sec-ch-ua-mobile", "?0"),
            'sec-ch-ua-platform': self.fp.get("sec-ch-ua-platform", '"Windows"'),
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': self.user_agent
        }
        # Authenticated requests go to /backend-api; anonymous to /backend-anon.
        if self.access_token:
            self.base_url = self.host_url + "/backend-api"
            self.base_headers['authorization'] = f'Bearer {self.access_token}'
            if self.account_id:
                self.base_headers['chatgpt-account-id'] = self.account_id
        else:
            self.base_url = self.host_url + "/backend-anon"

        if auth_key:
            self.base_headers['authkey'] = auth_key

        await get_dpl(self)
128 |
129 | async def set_model(self):
130 | self.origin_model = self.data.get("model", "gpt-3.5-turbo-0125")
131 | self.resp_model = model_proxy.get(self.origin_model, self.origin_model)
132 | if "o1-preview" in self.origin_model:
133 | self.req_model = "o1-preview"
134 | elif "o1-mini" in self.origin_model:
135 | self.req_model = "o1-mini"
136 | elif "o1" in self.origin_model:
137 | self.req_model = "o1"
138 | elif "gpt-4.5o" in self.origin_model:
139 | self.req_model = "gpt-4.5o"
140 | elif "gpt-4o-canmore" in self.origin_model:
141 | self.req_model = "gpt-4o-canmore"
142 | elif "gpt-4o-mini" in self.origin_model:
143 | self.req_model = "gpt-4o-mini"
144 | elif "gpt-4o" in self.origin_model:
145 | self.req_model = "gpt-4o"
146 | elif "gpt-4-mobile" in self.origin_model:
147 | self.req_model = "gpt-4-mobile"
148 | elif "gpt-4-gizmo" in self.origin_model:
149 | self.req_model = "gpt-4o"
150 | elif "gpt-4" in self.origin_model:
151 | self.req_model = "gpt-4"
152 | elif "gpt-3.5" in self.origin_model:
153 | self.req_model = "text-davinci-002-render-sha"
154 | elif "auto" in self.origin_model:
155 | self.req_model = "auto"
156 | else:
157 | self.req_model = "auto"
158 |
    async def get_chat_requirements(self):
        """POST to sentinel/chat-requirements and satisfy every gate it returns.

        Solves turnstile (via external solver, best-effort), ark0se (via the
        configured token service) and proof-of-work challenges, storing the
        resulting tokens on self. Returns the chat token, or None in
        conversation-only mode. Raises HTTPException on any failure; unknown
        errors are wrapped as HTTPException(500).
        """
        if conversation_only:
            return None
        url = f'{self.base_url}/sentinel/chat-requirements'
        headers = self.base_headers.copy()
        try:
            config = get_config(self.user_agent)
            p = get_requirements_token(config)
            data = {'p': p}
            r = await self.s.post(url, headers=headers, json=data, timeout=5)
            if r.status_code == 200:
                resp = r.json()

                # NOTE(review): model availability and persona gating are
                # mutually exclusive branches — persona is only read when
                # check_model is disabled. Confirm this is intentional.
                if check_model:
                    r = await self.s.get(f'{self.base_url}/models', headers=headers, timeout=5)
                    if r.status_code == 200:
                        models = r.json().get('models')
                        if not any(self.req_model in model.get("slug", "") for model in models):
                            logger.error(f"Model {self.req_model} not support.")
                            raise HTTPException(
                                status_code=404,
                                detail={
                                    "message": f"The model `{self.origin_model}` does not exist or you do not have access to it.",
                                    "type": "invalid_request_error",
                                    "param": None,
                                    "code": "model_not_found",
                                },
                            )
                    else:
                        raise HTTPException(status_code=404, detail="Failed to get models")
                else:
                    self.persona = resp.get("persona")
                    if self.persona != "chatgpt-paid":
                        # gpt-4 is only available to paid personas.
                        if self.req_model == "gpt-4":
                            logger.error(f"Model {self.resp_model} not support for {self.persona}")
                            raise HTTPException(
                                status_code=404,
                                detail={
                                    "message": f"The model `{self.origin_model}` does not exist or you do not have access to it.",
                                    "type": "invalid_request_error",
                                    "param": None,
                                    "code": "model_not_found",
                                },
                            )

                # Turnstile: best-effort via the external solver; failures are
                # logged and ignored rather than fatal.
                turnstile = resp.get('turnstile', {})
                turnstile_required = turnstile.get('required')
                if turnstile_required:
                    turnstile_dx = turnstile.get("dx")
                    try:
                        if turnstile_solver_url:
                            res = await self.s.post(
                                turnstile_solver_url, json={"url": "https://chatgpt.com", "p": p, "dx": turnstile_dx}
                            )
                            self.turnstile_token = res.json().get("t")
                    except Exception as e:
                        logger.info(f"Turnstile ignored: {e}")
                        # raise HTTPException(status_code=403, detail="Turnstile required")

                # Ark0se: mandatory when required — no token service means 403.
                ark0se = resp.get('ark' + 'ose', {})
                ark0se_required = ark0se.get('required')
                if ark0se_required:
                    if self.persona == "chatgpt-freeaccount":
                        ark0se_method = "chat35"
                    else:
                        ark0se_method = "chat4"
                    if not self.ark0se_token_url:
                        raise HTTPException(status_code=403, detail="Ark0se service required")
                    ark0se_dx = ark0se.get("dx")
                    ark0se_client = Client(impersonate=self.fp.get("impersonate", "safari15_3"))
                    try:
                        r2 = await ark0se_client.post(
                            url=self.ark0se_token_url, json={"blob": ark0se_dx, "method": ark0se_method}, timeout=15
                        )
                        r2esp = r2.json()
                        logger.info(f"ark0se_token: {r2esp}")
                        # 'solved' defaults to True when absent from the reply.
                        if r2esp.get('solved', True):
                            self.ark0se_token = r2esp.get('token')
                        else:
                            raise HTTPException(status_code=403, detail="Failed to get Ark0se token")
                    except Exception:
                        raise HTTPException(status_code=403, detail="Failed to get Ark0se token")
                    finally:
                        await ark0se_client.close()

                # Proof of work: refuse challenges harder than the configured
                # threshold (lower hex difficulty string = harder), otherwise
                # solve in a worker thread to keep the event loop free.
                proofofwork = resp.get('proofofwork', {})
                proofofwork_required = proofofwork.get('required')
                if proofofwork_required:
                    proofofwork_diff = proofofwork.get("difficulty")
                    if proofofwork_diff <= pow_difficulty:
                        raise HTTPException(status_code=403, detail=f"Proof of work difficulty too high: {proofofwork_diff}")
                    proofofwork_seed = proofofwork.get("seed")
                    self.proof_token, solved = await run_in_threadpool(
                        get_answer_token, proofofwork_seed, proofofwork_diff, config
                    )
                    if not solved:
                        raise HTTPException(status_code=403, detail="Failed to solve proof of work")

                self.chat_token = resp.get('token')
                if not self.chat_token:
                    raise HTTPException(status_code=403, detail=f"Failed to get chat token: {r.text}")
                return self.chat_token
            else:
                # Non-200: surface the upstream detail with special cases for
                # Cloudflare interstitials and rate limiting.
                if "application/json" == r.headers.get("Content-Type", ""):
                    detail = r.json().get("detail", r.json())
                else:
                    detail = r.text
                if "cf-spinner-please-wait" in detail:
                    raise HTTPException(status_code=r.status_code, detail="cf-spinner-please-wait")
                if r.status_code == 429:
                    raise HTTPException(status_code=r.status_code, detail="rate-limit")
                raise HTTPException(status_code=r.status_code, detail=detail)
        except HTTPException as e:
            raise HTTPException(status_code=e.status_code, detail=e.detail)
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))
275 |
276 | async def prepare_send_conversation(self):
277 | try:
278 | chat_messages, self.prompt_tokens = await api_messages_to_chat(self, self.api_messages, upload_by_url)
279 | except Exception as e:
280 | logger.error(f"Failed to format messages: {str(e)}")
281 | raise HTTPException(status_code=400, detail="Failed to format messages.")
282 | self.chat_headers = self.base_headers.copy()
283 | self.chat_headers.update(
284 | {
285 | 'accept': 'text/event-stream',
286 | 'openai-sentinel-chat-requirements-token': self.chat_token,
287 | 'openai-sentinel-proof-token': self.proof_token,
288 | }
289 | )
290 | if self.ark0se_token:
291 | self.chat_headers['openai-sentinel-ark' + 'ose-token'] = self.ark0se_token
292 |
293 | if self.turnstile_token:
294 | self.chat_headers['openai-sentinel-turnstile-token'] = self.turnstile_token
295 |
296 | if conversation_only:
297 | self.chat_headers.pop('openai-sentinel-chat-requirements-token', None)
298 | self.chat_headers.pop('openai-sentinel-proof-token', None)
299 | self.chat_headers.pop('openai-sentinel-ark' + 'ose-token', None)
300 | self.chat_headers.pop('openai-sentinel-turnstile-token', None)
301 |
302 | if "gpt-4-gizmo" in self.origin_model:
303 | gizmo_id = self.origin_model.split("gpt-4-gizmo-")[-1]
304 | conversation_mode = {"kind": "gizmo_interaction", "gizmo_id": gizmo_id}
305 | else:
306 | conversation_mode = {"kind": "primary_assistant"}
307 |
308 | logger.info(f"Model mapping: {self.origin_model} -> {self.req_model}")
309 | self.chat_request = {
310 | "action": "next",
311 | "conversation_mode": conversation_mode,
312 | "force_nulligen": False,
313 | "force_paragen": False,
314 | "force_paragen_model_slug": "",
315 | "force_rate_limit": False,
316 | "force_use_sse": True,
317 | "history_and_training_disabled": self.history_disabled,
318 | "messages": chat_messages,
319 | "model": self.req_model,
320 | "parent_message_id": self.parent_message_id if self.parent_message_id else f"{uuid.uuid4()}",
321 | "reset_rate_limits": False,
322 | "suggestions": [],
323 | "timezone_offset_min": -480,
324 | "variant_purpose": "comparison_implicit",
325 | "websocket_request_id": f"{uuid.uuid4()}",
326 | }
327 | if self.conversation_id:
328 | self.chat_request['conversation_id'] = self.conversation_id
329 | return self.chat_request
330 |
331 | async def send_conversation(self):
332 | try:
333 | url = f'{self.base_url}/conversation'
334 | stream = self.data.get("stream", False)
335 | r = await self.s.post_stream(url, headers=self.chat_headers, json=self.chat_request, timeout=10, stream=True)
336 | if r.status_code != 200:
337 | rtext = await r.atext()
338 | if "application/json" == r.headers.get("Content-Type", ""):
339 | detail = json.loads(rtext).get("detail", json.loads(rtext))
340 | if r.status_code == 429:
341 | check_is_limit(detail, token=self.req_token, model=self.req_model)
342 | else:
343 | if "cf-spinner-please-wait" in rtext:
344 | # logger.error(f"Failed to send conversation: cf-spinner-please-wait")
345 | raise HTTPException(status_code=r.status_code, detail="cf-spinner-please-wait")
346 | if r.status_code == 429:
347 | # logger.error(f"Failed to send conversation: rate-limit")
348 | raise HTTPException(status_code=r.status_code, detail="rate-limit")
349 | detail = r.text[:100]
350 | # logger.error(f"Failed to send conversation: {detail}")
351 | raise HTTPException(status_code=r.status_code, detail=detail)
352 |
353 | content_type = r.headers.get("Content-Type", "")
354 | if "text/event-stream" in content_type:
355 | res, start = await head_process_response(r.aiter_lines())
356 | if not start:
357 | raise HTTPException(
358 | status_code=403,
359 | detail="Our systems have detected unusual activity coming from your system. Please try again later.",
360 | )
361 | if stream:
362 | return stream_response(self, res, self.resp_model, self.max_tokens)
363 | else:
364 | return await format_not_stream_response(
365 | stream_response(self, res, self.resp_model, self.max_tokens),
366 | self.prompt_tokens,
367 | self.max_tokens,
368 | self.resp_model,
369 | )
370 | elif "application/json" in content_type:
371 | rtext = await r.atext()
372 | resp = json.loads(rtext)
373 | raise HTTPException(status_code=r.status_code, detail=resp)
374 | else:
375 | rtext = await r.atext()
376 | raise HTTPException(status_code=r.status_code, detail=rtext)
377 | except HTTPException as e:
378 | raise HTTPException(status_code=e.status_code, detail=e.detail)
379 | except Exception as e:
380 | raise HTTPException(status_code=500, detail=str(e))
381 |
382 | async def get_download_url(self, file_id):
383 | url = f"{self.base_url}/files/{file_id}/download"
384 | headers = self.base_headers.copy()
385 | try:
386 | r = await self.s.get(url, headers=headers, timeout=10)
387 | if r.status_code == 200:
388 | download_url = r.json().get('download_url')
389 | return download_url
390 | else:
391 | raise HTTPException(status_code=r.status_code, detail=r.text)
392 | except Exception as e:
393 | logger.error(f"Failed to get download url: {e}")
394 | return ""
395 |
396 | async def get_download_url_from_upload(self, file_id):
397 | url = f"{self.base_url}/files/{file_id}/uploaded"
398 | headers = self.base_headers.copy()
399 | try:
400 | r = await self.s.post(url, headers=headers, json={}, timeout=10)
401 | if r.status_code == 200:
402 | download_url = r.json().get('download_url')
403 | return download_url
404 | else:
405 | raise HTTPException(status_code=r.status_code, detail=r.text)
406 | except Exception as e:
407 | logger.error(f"Failed to get download url from upload: {e}")
408 | return ""
409 |
410 | async def get_upload_url(self, file_name, file_size, use_case="multimodal"):
411 | url = f'{self.base_url}/files'
412 | headers = self.base_headers.copy()
413 | try:
414 | r = await self.s.post(
415 | url,
416 | headers=headers,
417 | json={"file_name": file_name, "file_size": file_size, "reset_rate_limits": False, "timezone_offset_min": -480, "use_case": use_case},
418 | timeout=5,
419 | )
420 | if r.status_code == 200:
421 | res = r.json()
422 | file_id = res.get('file_id')
423 | upload_url = res.get('upload_url')
424 | logger.info(f"file_id: {file_id}, upload_url: {upload_url}")
425 | return file_id, upload_url
426 | else:
427 | raise HTTPException(status_code=r.status_code, detail=r.text)
428 | except Exception as e:
429 | logger.error(f"Failed to get upload url: {e}")
430 | return "", ""
431 |
432 | async def upload(self, upload_url, file_content, mime_type):
433 | headers = self.base_headers.copy()
434 | headers.update(
435 | {
436 | 'accept': 'application/json, text/plain, */*',
437 | 'content-type': mime_type,
438 | 'x-ms-blob-type': 'BlockBlob',
439 | 'x-ms-version': '2020-04-08',
440 | }
441 | )
442 | headers.pop('authorization', None)
443 | headers.pop('oai-device-id', None)
444 | headers.pop('oai-language', None)
445 | try:
446 | r = await self.s.put(upload_url, headers=headers, data=file_content, timeout=60)
447 | if r.status_code == 201:
448 | return True
449 | else:
450 | raise HTTPException(status_code=r.status_code, detail=r.text)
451 | except Exception as e:
452 | logger.error(f"Failed to upload file: {e}")
453 | return False
454 |
455 | async def upload_file(self, file_content, mime_type):
456 | if not file_content or not mime_type:
457 | return None
458 |
459 | width, height = None, None
460 | if mime_type.startswith("image/"):
461 | try:
462 | width, height = await get_image_size(file_content)
463 | except Exception as e:
464 | logger.error(f"Error image mime_type, change to text/plain: {e}")
465 | mime_type = 'text/plain'
466 | file_size = len(file_content)
467 | file_extension = await get_file_extension(mime_type)
468 | file_name = f"{uuid.uuid4()}{file_extension}"
469 | use_case = await determine_file_use_case(mime_type)
470 |
471 | file_id, upload_url = await self.get_upload_url(file_name, file_size, use_case)
472 | if file_id and upload_url:
473 | if await self.upload(upload_url, file_content, mime_type):
474 | download_url = await self.get_download_url_from_upload(file_id)
475 | if download_url:
476 | file_meta = {
477 | "file_id": file_id,
478 | "file_name": file_name,
479 | "size_bytes": file_size,
480 | "mime_type": mime_type,
481 | "width": width,
482 | "height": height,
483 | "use_case": use_case,
484 | }
485 | logger.info(f"File_meta: {file_meta}")
486 | return file_meta
487 |
488 | async def check_upload(self, file_id):
489 | url = f'{self.base_url}/files/{file_id}'
490 | headers = self.base_headers.copy()
491 | try:
492 | for i in range(30):
493 | r = await self.s.get(url, headers=headers, timeout=5)
494 | if r.status_code == 200:
495 | res = r.json()
496 | retrieval_index_status = res.get('retrieval_index_status', '')
497 | if retrieval_index_status == "success":
498 | break
499 | await asyncio.sleep(1)
500 | return True
501 | except HTTPException:
502 | return False
503 |
504 | async def get_response_file_url(self, conversation_id, message_id, sandbox_path):
505 | try:
506 | url = f"{self.base_url}/conversation/{conversation_id}/interpreter/download"
507 | params = {"message_id": message_id, "sandbox_path": sandbox_path}
508 | headers = self.base_headers.copy()
509 | r = await self.s.get(url, headers=headers, params=params, timeout=10)
510 | if r.status_code == 200:
511 | return r.json().get("download_url")
512 | else:
513 | return None
514 | except Exception:
515 | logger.info("Failed to get response file url")
516 | return None
517 |
518 | async def close_client(self):
519 | if self.s:
520 | await self.s.close()
521 | if self.ws:
522 | await self.ws.close()
523 | del self.ws
524 |
--------------------------------------------------------------------------------