├── .gitignore
├── .dockerignore
├── docs
├── tokens.png
└── capsolver.jpg
├── requirements.txt
├── Dockerfile
├── .env.example
├── app.py
├── docker-compose.yml
├── utils
├── Logger.py
├── retry.py
├── Client.py
└── config.py
├── LICENSE
├── api
├── models.py
├── tokens.py
└── files.py
├── chatgpt
├── wssClient.py
├── chatLimit.py
├── globals.py
├── authorization.py
├── refreshToken.py
├── reverseProxy.py
├── turnstile.py
├── proofofWork.py
├── chatFormat.py
└── ChatService.py
├── .github
└── workflows
│ └── build_docker.yml
├── templates
└── tokens.html
├── chat2api.py
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | *.pyc
3 | /.git/
4 | /.idea/
5 | /tmp/
6 | /data/
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .env
2 | *.pyc
3 | /.git/
4 | /.idea/
5 | /docs/
6 | /tmp/
7 | /data/
--------------------------------------------------------------------------------
/docs/tokens.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suge8/chat2api/main/docs/tokens.png
--------------------------------------------------------------------------------
/docs/capsolver.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suge8/chat2api/main/docs/capsolver.jpg
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi
2 | curl_cffi
3 | uvicorn
4 | tiktoken
5 | python-dotenv
6 | websockets
7 | pillow
8 | pybase64
9 | jinja2
10 | APScheduler
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.11-slim
2 |
3 | WORKDIR /app
4 |
5 | COPY . /app
6 |
7 | RUN pip install --no-cache-dir -r requirements.txt
8 |
9 | EXPOSE 5005
10 |
11 | CMD ["python", "app.py"]
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | API_PREFIX=your_prefix
2 | CHATGPT_BASE_URL=https://chatgpt.com
3 | HISTORY_DISABLED=true
4 | PROXY_URL=your_first_proxy, your_second_proxy
5 | EXPORT_PROXY_URL=your_export_proxy
6 | ARKOSE_TOKEN_URL=https://arkose.example.com/token
7 | POW_DIFFICULTY=000032
8 | RETRY_TIMES=3
9 | ENABLE_GATEWAY=true
10 | CONVERSATION_ONLY=false
11 | ENABLE_LIMIT=true
12 | UPLOAD_BY_URL=false
13 | SCHEDULED_REFRESH=false
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
import uvicorn

# Align uvicorn's default and access log formats with the format used by
# utils.Logger so all log lines look the same.
log_config = uvicorn.config.LOGGING_CONFIG
default_format = "%(asctime)s | %(levelname)s | %(message)s"
access_format = r'%(asctime)s | %(levelname)s | %(client_addr)s: %(request_line)s %(status_code)s'
log_config["formatters"]["default"]["fmt"] = default_format
log_config["formatters"]["access"]["fmt"] = access_format

# Serve the FastAPI app defined in chat2api.py on all interfaces, port 5005
# (matches the port exposed by the Dockerfile / docker-compose).
uvicorn.run("chat2api:app", host="0.0.0.0", port=5005)
10 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | chat2api:
5 | image: lanqian528/chat2api:latest
6 | container_name: chat2api
7 | restart: unless-stopped
8 | ports:
9 | - '5005:5005'
10 | volumes:
11 | - ./data:/app/data # 挂载一些需要保存的数据
12 | environment:
13 | - TZ=Asia/Shanghai # 设置时区
14 | - ARKOSE_TOKEN_URL=http://arkose:5006/token # 已内置,不要改动
15 |
16 | arkose:
17 | image: lanqian528/funcaptcha_solver:latest
18 | container_name: funcaptcha_solver
19 | restart: unless-stopped
20 | ports:
21 | - '5006:5006'
22 | environment:
23 | - TZ=Asia/Shanghai # 设置时区
--------------------------------------------------------------------------------
/utils/Logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | logging.basicConfig(level=logging.INFO, format='%(asctime)s | %(levelname)s | %(message)s')
4 |
5 |
6 | class Logger:
7 | @staticmethod
8 | def info(message):
9 | logging.info(str(message))
10 |
11 | @staticmethod
12 | def warning(message):
13 | logging.warning("\033[0;33m" + str(message) + "\033[0m")
14 |
15 | @staticmethod
16 | def error(message):
17 | logging.error("\033[0;31m" + "-" * 50 + '\n| ' + str(message) + "\033[0m" + "\n" + "└" + "-" * 80)
18 |
19 | @staticmethod
20 | def debug(message):
21 | logging.debug("\033[0;37m" + str(message) + "\033[0m")
22 |
23 |
24 | logger = Logger()
25 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 aurora-develop
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/api/models.py:
--------------------------------------------------------------------------------
# Maps short, user-facing model aliases accepted by the API to the concrete
# dated model names reported in responses.
model_proxy = {
    "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
    "gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613",
    "gpt-4": "gpt-4-0613",
    "gpt-4-32k": "gpt-4-32k-0613",
    "gpt-4-turbo-preview": "gpt-4-0125-preview",
    "gpt-4-vision-preview": "gpt-4-1106-vision-preview",
    "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
    "gpt-4o": "gpt-4o-2024-05-13",
    "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
    "claude-3-opus": "claude-3-opus-20240229",
    "claude-3-sonnet": "claude-3-sonnet-20240229",
    "claude-3-haiku": "claude-3-haiku-20240307",
}

# Known system_fingerprint values per dated model, used when fabricating
# OpenAI-compatible completion responses.
model_system_fingerprint = {
    "gpt-3.5-turbo-0125": ["fp_b28b39ffa8"],
    "gpt-3.5-turbo-1106": ["fp_592ef5907d"],
    "gpt-4-0125-preview": ["fp_f38f4d6482", "fp_2f57f81c11", "fp_a7daf7c51e", "fp_a865e8ede4", "fp_13c70b9f70",
                           "fp_b77cb481ed"],
    "gpt-4-1106-preview": ["fp_e467c31c3d", "fp_d986a8d1ba", "fp_99a5a401bb", "fp_123d5a9f90", "fp_0d1affc7a6",
                           "fp_5c95a4634e"],
    "gpt-4-turbo-2024-04-09": ["fp_d1bac968b4"],
    "gpt-4o-2024-05-13": ["fp_d1bac968b4"]
}
26 |
--------------------------------------------------------------------------------
/chatgpt/wssClient.py:
--------------------------------------------------------------------------------
1 | import json
2 | import time
3 |
4 | from utils.Logger import logger
5 | import chatgpt.globals as globals
6 |
7 |
def save_wss_map(wss_map):
    """Persist the token -> websocket info mapping to disk as JSON."""
    with open(globals.WSS_MAP_FILE, "w") as fp:
        json.dump(wss_map, fp)
11 |
12 |
async def token2wss(token):
    """Look up cached websocket info for *token*.

    Returns ``(wss_mode, wss_url)``.  A cached URL is only trusted for one
    hour; an expired entry still reports its ``wss_mode`` (with a None URL)
    so the caller knows websocket mode applies and must re-register.
    Unknown or falsy tokens yield ``(False, None)``.
    """
    if not token:
        return False, None
    entry = globals.wss_map.get(token)
    if not entry:
        return False, None
    wss_mode = entry["wss_mode"]
    if not wss_mode:
        return False, None
    # Cached URLs expire after one hour.
    if int(time.time()) - entry.get("timestamp", 0) < 60 * 60:
        # Fixed: the log strings were f-strings with no placeholders (F541).
        logger.info("token -> wss_url from cache")
        return wss_mode, entry["wss_url"]
    logger.info("token -> wss_url expired")
    return wss_mode, None
29 |
30 |
async def set_wss(token, wss_mode, wss_url=None):
    """Record websocket info for *token* and persist the map.  Always True."""
    if not token:
        return True
    globals.wss_map[token] = {
        "timestamp": int(time.time()),
        "wss_url": wss_url,
        "wss_mode": wss_mode,
    }
    save_wss_map(globals.wss_map)
    return True
37 |
--------------------------------------------------------------------------------
/.github/workflows/build_docker.yml:
--------------------------------------------------------------------------------
1 | name: Build Docker Image
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | paths-ignore:
8 | - 'README.md'
9 | - 'docker-compose.yml'
10 | - 'docs/**'
11 | - '.github/workflows/build_docker.yml'
12 | workflow_dispatch:
13 |
14 | jobs:
15 | main:
16 | runs-on: ubuntu-latest
17 | steps:
18 | - name: Checkout
19 | uses: actions/checkout@v2
20 |
21 | - name: Set up QEMU
22 | uses: docker/setup-qemu-action@v3
23 |
24 | - name: Set up Docker Buildx
25 | uses: docker/setup-buildx-action@v3
26 |
27 | - name: Log in to Docker Hub
28 | uses: docker/login-action@v3
29 | with:
30 | username: ${{ secrets.DOCKER_USERNAME }}
31 | password: ${{ secrets.DOCKER_PASSWORD }}
32 |
33 | - name: Docker meta
34 | id: meta
35 | uses: docker/metadata-action@v5
36 | with:
37 | images: lanqian528/chat2api
38 | tags: |
39 | type=raw,value=latest,enable={{is_default_branch}}
40 | type=raw,value=v1.3.7
41 |
42 | - name: Build and push
43 | uses: docker/build-push-action@v5
44 | with:
45 | context: .
46 | platforms: linux/amd64,linux/arm64
47 | file: Dockerfile
48 | push: true
49 | tags: ${{ steps.meta.outputs.tags }}
50 | labels: ${{ steps.meta.outputs.labels }}
--------------------------------------------------------------------------------
/chatgpt/chatLimit.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 | from datetime import datetime
4 |
5 | from utils.Logger import logger
6 |
7 | limit_details = {}
8 |
9 |
def check_is_limit(detail, token, model):
    """Record a rate-limit hit for (token, model) from an upstream detail dict."""
    clears_in = detail.get('clears_in')
    if token and clears_in:
        # Remember the absolute time at which the limit clears.
        clear_time = int(time.time()) + clears_in
        limit_details.setdefault(token, {})[model] = clear_time
        logger.info(f"{token[:40]}: Reached {model} limit, will be cleared at {datetime.fromtimestamp(clear_time).replace(microsecond=0)}")
15 |
16 |
async def handle_request_limit(token, model):
    """Return a user-facing message if (token, model) is still rate-limited.

    Returns None when no limit applies; a cleared limit is removed from the
    bookkeeping dict as a side effect.
    """
    try:
        model_limits = limit_details.get(token)
        if not model_limits or model not in model_limits:
            return None
        limit_time = model_limits[model]
        if limit_time > int(time.time()):
            clear_date = datetime.fromtimestamp(limit_time).replace(microsecond=0)
            result = f"Request limit exceeded. You can continue with the default model now, or try again after {clear_date}"
            logger.info(result)
            return result
        # The limit window has elapsed; drop the stale entry.
        del model_limits[model]
        return None
    except KeyError as e:
        logger.error(f"Key error: {e}")
        return None
    except Exception as e:
        logger.error(f"An unexpected error occurred: {e}")
        return None
36 |
--------------------------------------------------------------------------------
/utils/retry.py:
--------------------------------------------------------------------------------
1 | from fastapi import HTTPException
2 |
3 | from utils.Logger import logger
4 | from utils.config import retry_times
5 |
6 |
async def async_retry(func, *args, max_retries=retry_times, **kwargs):
    """Await ``func(*args, **kwargs)``, retrying up to *max_retries* times.

    Only HTTPException triggers a retry; other exceptions propagate.  On the
    final failure the exception is re-raised, with 500s masked as a generic
    "Server error".
    """
    for attempt in range(max_retries + 1):
        try:
            return await func(*args, **kwargs)
        except HTTPException as e:
            last_attempt = attempt == max_retries
            if not last_attempt:
                logger.info(f"Retry {attempt + 1} status code {e.status_code}, {e.detail}. Retrying...")
                continue
            logger.error(f"Throw an exception {e.status_code}, {e.detail}")
            if e.status_code == 500:
                raise HTTPException(status_code=500, detail="Server error")
            raise HTTPException(status_code=e.status_code, detail=e.detail)
19 |
20 |
def retry(func, *args, max_retries=retry_times, **kwargs):
    """Call ``func(*args, **kwargs)``, retrying up to *max_retries* times.

    Synchronous twin of :func:`async_retry`.  Only HTTPException triggers a
    retry; on the final failure the exception is re-raised, with 500s masked
    as a generic "Server error".
    """
    for attempt in range(max_retries + 1):
        try:
            return func(*args, **kwargs)
        except HTTPException as e:
            if attempt == max_retries:
                logger.error(f"Throw an exception {e.status_code}, {e.detail}")
                if e.status_code == 500:
                    raise HTTPException(status_code=500, detail="Server error")
                raise HTTPException(status_code=e.status_code, detail=e.detail)
            # Fixed inconsistency: intermediate retries are informational,
            # matching async_retry (was logger.error here).
            logger.info(f"Retry {attempt + 1} status code {e.status_code}, {e.detail}. Retrying...")
33 |
--------------------------------------------------------------------------------
/chatgpt/globals.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 |
4 | from utils.Logger import logger
5 |
# All persistent state lives under ./data so it can be volume-mounted.
DATA_FOLDER = "data"
TOKENS_FILE = os.path.join(DATA_FOLDER, "token.txt")              # pooled tokens, one per line
REFRESH_MAP_FILE = os.path.join(DATA_FOLDER, "refresh_map.json")  # refresh_token -> access-token cache
ERROR_TOKENS_FILE = os.path.join(DATA_FOLDER, "error_token.txt")  # tokens that failed a refresh
WSS_MAP_FILE = os.path.join(DATA_FOLDER, "wss_map.json")          # token -> websocket info cache

# Shared mutable state used across the chatgpt package.
count = 0              # round-robin cursor into token_list (see authorization.get_req_token)
token_list = []        # pooled tokens uploaded by the operator
error_token_list = []  # tokens known to be bad; skipped during rotation
refresh_map = {}       # refresh_token -> {"token": ..., "timestamp": ...}
wss_map = {}           # token -> {"wss_url": ..., "wss_mode": ..., "timestamp": ...}


if not os.path.exists(DATA_FOLDER):
    os.makedirs(DATA_FOLDER)

# Load the JSON caches from disk if present; otherwise start empty.
if os.path.exists(REFRESH_MAP_FILE):
    with open(REFRESH_MAP_FILE, "r") as file:
        refresh_map = json.load(file)
else:
    refresh_map = {}

if os.path.exists(WSS_MAP_FILE):
    with open(WSS_MAP_FILE, "r") as file:
        wss_map = json.load(file)
else:
    wss_map = {}


# Token files hold one token per line; lines starting with '#' are comments.
if os.path.exists(TOKENS_FILE):
    with open(TOKENS_FILE, "r", encoding="utf-8") as f:
        for line in f:
            if line.strip() and not line.startswith("#"):
                token_list.append(line.strip())
else:
    # Create an empty file so later appends succeed.
    with open(TOKENS_FILE, "w", encoding="utf-8") as f:
        pass

if os.path.exists(ERROR_TOKENS_FILE):
    with open(ERROR_TOKENS_FILE, "r", encoding="utf-8") as f:
        for line in f:
            if line.strip() and not line.startswith("#"):
                error_token_list.append(line.strip())
else:
    with open(ERROR_TOKENS_FILE, "w", encoding="utf-8") as f:
        pass

if token_list:
    logger.info(f"Token list count: {len(token_list)}, Error token list count: {len(error_token_list)}")
--------------------------------------------------------------------------------
/chatgpt/authorization.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from fastapi import HTTPException
4 |
5 | from chatgpt.refreshToken import rt2ac
6 | from utils.Logger import logger
7 | from utils.config import authorization_list
8 | import chatgpt.globals as globals
9 |
10 |
def get_req_token(req_token):
    """Resolve the caller-supplied token.

    If *req_token* is one of the configured gateway authorization keys, hand
    out the next token from the shared pool (round-robin, skipping tokens in
    the error list); otherwise the caller's own token is returned verbatim.
    Returns None when gateway auth matched but the pool is empty.
    """
    if req_token not in authorization_list:
        return req_token
    if not globals.token_list:
        return None
    total = len(globals.token_list)
    globals.count = (globals.count + 1) % total
    # Scan at most one full cycle.  The original `while` loop never
    # terminated when every pooled token was in error_token_list.
    for _ in range(total):
        candidate = globals.token_list[globals.count]
        if candidate not in globals.error_token_list:
            return candidate
        globals.count = (globals.count + 1) % total
    # Every pooled token is marked bad: return the current slot anyway and
    # let the upstream call fail, rather than hanging the request.
    return globals.token_list[globals.count]
24 |
25 |
async def verify_token(req_token):
    """Turn *req_token* into a token usable against the upstream API.

    - empty token: 401 when gateway auth is configured, otherwise anonymous
      (None) access is allowed;
    - access tokens ("eyJhbGciOi...") and "fk-" keys pass through unchanged;
    - 45-character refresh tokens are exchanged for access tokens;
    - anything else is forwarded as-is.
    """
    if not req_token:
        if authorization_list:
            logger.error("Unauthorized with empty token.")
            raise HTTPException(status_code=401)
        return None
    if req_token.startswith("eyJhbGciOi") or req_token.startswith("fk-"):
        return req_token
    if len(req_token) == 45:
        try:
            return await rt2ac(req_token, force_refresh=False)
        except HTTPException as e:
            raise HTTPException(status_code=e.status_code, detail=e.detail)
    return req_token
45 |
46 |
async def refresh_all_tokens(force_refresh=False):
    """Refresh the access token for every 45-char refresh token in the pool."""
    for token in globals.token_list:
        if len(token) != 45:
            continue  # only refresh tokens can be exchanged
        try:
            await asyncio.sleep(2)  # pace requests between exchanges
            await rt2ac(token, force_refresh=force_refresh)
        except HTTPException:
            # Failures are already recorded by the refresh path; keep going.
            pass
    logger.info("All tokens refreshed.")
56 |
--------------------------------------------------------------------------------
/utils/Client.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | from curl_cffi.requests import AsyncSession
4 |
5 |
class Client:
    """Wrapper around two curl_cffi ``AsyncSession``s with browser impersonation.

    ``session`` handles ordinary requests; ``session2`` is dedicated to
    streaming POSTs so a long-lived stream reuses the primary session's
    headers/cookies without tying it up.
    """

    def __init__(self, proxy=None, timeout=15, verify=True):
        # Same proxy for both schemes; None disables proxying.
        self.proxies = {
            "http": proxy,
            "https": proxy,
        }
        self.timeout = timeout
        self.verify = verify
        # Single candidate today; random.choice leaves room to rotate
        # between several impersonation fingerprints later.
        self.impersonate = random.choice(["safari15_3"])
        self.session = AsyncSession(proxies=self.proxies, timeout=self.timeout, verify=self.verify)
        self.session2 = AsyncSession(proxies=self.proxies, timeout=self.timeout, verify=self.verify)

    async def post(self, *args, **kwargs):
        r = await self.session.post(*args, impersonate=self.impersonate, **kwargs)
        return r

    async def post_stream(self, *args, headers=None, cookies=None, **kwargs):
        # Inherit the primary session's headers/cookies (e.g. auth state)
        # unless the caller supplies overrides.
        if self.session:
            headers = headers or self.session.headers
            cookies = cookies or self.session.cookies
        r = await self.session2.post(*args, headers=headers, cookies=cookies, impersonate=self.impersonate, **kwargs)
        return r

    async def get(self, *args, **kwargs):
        r = await self.session.get(*args, impersonate=self.impersonate, **kwargs)
        return r

    async def request(self, *args, **kwargs):
        r = await self.session.request(*args, impersonate=self.impersonate, **kwargs)
        return r

    async def put(self, *args, **kwargs):
        r = await self.session.put(*args, impersonate=self.impersonate, **kwargs)
        return r

    async def close(self):
        # Fix: the original read ``self.session`` directly after deleting the
        # attribute, so a second close() raised AttributeError.  getattr makes
        # close() idempotent.
        session = getattr(self, "session", None)
        if session:
            try:
                await session.close()
                del self.session
            except Exception:
                pass
        session2 = getattr(self, "session2", None)
        if session2:
            try:
                await session2.close()
                del self.session2
            except Exception:
                pass
54 |
--------------------------------------------------------------------------------
/chatgpt/refreshToken.py:
--------------------------------------------------------------------------------
1 | import json
2 | import random
3 | import time
4 |
5 | from fastapi import HTTPException
6 |
7 | from utils.Client import Client
8 | from utils.Logger import logger
9 | from utils.config import proxy_url_list
10 | import chatgpt.globals as globals
11 |
12 |
def save_refresh_map(refresh_map):
    """Persist the refresh_token -> access-token cache to disk as JSON."""
    with open(globals.REFRESH_MAP_FILE, "w") as fp:
        json.dump(refresh_map, fp)
16 |
17 |
async def rt2ac(refresh_token, force_refresh=False):
    """Exchange a refresh token for an access token, with a 5-day cache."""
    cached = globals.refresh_map.get(refresh_token)
    cache_fresh = cached is not None and int(time.time()) - cached.get("timestamp", 0) < 5 * 24 * 60 * 60
    if cache_fresh and not force_refresh:
        access_token = cached["token"]
        logger.info(f"refresh_token -> access_token from cache")
        return access_token
    try:
        access_token = await chat_refresh(refresh_token)
        globals.refresh_map[refresh_token] = {"token": access_token, "timestamp": int(time.time())}
        save_refresh_map(globals.refresh_map)
        # NOTE(review): this logs the full access token — consider redacting.
        logger.info(f"refresh_token -> access_token with openai: {access_token}")
        return access_token
    except HTTPException as e:
        raise HTTPException(status_code=e.status_code, detail=e.detail)
32 |
33 |
async def chat_refresh(refresh_token):
    """Exchange *refresh_token* for an access token via auth0's OAuth endpoint.

    On any failure the token is appended to the error-token file and list,
    and HTTP 500 is raised.  The temporary Client is always closed.
    """
    data = {
        # OAuth client id of the official OpenAI iOS app.
        "client_id": "pdlLIX2Y72MIl2rhLhTE9VV9bN905kBh",
        "grant_type": "refresh_token",
        "redirect_uri": "com.openai.chat://auth0.openai.com/ios/com.openai.chat/callback",
        "refresh_token": refresh_token
    }
    # Pick a random proxy from the configured pool, if any.
    client = Client(proxy=random.choice(proxy_url_list) if proxy_url_list else None)
    try:
        r = await client.post("https://auth0.openai.com/oauth/token", json=data, timeout=5)
        if r.status_code == 200:
            access_token = r.json()['access_token']
            return access_token
        else:
            # Record the failing refresh token so rotation skips it later.
            with open(globals.ERROR_TOKENS_FILE, "a", encoding="utf-8") as f:
                f.write(refresh_token + "\n")
            if refresh_token not in globals.error_token_list:
                globals.error_token_list.append(refresh_token)
            raise Exception(r.text[:100])
    except Exception as e:
        # NOTE(review): logs the raw refresh token — consider redacting.
        logger.error(f"Failed to refresh access_token `{refresh_token}`: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to refresh access_token.")
    finally:
        await client.close()
        del client
59 |
--------------------------------------------------------------------------------
/utils/config.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from dotenv import load_dotenv
4 |
5 | from utils.Logger import logger
6 |
7 | load_dotenv(encoding="ascii")
8 |
9 |
def is_true(x):
    """Loosely interpret *x* as a boolean.

    Bools pass through; strings match a small set of truthy words
    (case-insensitive); ints are true only for 1; everything else is False.
    """
    if isinstance(x, bool):
        return x
    if isinstance(x, str):
        return x.lower() in ['true', '1', 't', 'y', 'yes']
    if isinstance(x, int):
        return x == 1
    return False
19 |
20 |
# --- raw environment values -------------------------------------------------
api_prefix = os.getenv('API_PREFIX', None)  # optional URL prefix for all routes
authorization = os.getenv('AUTHORIZATION', '').replace(' ', '')  # gateway auth keys, comma-separated
chatgpt_base_url = os.getenv('CHATGPT_BASE_URL', 'https://chatgpt.com').replace(' ', '')
auth_key = os.getenv('AUTH_KEY', None)
user_agents = os.getenv('USER_AGENTS', '').replace(', ', ',')

arkose_token_url = os.getenv('ARKOSE_TOKEN_URL', '').replace(' ', '')
proxy_url = os.getenv('PROXY_URL', '').replace(' ', '')
export_proxy_url = os.getenv('EXPORT_PROXY_URL', None)
cf_file_url = os.getenv('CF_FILE_URL', None)

# --- feature flags / tunables -----------------------------------------------
history_disabled = is_true(os.getenv('HISTORY_DISABLED', True))
pow_difficulty = os.getenv('POW_DIFFICULTY', '000032')
retry_times = int(os.getenv('RETRY_TIMES', 3))
enable_gateway = is_true(os.getenv('ENABLE_GATEWAY', True))
conversation_only = is_true(os.getenv('CONVERSATION_ONLY', False))
enable_limit = is_true(os.getenv('ENABLE_LIMIT', True))
upload_by_url = is_true(os.getenv('UPLOAD_BY_URL', False))
check_model = is_true(os.getenv('CHECK_MODEL', False))
scheduled_refresh = is_true(os.getenv('SCHEDULED_REFRESH', False))

# Comma-separated values are expanded into lists (used for rotation).
authorization_list = authorization.split(',') if authorization else []
chatgpt_base_url_list = chatgpt_base_url.split(',') if chatgpt_base_url else []
arkose_token_url_list = arkose_token_url.split(',') if arkose_token_url else []
proxy_url_list = proxy_url.split(',') if proxy_url else []
user_agents_list = user_agents.split(',') if user_agents else []

# Log the effective configuration once at import time.
logger.info("-" * 60)
logger.info("Chat2Api v1.3.7 | https://github.com/lanqian528/chat2api")
logger.info("-" * 60)
logger.info("Environment variables:")
logger.info("API_PREFIX: " + str(api_prefix))
logger.info("AUTHORIZATION: " + str(authorization_list))
logger.info("CHATGPT_BASE_URL: " + str(chatgpt_base_url_list))
logger.info("AUTH_KEY: " + str(auth_key))
logger.info("ARKOSE_TOKEN_URL: " + str(arkose_token_url_list))
logger.info("PROXY_URL: " + str(proxy_url_list))
logger.info("EXPORT_PROXY_URL: " + str(export_proxy_url))
logger.info("HISTORY_DISABLED: " + str(history_disabled))
logger.info("POW_DIFFICULTY: " + str(pow_difficulty))
logger.info("RETRY_TIMES: " + str(retry_times))
logger.info("ENABLE_GATEWAY: " + str(enable_gateway))
logger.info("CONVERSATION_ONLY: " + str(conversation_only))
logger.info("ENABLE_LIMIT: " + str(enable_limit))
logger.info("UPLOAD_BY_URL: " + str(upload_by_url))
logger.info("CHECK_MODEL: " + str(check_model))
logger.info("SCHEDULED_REFRESH: " + str(scheduled_refresh))
logger.info("-" * 60)
69 |
--------------------------------------------------------------------------------
/api/tokens.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import tiktoken
4 |
5 |
async def calculate_image_tokens(width, height, detail):
    """Estimate vision tokens for an image of the given pixel dimensions.

    "low" detail is a flat 85 tokens.  Otherwise the image is scaled to fit
    within 2048x2048, then its short side is scaled down to at most 768 px,
    and the result costs 170 tokens per 512x512 tile plus a base of 85.
    """
    if detail == "low":
        return 85

    # Fit within a 2048-px bounding box.
    longest = max(width, height)
    if longest > 2048:
        ratio = 2048 / longest
        width = int(width * ratio)
        height = int(height * ratio)

    # Shrink so the short side is at most 768 px.
    shortest = min(width, height)
    if shortest > 768:
        ratio = 768 / shortest
        width = int(width * ratio)
        height = int(height * ratio)

    tiles = math.ceil(width / 512) * math.ceil(height / 512)
    return tiles * 170 + 85
38 |
39 |
async def num_tokens_from_messages(messages, model=''):
    """Approximate prompt token count for a list of chat messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model names fall back to the cl100k_base encoding.
        encoding = tiktoken.get_encoding("cl100k_base")
    # gpt-3.5-turbo-0301 charged one extra token per message.
    tokens_per_message = 4 if model == "gpt-3.5-turbo-0301" else 3
    num_tokens = 3  # accounts for the assistant reply priming
    for message in messages:
        num_tokens += tokens_per_message
        for value in message.values():
            if isinstance(value, list):
                # Multimodal content: only text parts are counted here;
                # image parts are priced via calculate_image_tokens.
                for part in value:
                    if part.get("type") == "text":
                        num_tokens += len(encoding.encode(part.get("text")))
            else:
                num_tokens += len(encoding.encode(value))
    return num_tokens
63 |
64 |
async def num_tokens_from_content(content, model=None):
    """Token count of a plain string under the model's encoding."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(content))
73 |
74 |
async def split_tokens_from_content(content, max_tokens, model=None):
    """Truncate *content* to *max_tokens* tokens.

    Returns ``(text, used_tokens, finish_reason)`` where finish_reason is
    "length" when truncation occurred and "stop" otherwise.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
    tokens = encoding.encode(content)
    if len(tokens) >= max_tokens:
        # Hit the cap: decode only the first max_tokens tokens.
        return encoding.decode(tokens[:max_tokens]), max_tokens, "length"
    return content, len(tokens), "stop"
--------------------------------------------------------------------------------
/templates/tokens.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Tokens 管理
7 |
8 |
54 |
55 |
56 |
57 |
58 |
当前可用 Tokens 数量:{{ tokens_count }}
59 |
64 |
65 |
点击清空,将会清空上传和错误的 Tokens
66 |
69 |
70 |
71 |
72 |
73 |
错误 Tokens
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
--------------------------------------------------------------------------------
/chat2api.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import types
3 | import warnings
4 |
5 | from apscheduler.schedulers.asyncio import AsyncIOScheduler
6 | from fastapi import FastAPI, Request, Depends, HTTPException, Form
7 | from fastapi.middleware.cors import CORSMiddleware
8 | from fastapi.responses import HTMLResponse
9 | from fastapi.responses import StreamingResponse, JSONResponse
10 | from fastapi.security import OAuth2PasswordBearer
11 | from fastapi.templating import Jinja2Templates
12 | from starlette.background import BackgroundTask
13 |
14 | from chatgpt.ChatService import ChatService
15 | from chatgpt.authorization import refresh_all_tokens
16 | import chatgpt.globals as globals
17 | from chatgpt.reverseProxy import chatgpt_reverse_proxy
18 | from utils.Logger import logger
19 | from utils.config import api_prefix, scheduled_refresh
20 | from utils.retry import async_retry
21 |
warnings.filterwarnings("ignore")

app = FastAPI()
scheduler = AsyncIOScheduler()
templates = Jinja2Templates(directory="templates")
# auto_error=False: a missing Authorization header yields None instead of an
# automatic 401, so anonymous access can be decided per-route.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token", auto_error=False)

# Fully open CORS: the service is an API gateway consumed from anywhere.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
36 |
37 |
@app.on_event("startup")
async def app_start():
    """Optionally schedule periodic token refreshes and kick off an initial
    non-forced refresh in the background once the event loop is running."""
    if scheduled_refresh:
        # Every 4th day at 03:00, force-refresh all pooled tokens.
        scheduler.add_job(id='refresh', func=refresh_all_tokens, trigger='cron', hour=3, minute=0, day='*/4', kwargs={'force_refresh': True})
        scheduler.start()
        # Fire-and-forget: schedule the initial refresh as a task so startup
        # is not blocked by token exchanges.
        asyncio.get_event_loop().call_later(0, lambda: asyncio.create_task(refresh_all_tokens(force_refresh=False)))
44 |
45 |
async def to_send_conversation(request_data, req_token):
    """Build a ChatService for the request and fetch chat requirements.

    The service's HTTP client is closed on any failure so sessions are not
    leaked; HTTPExceptions are re-raised, anything else becomes a 500.
    """
    chat_service = ChatService(req_token)
    try:
        await chat_service.set_dynamic_data(request_data)
        await chat_service.get_chat_requirements()
    except HTTPException as e:
        await chat_service.close_client()
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        await chat_service.close_client()
        logger.error(f"Server error, {str(e)}")
        raise HTTPException(status_code=500, detail="Server error")
    return chat_service
59 |
60 |
@app.post(f"/{api_prefix}/v1/chat/completions" if api_prefix else "/v1/chat/completions")
async def send_conversation(request: Request, req_token: str = Depends(oauth2_scheme)):
    """OpenAI-compatible chat completions endpoint.

    Parses the JSON body, builds a ChatService (with retries), and returns
    either a streaming SSE response or a plain JSON response.  The service's
    HTTP client is closed via a background task once the response is sent.
    """
    try:
        request_data = await request.json()
    except Exception:
        raise HTTPException(status_code=400, detail={"error": "Invalid JSON body"})

    # Service construction is retried on HTTPException (see utils.retry).
    chat_service = await async_retry(to_send_conversation, request_data, req_token)
    try:
        await chat_service.prepare_send_conversation()
        res = await chat_service.send_conversation()
        if isinstance(res, types.AsyncGeneratorType):
            # Streaming: close the client only after the stream is consumed.
            background = BackgroundTask(chat_service.close_client)
            return StreamingResponse(res, media_type="text/event-stream", background=background)
        else:
            background = BackgroundTask(chat_service.close_client)
            return JSONResponse(res, media_type="application/json", background=background)
    except HTTPException as e:
        await chat_service.close_client()
        if e.status_code == 500:
            logger.error(f"Server error, {str(e)}")
            raise HTTPException(status_code=500, detail="Server error")
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        await chat_service.close_client()
        logger.error(f"Server error, {str(e)}")
        raise HTTPException(status_code=500, detail="Server error")
88 |
89 |
@app.get(f"/{api_prefix}/tokens" if api_prefix else "/tokens", response_class=HTMLResponse)
async def upload_html(request: Request):
    """Render the token-management page with the count of usable tokens."""
    usable = set(globals.token_list) - set(globals.error_token_list)
    context = {"request": request, "api_prefix": api_prefix, "tokens_count": len(usable)}
    return templates.TemplateResponse("tokens.html", context)
95 |
96 |
@app.post(f"/{api_prefix}/tokens/upload" if api_prefix else "/tokens/upload")
async def upload_post(text: str = Form(...)):
    """Append uploaded tokens (one per line; '#' lines are comments) to the pool.

    Returns the count of usable (non-error) tokens after the upload.
    """
    new_tokens = [line.strip() for line in text.split("\n")
                  if line.strip() and not line.startswith("#")]
    if new_tokens:
        globals.token_list.extend(new_tokens)
        # Single append-mode write instead of reopening the file per token.
        with open("data/token.txt", "a", encoding="utf-8") as f:
            f.write("\n".join(new_tokens) + "\n")
    logger.info(f"Token count: {len(globals.token_list)}, Error token count: {len(globals.error_token_list)}")
    tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
    return {"status": "success", "tokens_count": tokens_count}
108 |
109 |
110 | @app.post(f"/{api_prefix}/tokens/clear" if api_prefix else "/tokens/clear")
111 | async def upload_post():
112 | globals.token_list.clear()
113 | globals.error_token_list.clear()
114 | with open("data/token.txt", "w", encoding="utf-8") as f:
115 | pass
116 | logger.info(f"Token count: {len(globals.token_list)}, Error token count: {len(globals.error_token_list)}")
117 | tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
118 | return {"status": "success", "tokens_count": tokens_count}
119 |
120 |
121 | @app.post(f"/{api_prefix}/tokens/error" if api_prefix else "/tokens/error")
122 | async def error_tokens():
123 | error_tokens_list = list(set(globals.error_token_list))
124 | return {"status": "success", "error_tokens": error_tokens_list}
125 |
126 |
127 | @app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH", "TRACE"])
128 | async def reverse_proxy(request: Request, path: str):
129 | return await chatgpt_reverse_proxy(request, path)
130 |
--------------------------------------------------------------------------------
/api/files.py:
--------------------------------------------------------------------------------
1 | import io
2 |
3 | import pybase64
4 | from PIL import Image
5 |
6 | from utils.Client import Client
7 | from utils.config import export_proxy_url, cf_file_url
8 |
9 |
async def get_file_content(url):
    """Fetch the raw bytes and mime type for *url*.

    Handles base64 ``data:`` URIs inline; anything else is downloaded, either
    through the configured ``cf_file_url`` worker or directly via the export
    proxy. Returns ``(content, mime_type)``, or ``(None, None)`` when the
    remote request does not return HTTP 200.
    """
    if url.startswith("data:"):
        # Shape: "data:<mime>;base64,<payload>" — decode the inline payload.
        mime_type = url.split(';')[0].split(':')[1]
        payload = url.split(',')[1]
        return pybase64.b64decode(payload), mime_type

    client = Client()
    try:
        if cf_file_url:
            # Delegate the download to the configured worker endpoint.
            response = await client.post(cf_file_url, timeout=60, json={"file_url": url})
        else:
            response = await client.get(url, proxy=export_proxy_url, timeout=60)
        if response.status_code != 200:
            return None, None
        # Strip any ";charset=..." suffix from the Content-Type header.
        content_type = response.headers.get('Content-Type', '').split(';')[0].strip()
        return response.content, content_type
    finally:
        await client.close()
        del client
31 |
32 |
async def determine_file_use_case(mime_type):
    """Classify *mime_type* into the upload use case expected upstream.

    Returns "multimodal" for image types the model can view directly,
    "my_files" for recognized text/document types, and "ace_upload" for
    everything else.
    """
    multimodal_types = ["image/jpeg", "image/webp", "image/png", "image/gif"]
    # NOTE: "application/x-latex" was previously misspelled "application/x-latext",
    # which silently routed LaTeX files to "ace_upload"; the spelling now matches
    # the entry in get_file_extension().
    my_files_types = ["text/x-php", "application/msword", "text/x-c", "text/html",
                      "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
                      "application/json", "text/javascript", "application/pdf",
                      "text/x-java", "text/x-tex", "text/x-typescript", "text/x-sh",
                      "text/x-csharp", "application/vnd.openxmlformats-officedocument.presentationml.presentation",
                      "text/x-c++", "application/x-latex", "text/markdown", "text/plain",
                      "text/x-ruby", "text/x-script.python"]

    if mime_type in multimodal_types:
        return "multimodal"
    elif mime_type in my_files_types:
        return "my_files"
    else:
        return "ace_upload"
49 |
50 |
async def get_image_size(file_content):
    """Return the (width, height) in pixels of an image given its raw bytes."""
    buffer = io.BytesIO(file_content)
    with Image.open(buffer) as image:
        return image.width, image.height
54 |
55 |
# Mime type -> file extension lookup table. Built once at import time instead
# of being reconstructed on every call to get_file_extension().
_EXTENSION_MAPPING = {
    "image/jpeg": ".jpg",
    "image/png": ".png",
    "image/gif": ".gif",
    "image/webp": ".webp",
    "text/x-php": ".php",
    "application/msword": ".doc",
    "text/x-c": ".c",
    "text/html": ".html",
    "application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
    "application/json": ".json",
    "text/javascript": ".js",
    "application/pdf": ".pdf",
    "text/x-java": ".java",
    "text/x-tex": ".tex",
    "text/x-typescript": ".ts",
    "text/x-sh": ".sh",
    "text/x-csharp": ".cs",
    "application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
    "text/x-c++": ".cpp",
    "application/x-latex": ".latex",
    "text/markdown": ".md",
    "text/plain": ".txt",
    "text/x-ruby": ".rb",
    "text/x-script.python": ".py",
    "application/zip": ".zip",
    "application/x-zip-compressed": ".zip",
    "application/x-tar": ".tar",
    "application/x-compressed-tar": ".tar.gz",
    "application/vnd.rar": ".rar",
    "application/x-rar-compressed": ".rar",
    "application/x-7z-compressed": ".7z",
    "application/octet-stream": ".bin",
    "audio/mpeg": ".mp3",
    "audio/wav": ".wav",
    "audio/ogg": ".ogg",
    "audio/aac": ".aac",
    "video/mp4": ".mp4",
    "video/x-msvideo": ".avi",
    "video/x-matroska": ".mkv",
    "video/webm": ".webm",
    "application/rtf": ".rtf",
    "application/vnd.ms-excel": ".xls",
    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
    "text/css": ".css",
    "text/xml": ".xml",
    "application/xml": ".xml",
    "application/vnd.android.package-archive": ".apk",
    "application/vnd.apple.installer+xml": ".mpkg",
    "application/x-bzip": ".bz",
    "application/x-bzip2": ".bz2",
    "application/x-csh": ".csh",
    "application/x-debian-package": ".deb",
    "application/x-dvi": ".dvi",
    "application/java-archive": ".jar",
    "application/x-java-jnlp-file": ".jnlp",
    "application/vnd.mozilla.xul+xml": ".xul",
    "application/vnd.ms-fontobject": ".eot",
    "application/ogg": ".ogx",
    "application/x-font-ttf": ".ttf",
    "application/font-woff": ".woff",
    "application/x-shockwave-flash": ".swf",
    "application/vnd.visio": ".vsd",
    "application/xhtml+xml": ".xhtml",
    "application/vnd.ms-powerpoint": ".ppt",
    "application/vnd.oasis.opendocument.text": ".odt",
    "application/vnd.oasis.opendocument.spreadsheet": ".ods",
    "application/x-xpinstall": ".xpi",
    "application/vnd.google-earth.kml+xml": ".kml",
    "application/vnd.google-earth.kmz": ".kmz",
    "application/x-font-otf": ".otf",
    "application/vnd.ms-excel.addin.macroEnabled.12": ".xlam",
    "application/vnd.ms-excel.sheet.binary.macroEnabled.12": ".xlsb",
    "application/vnd.ms-excel.template.macroEnabled.12": ".xltm",
    "application/vnd.ms-powerpoint.addin.macroEnabled.12": ".ppam",
    "application/vnd.ms-powerpoint.presentation.macroEnabled.12": ".pptm",
    "application/vnd.ms-powerpoint.slideshow.macroEnabled.12": ".ppsm",
    "application/vnd.ms-powerpoint.template.macroEnabled.12": ".potm",
    "application/vnd.ms-word.document.macroEnabled.12": ".docm",
    "application/vnd.ms-word.template.macroEnabled.12": ".dotm",
    "application/x-ms-application": ".application",
    "application/x-ms-wmd": ".wmd",
    "application/x-ms-wmz": ".wmz",
    "application/x-ms-xbap": ".xbap",
    "application/vnd.ms-xpsdocument": ".xps",
    "application/x-silverlight-app": ".xap"
}


async def get_file_extension(mime_type):
    """Return the file extension (with leading dot) for *mime_type*, or "" if unknown."""
    return _EXTENSION_MAPPING.get(mime_type, "")
145 |
--------------------------------------------------------------------------------
/chatgpt/reverseProxy.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | from fastapi import Request, HTTPException
4 | from fastapi.responses import StreamingResponse, Response
5 | from starlette.background import BackgroundTask
6 |
7 | from utils.Client import Client
8 | from utils.config import chatgpt_base_url_list, proxy_url_list, enable_gateway
9 |
# Client request headers that must NOT be forwarded to the ChatGPT upstream:
# proxy/CDN forwarding headers, Cloudflare metadata, and hop-by-hop fields.
# Only membership ("in") checks are performed on this list; the original
# contained many exact duplicates, which are removed here (order preserved).
headers_reject_list = [
    "x-real-ip",
    "x-forwarded-for",
    "x-forwarded-proto",
    "x-forwarded-port",
    "x-forwarded-host",
    "x-forwarded-server",
    "cf-warp-tag-id",
    "cf-visitor",
    "cf-ray",
    "cf-connecting-ip",
    "cf-ipcountry",
    "cdn-loop",
    "remote-host",
    "x-frame-options",
    "x-xss-protection",
    "x-content-type-options",
    "content-security-policy",
    "host",
    "cookie",
    "connection",
    "content-length",
    "content-encoding",
    "x-middleware-prefetch",
    "x-nextjs-data",
    "purpose",
    "x-forwarded-uri",
    "x-forwarded-path",
    "x-forwarded-method",
    "x-forwarded-protocol",
    "x-forwarded-scheme",
    "cf-request-id",
    "cf-worker",
    "cf-access-client-id",
    "cf-access-client-device-type",
    "cf-access-client-device-model",
    "cf-access-client-device-name",
    "cf-access-client-device-brand",
]
60 |
61 |
async def chatgpt_reverse_proxy(request: Request, path: str):
    """Forward *path* to a ChatGPT upstream and rewrite the response for this host.

    Picks an upstream base URL, strips/rebuilds identifying headers, proxies the
    request, then rewrites redirect Locations, response bodies, and cookies so
    that upstream hostnames point back at this gateway.

    Raises:
        HTTPException: 404 when the gateway is disabled; 500 on any proxy failure.
    """
    if not enable_gateway:
        raise HTTPException(status_code=404, detail="Gateway is disabled")
    try:
        origin_host = request.url.netloc
        # A netloc with an explicit port is assumed to be a plain-http deployment.
        if ":" in origin_host:
            petrol = "http"
        else:
            petrol = "https"
        if path.startswith("v1/"):
            # v1/* endpoints are served from ab.chatgpt.com rather than the chat host.
            base_url = "https://ab.chatgpt.com"
        else:
            base_url = random.choice(chatgpt_base_url_list) if chatgpt_base_url_list else "https://chatgpt.com"
        params = dict(request.query_params)
        # Drop identifying / hop-by-hop headers; the rest pass through.
        headers = {
            key: value for key, value in request.headers.items()
            if (key.lower() not in ["host", "origin", "referer", "user-agent",
                                    "authorization"] and key.lower() not in headers_reject_list)
        }
        request_cookies = dict(request.cookies)

        # Present a fixed, consistent browser fingerprint to the upstream.
        headers.update({
            "accept-Language": "en-US,en;q=0.9",
            "host": base_url.replace("https://", "").replace("http://", ""),
            "origin": base_url,
            "referer": f"{base_url}/",
            "sec-ch-ua": '"Chromium";v="124", "Microsoft Edge";v="124", "Not-A.Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36 Edg/124.0.0.0"
        })

        if request.headers.get('Authorization'):
            headers['Authorization'] = request.headers['Authorization']

        if headers.get("Content-Type") == "application/json":
            data = await request.json()
        else:
            data = await request.body()

        client = Client(proxy=random.choice(proxy_url_list) if proxy_url_list else None)
        try:
            # The client is closed by the BackgroundTask after the response is sent.
            background = BackgroundTask(client.close)
            r = await client.request(request.method, f"{base_url}/{path}", params=params, headers=headers,
                                     cookies=request_cookies, data=data, stream=True, allow_redirects=False)
            if r.status_code == 304:
                return Response(status_code=304, background=background)
            elif r.status_code == 307:
                # Guard against a missing Location header (previously a TypeError).
                location = r.headers.get("Location") or ""
                if "oai-dm=1" not in location:
                    return Response(status_code=307, headers={
                        "Location": location.replace("chat.openai.com", origin_host)
                        .replace("chatgpt.com", origin_host)
                        .replace("https", petrol) + "?oai-dm=1"}, background=background)
                else:
                    return Response(status_code=307, headers={"Location": location},
                                    background=background)
            elif r.status_code == 302:
                location = r.headers.get("Location") or ""
                return Response(status_code=302,
                                headers={"Location": location.replace("chatgpt.com", origin_host)
                                .replace("chat.openai.com", origin_host)
                                .replace("ab.chatgpt.com", origin_host)
                                .replace("cdn.oaistatic.com", origin_host)
                                .replace("https", petrol)}, background=background)
            elif 'stream' in r.headers.get("content-type", ""):
                return StreamingResponse(r.aiter_content(), media_type=r.headers.get("content-type", ""),
                                         background=background)
            else:
                if "/conversation" in path or "/register-websocket" in path:
                    # Conversation payloads must not be rewritten.
                    response = Response(content=(await r.atext()), media_type=r.headers.get("content-type"),
                                        status_code=r.status_code, background=background)
                else:
                    # Rewrite upstream hostnames in the body so assets resolve via this gateway.
                    content = ((await r.atext()).replace("chatgpt.com", origin_host)
                               .replace("chat.openai.com", origin_host)
                               .replace("ab.chatgpt.com", origin_host)
                               .replace("cdn.oaistatic.com", origin_host)
                               .replace("https", petrol))
                    response = Response(content=content, media_type=r.headers.get("content-type"),
                                        status_code=r.status_code, background=background)
                # Forward upstream Set-Cookie values the client does not already have.
                for cookie_name in r.cookies:
                    if cookie_name in request_cookies:
                        continue
                    for cookie_domain in [".chatgpt.com"]:
                        cookie_value = r.cookies.get(name=cookie_name, domain=cookie_domain)
                        if cookie_name.startswith("__"):
                            # __Secure-/__Host- prefixed cookies require Secure (+HttpOnly here).
                            response.set_cookie(key=cookie_name, value=cookie_value, secure=True, httponly=True)
                        else:
                            response.set_cookie(key=cookie_name, value=cookie_value)
                return response
        except Exception:
            # Close the upstream client, then propagate: previously the exception
            # was swallowed here and the route returned None, producing an opaque
            # framework error instead of an HTTP 500.
            await client.close()
            raise

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
158 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # CHAT2API
2 |
3 | 🤖 一个简单的 ChatGPT TO API 代理
4 |
5 | 🌟 无需账号即可使用免费、无限的 `GPT-3.5`
6 |
7 | 💥 支持 AccessToken 使用账号,支持 `GPT-4`、`GPT-4o`、 `GPTs`
8 |
9 | 🔍 回复格式与真实 API 完全一致,适配几乎所有客户端
10 |
11 | ## 交流群
12 |
13 | [https://t.me/chat2api](https://t.me/chat2api)
14 |
15 | 要提问请先阅读完仓库文档,尤其是常见问题部分。
16 |
17 | 提问时请提供:
18 |
19 | 1. 启动日志截图(敏感信息打码,包括环境变量和版本号)
20 | 2. 报错的日志信息(敏感信息打码)
21 | 3. 接口返回的状态码和响应体
22 |
23 | ## 功能
24 |
25 | ### 最新版 v1.3.7
26 |
27 | > 已完成
28 | > - [x] 流式、非流式传输
29 | > - [x] 免登录 GPT-3.5 对话
30 | > - [x] GPT-3.5 对话(传入模型名不包含 gpt-4,则默认使用 gpt-3.5,也就是 text-davinci-002-render-sha)
> - [x] GPT-4 对话(传入模型名包含: gpt-4,gpt-4o,gpt-4o-mini,gpt-4-mobile 即可使用对应模型,需传入 AccessToken)
32 | > - [x] GPT-4 画图、代码、联网
33 | > - [x] 支持 GPTs(传入模型名:gpt-4-gizmo-g-*)
34 | > - [x] 支持 Team Plus 账号(需传入 team account id)
35 | > - [x] 上传图片、文件(格式为 API 对应格式,支持 URL 和 base64)
36 | > - [x] WebUI([http://127.0.0.1:5005](http://127.0.0.1:5005),不支持登录使用, 网关副产品,因此不做维护)
37 | > - [x] 可作为网关使用,可多机分布部署
38 | > - [x] 多账号轮询,同时支持 AccessToken 和 RefreshToken
39 | > - [x] 请求失败重试,自动轮询下一个 Token
40 | > - [x] Tokens 管理,支持上传、清除
41 | > - [x] 定时使用 RefreshToken 刷新 AccessToken / 每次启动将会全部非强制刷新一次,每4天晚上3点全部强制刷新一次。
42 | > - [x] 支持文件下载,需要开启历史记录
43 |
44 | > TODO
45 | > - [ ] 暂无,欢迎提 issue
46 |
47 | ## Tokens 管理
48 |
49 | 首先配置环境变量 `AUTHORIZATION`,然后运行程序。
50 |
51 | 访问 `/tokens` 或者 `/api_prefix/tokens` 可以查看现有 Tokens 数量,也可以上传新的 Tokens ,或者清空 Tokens。
52 |
53 | 请求时传入 `AUTHORIZATION` 中你配置的值即可多账号轮询, `AUTHORIZATION` 可以配置多个值,用英文逗号分隔。
54 |
55 | 
56 |
57 | ## 环境变量
58 |
59 | 每个环境变量都有默认值,如果不懂环境变量的含义,请不要设置,更不要传空值,字符串无需引号。
60 |
61 | | 分类 | 变量名 | 示例值 | 默认值 | 描述 |
62 | |------|-------------------|-------------------------------------------------------------|-----------------------|--------------------------------------------------------------|
63 | | 安全相关 | API_PREFIX | `your_prefix` | `None` | API 前缀密码,不设置容易被人访问,设置后需请求 `/your_prefix/v1/chat/completions` |
64 | | | AUTHORIZATION | `your_first_authorization`,
`your_second_authorization` | `[]` | 你自己为使用多账号轮询 Tokens 设置的授权,英文逗号分隔 |
65 | | | AUTH_KEY | `your_auth_key` | `None` | 私人网关需要加`auth_key`请求头才设置该项 |
66 | | 请求相关 | CHATGPT_BASE_URL | `https://chatgpt.com` | `https://chatgpt.com` | ChatGPT 网关地址,设置后会改变请求的网站,多个网关用逗号分隔 |
67 | | | PROXY_URL | `http://ip:port`,
`http://username:password@ip:port` | `[]` | 全局代理 URL,出 403 时启用,多个代理用逗号分隔 |
68 | | | EXPORT_PROXY_URL | `http://ip:port`或
`http://username:password@ip:port` | `None` | 出口代理 URL,防止请求图片和文件时泄漏源站 ip |
69 | | | ARKOSE_TOKEN_URL | `https://example.com/token` | `[]` | 获取 Arkose token 的地址 |
70 | | 功能相关 | HISTORY_DISABLED | `true` | `true` | 是否不保存聊天记录并返回 conversation_id |
71 | | | POW_DIFFICULTY | `00003a` | `00003a` | 要解决的工作量证明难度,不懂别设置 |
72 | | | RETRY_TIMES | `3` | `3` | 出错重试次数,使用 AUTHORIZATION 会自动轮询下一个账号 |
73 | | | ENABLE_GATEWAY | `true` | `true` | 是否启用网关模式(WEBUI) |
74 | | | CONVERSATION_ONLY | `false` | `false` | 是否直接使用对话接口,如果你用的网关支持自动解决pow和arkose才启用 |
75 | | | ENABLE_LIMIT | `true` | `true` | 开启后不尝试突破官方次数限制,尽可能防止封号 |
76 | | | UPLOAD_BY_URL | `false` | `false` | 开启后按照 `URL+空格+正文` 进行对话,自动解析 URL 内容并上传,多个 URL 用空格分隔 |
77 | | | CHECK_MODEL | `false` | `false` | 检查账号是否支持传入模型,开启后可以稍微避免4o返回3.5内容,但是会增加请求时延,且并不能解决降智问题 |
78 | | | SCHEDULED_REFRESH | `false` | `false` | 是否定时刷新 AccessToken ,开启后每次启动程序将会全部非强制刷新一次,每4天晚上3点全部强制刷新一次。 |
79 |
80 | ## 部署
81 |
82 | ### Zeabur 部署
83 |
84 | [](https://zeabur.com/templates/6HEGIZ?referralCode=LanQian528)
85 |
86 | ### 直接部署
87 |
88 | ```bash
89 | git clone https://github.com/LanQian528/chat2api
90 | cd chat2api
91 | pip install -r requirements.txt
92 | python app.py
93 | ```
94 |
95 | ### Docker 部署
96 |
97 | 您需要安装 Docker 和 Docker Compose。
98 |
99 | ```bash
100 | docker run -d \
101 | --name chat2api \
102 | -p 5005:5005 \
103 | lanqian528/chat2api:latest
104 | ```
105 |
106 | ### (推荐,可用 PLUS 账号) Docker Compose 部署
107 |
108 | 创建一个新的目录,例如 chat2api,并进入该目录:
109 |
110 | ```bash
111 | mkdir chat2api
112 | cd chat2api
113 | ```
114 |
115 | 在此目录中下载库中的 docker-compose.yml 文件:
116 |
117 | ```bash
118 | wget https://raw.githubusercontent.com/LanQian528/chat2api/main/docker-compose.yml
119 | ```
120 |
121 | 修改 docker-compose.yml 文件中的环境变量,保存后:
122 |
123 | ```bash
124 | docker-compose up -d
125 | ```
126 |
127 | ## 使用
128 |
129 | 1. 在网页使用,直接访问以下地址,仅支持使用免登 GPT-3.5:
130 |
131 | ```
132 | http://127.0.0.1:5005
133 | ```
134 |
135 | 2. 使用 API ,支持传入 AccessToken 或 RefreshToken,可用 GPT-4, GPT-4o, GPTs:
136 |
137 | ```bash
138 | curl --location 'http://127.0.0.1:5005/v1/chat/completions' \
139 | --header 'Content-Type: application/json' \
140 | --header 'Authorization: Bearer {{OpenAI APIKEY}}' \
141 | --data '{
142 | "model": "gpt-3.5-turbo",
143 | "messages": [{"role": "user", "content": "Say this is a test!"}],
144 | "stream": true
145 | }'
146 | ```
147 |
148 | 将你账号的 `AccessToken` 或 `RefreshToken` 当作 `OpenAI APIKEY` 传入。
149 |
150 | 如果有team账号,可以传入 `ChatGPT-Account-ID`,使用 Team 工作区:
151 |
152 | - 传入方式一:
153 | `headers` 中传入 `ChatGPT-Account-ID`值
154 |
155 | - 传入方式二:
   `Authorization: Bearer <AccessToken>,<ChatGPT-Account-ID>`
157 |
158 | 如果设置了 `AUTHORIZATION` 环境变量,可以将设置的值当作 `OpenAI APIKEY` 传入进行多 Tokens 轮询。
159 |
160 | > - `AccessToken` 获取: chatgpt官网登录后,再打开 [https://chatgpt.com/api/auth/session](https://chatgpt.com/api/auth/session) 获取 `accessToken` 这个值。
161 | > - `RefreshToken` 获取: 此处不提供获取方法。
162 | > - 免登录 gpt-3.5 无需传入 Token。
163 |
164 | ## ArkoseToken
165 |
166 | > #### 目前支持外部服务提供 ArkoseToken
167 | >
168 | > #### 推荐使用 docker-compose 方式部署,已内置 Arkose 服务
169 |
170 | 1. 设置环境变量 `ARKOSE_TOKEN_URL`
171 |
172 | 2. 在需要 `ArkoseToken` 的时候,`chat2api` 会向 `ARKOSE_TOKEN_URL` 发送 `POST` 请求
173 |
174 | 3. 请按照以下格式提供外部服务:
175 |
176 | - 请求体:
177 |
178 | ```json
179 | {"blob": "rFYaxQNEApDlx/Db.KyrE79pAAFBs70CYtbM4pMNUsc7jIkLGdiDs7vziHRGe78bqWXDo0AYyq2A10qIlcTt89lBYXJqCbONC/nD8C199pEZ/c9ocVKKtM27jZQ7fyOpWd9p5qjKeXT4xEGBFpoE3Re1DwdQeijYp7VMJQyw7RYN+IDB1QEx3aKSO6aTI+ivnhw9ztfn/p1SkvAyyOhur/ArF08WQ+rXQpxpttaSQlzMsIwlYbuUUuYE2f9JrQaYG7qip1DKvju111P6wTNy4QVlMXG32VrzaOWh4nmQ0lOcZ1DmN6u2aeJZotffHV2zOOQAqqnParidTbN+qFre2t77ZwBuGKGqLyT8LeOp02GdFwcyw0kkeX+L7vwYAzBpjA5ky0r0X+i8HpzWt8QCyWzEW9kHn9LLCTwg2MOumzjb66Ad4WDe+C1bAcOKuEyXiYh+a1cWZAOdzEuxEg90yCfI7DZR94BsoDR85gEC/Og88i098u5HV7hZZEOQ6J8fmi68FSyPkN7oLCmBsZCMAZqzapNP/MkeIMExrdw7Jf/PtMrZN4bwM56mWfyIJf5h/zXu8PUajVwE9Pj/M5VtB0spZg49JNeHExosVCAB0C0JW+T8vEIwoqiY4pRQ0lbMHTQZFpU2xURTgcgh+m6g1SEYR1FY3de1XnzfiTQq1RTNJPydj5xpt6r6okr8yIJdRhmVXlQI+pS7vi3+Lls2hnpr7L+l1mcUIMPZNBCs3AUFJNpp6SwQjZkPvKggg1p+uS6PdvKRizM9O9+FKc103AhuSia8KTrvU8tWhBhCzIHCD4LNfnkjuBWSdbDttva4AEXUoPuKkQCWaBzq4lQPUIHFOM9HmNe738vVkNdAuOYffxDNegcpIxLVgZGfbgLQ="}
180 | ```
181 |
182 | - 响应体:
183 |
184 | ```json
185 | {"token": "45017c7bb17115f36.7290869304|r=ap-southeast-1|meta=3|metabgclr=transparent|metaiconclr=%23757575|guitextcolor=%23000000|pk=0A1D34FC-659D-4E23-B17B-694DCFCF6A6C|at=40|sup=1|rid=3|ag=101|cdn_url=https%3A%2F%2Ftcr9i.openai.com%2Fcdn%2Ffc|lurl=https%3A%2F%2Faudio-ap-southeast-1.arkoselabs.com|surl=https%3A%2F%2Ftcr9i.openai.com|smurl=https%3A%2F%2Ftcr9i.openai.com%2Fcdn%2Ffc%2Fassets%2Fstyle-manager"}
186 | ```
187 |
188 | ## 常见问题
189 |
190 | > - 错误代码:
191 | > - `401`:当前 IP 不支持免登录,请尝试更换 IP 地址,或者在环境变量 `PROXY_URL` 中设置代理,或者你的身份验证失败。
192 | > - `403`:请在日志中查看具体报错信息。
193 | > - `429`:当前 IP 请求1小时内请求超过限制,请稍后再试,或更换 IP。
194 | > - `500`:服务器内部错误,请求失败。
195 | > - `502`:服务器网关错误,或网络不可用,请尝试更换网络环境。
196 |
197 | > - 已知情况:
198 | > - 日本 IP 很多不支持免登,免登 GPT-3.5 建议使用美国 IP。
199 | > - 99%的账号都支持免费 `GPT-4o` ,但根据 IP 地区开启,目前日本和新加坡 IP 已知开启概率较大。
200 |
201 | > - 环境变量 `AUTHORIZATION` 是什么?
202 | > - 是一个自己给 chat2api 设置的一个身份验证,设置后才可使用已保存的 Tokens 轮询,请求时当作 `APIKEY` 传入。
203 | > - AccessToken 如何获取?
204 | > - chatgpt官网登录后,再打开 [https://chatgpt.com/api/auth/session](https://chatgpt.com/api/auth/session) 获取 `accessToken` 这个值。
205 | > - PLUS 账号报错 `403`?
206 | > - PLUS 账号需要配置 `ArkoseToken`,请根据上文进行配置。
207 | > - ArkoseToken 是什么,怎么获取?
208 | > - 请参考上文的说明,更多请参考 [https://www.arkoselabs.com/](https://www.arkoselabs.com/)
209 |
210 | ## 赞助商
211 |
212 | [](https://capsolver.com/?utm_source=github&utm_medium=github_banner&utm_campaign=chat2api)
213 |
214 | ## License
215 |
216 | MIT License
217 |
218 |
--------------------------------------------------------------------------------
/chatgpt/turnstile.py:
--------------------------------------------------------------------------------
1 | import pybase64
2 | import json
3 | import random
4 | import time
5 | from typing import Any, Callable, Dict, List, Union
6 |
7 |
class OrderedMap:
    """A mapping that remembers first-insertion order when serializing to JSON.

    Mirrors the JS object the turnstile script builds: a key that is added
    twice keeps its original value (repeat adds are ignored).
    """

    def __init__(self):
        self.keys = []    # keys in first-seen order
        self.values = {}  # key -> value storage

    def add(self, key: str, value: Any):
        # Ignore keys that were already inserted — no overwrite, no reorder.
        if key in self.values:
            return
        self.keys.append(key)
        self.values[key] = value

    def to_json(self):
        # Serialize entries in insertion order.
        ordered = {key: self.values[key] for key in self.keys}
        return json.dumps(ordered)
21 |
# Type aliases for the token-processing "virtual machine" below.
TurnTokenList = List[List[Any]]  # decoded instruction list: each entry is [opcode, *operands]
FloatMap = Dict[float, Any]  # numeric register/opcode id -> stored value or handler
StringMap = Dict[str, Any]
FuncType = Callable[..., Any]
26 |
27 |
def get_turnstile_token(dx: str, p: str) -> Union[str, None]:
    """Base64-decode *dx* and XOR-deobfuscate it with key *p*.

    Returns the decoded token payload, or None when decoding fails
    (the error is printed, matching the original script's behavior).
    """
    try:
        decoded = pybase64.b64decode(dx).decode()
        return process_turnstile_token(decoded, p)
    except Exception as e:
        print(f"Error in get_turnstile_token: {e}")
        return None
35 |
36 |
def process_turnstile_token(dx: str, p: str) -> str:
    """XOR every character of *dx* against the repeating key *p*.

    An empty key is the identity transform. The operation is its own
    inverse: applying it twice with the same key restores the input.
    """
    if not p:
        return ''.join(dx)
    key_len = len(p)
    return ''.join(chr(ord(ch) ^ ord(p[idx % key_len])) for idx, ch in enumerate(dx))
46 |
47 |
def is_slice(input_val: Any) -> bool:
    """True when *input_val* is a list or tuple (the VM's array analogue)."""
    return isinstance(input_val, list) or isinstance(input_val, tuple)
50 |
51 |
def is_float(input_val: Any) -> bool:
    # True when *input_val* is a Python float (the VM's number type).
    return isinstance(input_val, float)
54 |
55 |
def is_string(input_val: Any) -> bool:
    # True when *input_val* is a str (the VM's string type).
    return isinstance(input_val, str)
58 |
59 |
def to_str(input_val: Any) -> str:
    """Mimic JavaScript string coercion for the values this VM handles.

    None becomes "undefined"; known ``window.*`` identifiers map to the text a
    browser's ``toString()`` would produce; a list whose items are all strings
    joins with commas; everything else falls back to ``str()``.
    """
    special_cases = {
        "window.Math": "[object Math]",
        "window.Reflect": "[object Reflect]",
        "window.performance": "[object Performance]",
        "window.localStorage": "[object Storage]",
        "window.Object": "function Object() { [native code] }",
        "window.Reflect.set": "function set() { [native code] }",
        "window.performance.now": "function () { [native code] }",
        "window.Object.create": "function create() { [native code] }",
        "window.Object.keys": "function keys() { [native code] }",
        "window.Math.random": "function random() { [native code] }"
    }
    if input_val is None:
        return "undefined"
    if isinstance(input_val, float):
        return str(input_val)
    if isinstance(input_val, str):
        return special_cases.get(input_val, input_val)
    if isinstance(input_val, list) and all(isinstance(item, str) for item in input_val):
        return ','.join(input_val)
    return str(input_val)
83 |
84 |
def get_func_map() -> FloatMap:
    """Build the opcode -> handler table for the turnstile token VM.

    ``process_map`` doubles as the VM's register file: numeric keys hold
    intermediate values, and the entries installed at the bottom are the
    executable opcode handlers. Each handler closes over ``process_map``
    and reads/writes registers by number.
    """
    process_map: FloatMap = {}

    # Opcode 1: XOR-combine register t into register e (same cipher as
    # process_turnstile_token), storing the result in e.
    def func_1(e: float, t: float):
        e_str = to_str(process_map[e])
        t_str = to_str(process_map[t])
        res = process_turnstile_token(e_str, t_str)
        process_map[e] = res

    # Opcode 2: store immediate value t in register e.
    def func_2(e: float, t: Any):
        process_map[e] = t

    # Opcode 5: JS-style "+": list append, string concat, or numeric add
    # (mismatched operand types produce "NaN", as in JavaScript).
    def func_5(e: float, t: float):
        n = process_map[e]
        tres = process_map[t]
        if is_slice(n):
            nt = n + [tres]
            process_map[e] = nt
        else:
            if is_string(n) or is_string(tres):
                res = to_str(n) + to_str(tres)
            elif is_float(n) and is_float(tres):
                res = n + tres
            else:
                res = "NaN"
            process_map[e] = res

    # Opcode 6: dotted property path "t.n" -> register e, with a special
    # case rewriting window.document.location to the ChatGPT origin.
    def func_6(e: float, t: float, n: float):
        tv = process_map[t]
        nv = process_map[n]
        if is_string(tv) and is_string(nv):
            res = f"{tv}.{nv}"
            if res == "window.document.location":
                process_map[e] = "https://chatgpt.com/"
            else:
                process_map[e] = res
        else:
            print("func type 6 error")

    # Opcode 24: like opcode 6 but without the location special case.
    def func_24(e: float, t: float, n: float):
        tv = process_map[t]
        nv = process_map[n]
        if is_string(tv) and is_string(nv):
            process_map[e] = f"{tv}.{nv}"
        else:
            print("func type 24 error")

    # Opcode 7: call register e with operand registers as arguments.
    # The string "window.Reflect.set" is emulated as OrderedMap.add.
    def func_7(e: float, *args):
        n = [process_map[arg] for arg in args]
        ev = process_map[e]
        if isinstance(ev, str):
            if ev == "window.Reflect.set":
                obj = n[0]
                key_str = str(n[1])
                val = n[2]
                obj.add(key_str, val)
        elif callable(ev):
            ev(*n)

    # Opcode 17: call register t, storing the result in e. Several browser
    # APIs are emulated: performance.now (relative to the module-level
    # start_time), Object.create, Object.keys on localStorage, Math.random.
    def func_17(e: float, t: float, *args):
        i = [process_map[arg] for arg in args]
        tv = process_map[t]
        res = None
        if isinstance(tv, str):
            if tv == "window.performance.now":
                current_time = time.time_ns()
                elapsed_ns = current_time - int(start_time * 1e9)
                res = (elapsed_ns + random.random()) / 1e6
            elif tv == "window.Object.create":
                res = OrderedMap()
            elif tv == "window.Object.keys":
                if isinstance(i[0], str) and i[0] == "window.localStorage":
                    res = ["STATSIG_LOCAL_STORAGE_INTERNAL_STORE_V4", "STATSIG_LOCAL_STORAGE_STABLE_ID",
                           "client-correlated-secret", "oai/apps/capExpiresAt", "oai-did",
                           "STATSIG_LOCAL_STORAGE_LOGGING_REQUEST", "UiState.isNavigationCollapsed.1"]
            elif tv == "window.Math.random":
                res = random.random()
        elif callable(tv):
            res = tv(*i)
        process_map[e] = res

    # Opcode 8: copy register t into register e.
    def func_8(e: float, t: float):
        process_map[e] = process_map[t]

    # Opcode 14: JSON-parse the string in register t into register e.
    def func_14(e: float, t: float):
        tv = process_map[t]
        if is_string(tv):
            token_list = json.loads(tv)
            process_map[e] = token_list
        else:
            print("func type 14 error")

    # Opcode 15: JSON-serialize register t into register e.
    def func_15(e: float, t: float):
        tv = process_map[t]
        process_map[e] = json.dumps(tv)

    # Opcode 18: base64-decode register e in place.
    def func_18(e: float):
        ev = process_map[e]
        e_str = to_str(ev)
        decoded = pybase64.b64decode(e_str).decode()
        process_map[e] = decoded

    # Opcode 19: base64-encode register e in place.
    def func_19(e: float):
        ev = process_map[e]
        e_str = to_str(ev)
        encoded = pybase64.b64encode(e_str.encode()).decode()
        process_map[e] = encoded

    # Opcode 20: if registers e and t are equal, call register n with the
    # remaining operand registers as arguments.
    def func_20(e: float, t: float, n: float, *args):
        o = [process_map[arg] for arg in args]
        ev = process_map[e]
        tv = process_map[t]
        if ev == tv:
            nv = process_map[n]
            if callable(nv):
                nv(*o)
        else:
            print("func type 20 error")

    # Opcode 21: no-op.
    def func_21(*args):
        pass

    # Opcode 23: if register e is set, call register t with the RAW operand
    # values (not register lookups — note the difference from opcode 7).
    def func_23(e: float, t: float, *args):
        i = list(args)
        ev = process_map[e]
        tv = process_map[t]
        if ev is not None:
            if callable(tv):
                tv(*i)

    # Install the opcode handlers; register 10 is pre-seeded with the
    # "window" root identifier.
    process_map.update({
        1: func_1, 2: func_2, 5: func_5, 6: func_6, 24: func_24, 7: func_7,
        17: func_17, 8: func_8, 10: "window", 14: func_14, 15: func_15,
        18: func_18, 19: func_19, 20: func_20, 21: func_21, 23: func_23
    })

    return process_map
# Wall-clock reference (seconds) set by process_turnstile(); func_17 in
# get_func_map() uses it to emulate window.performance.now().
start_time = 0
224 |
225 |
def process_turnstile(dx: str, p: str) -> str:
    """Solve a turnstile challenge: decode payload *dx* with key *p* and run the token VM.

    Returns the base64-encoded answer captured via opcode 3, or "" when the
    payload cannot be decoded.
    """
    global start_time
    # Anchor the fake performance.now() clock used by get_func_map's opcode 17.
    start_time = time.time()
    decoded = get_turnstile_token(dx, p)
    if decoded is None:
        return ""

    instructions = json.loads(decoded)
    res = ""
    process_map = get_func_map()

    def capture_result(e: str):
        # Opcode 3 delivers the final answer; keep it base64-encoded.
        nonlocal res
        res = pybase64.b64encode(e.encode()).decode()

    # Seed the VM: result sink, full instruction list, and the key.
    process_map[3] = capture_result
    process_map[9] = instructions
    process_map[16] = p

    for instruction in instructions:
        try:
            opcode, operands = instruction[0], instruction[1:]
            handler = process_map.get(opcode)
            if callable(handler):
                handler(*operands)
        except Exception:
            # Individual instruction failures are ignored, matching the
            # best-effort behavior of the browser script being emulated.
            continue

    return res
261 |
262 |
263 | if __name__ == "__main__":
264 | result = process_turnstile(
265 | "PBp5bWF1cHlLe1ttQhRfaTdmXEpidGdEYU5JdGJpR3xfHFVuGHVEY0tZVG18Vh54RWJ5CXpxKXl3SUZ7b2FZAWJaTBl6RGQZURh8BndUcRlQVgoYalAca2QUX24ffQZgdVVbbmBrAH9FV08Rb2oVVgBeQVRrWFp5VGZMYWNyMnoSN0FpaQgFT1l1f3h7c1RtcQUqY1kZbFJ5BQRiZEJXS3RvHGtieh9PaBlHaXhVWnVLRUlKdwsdbUtbKGFaAlN4a0V/emUJe2J2dl9BZkAxZWU/WGocRUBnc3VyT3F4WkJmYSthdBIGf0RwQ2FjAUBnd3ZEelgbVUEIDAJjS1VZbU9sSWFjfk55J2lZFV0HWX1cbVV5dWdAfkFIAVQVbloUXQtYaAR+VXhUF1BZdG4CBHRyK21AG1JaHhBFaBwCWUlocyQGVT4NBzNON2ASFVtXeQRET1kARndjUEBDT2RKeQN7RmJjeVtvZGpDeWJ1EHxafVd+Wk1AbzdLVTpafkd9dWZKeARecGJrS0xcenZIEEJQOmcFa01menFOeVRiSGFZC1JnWUA0SU08QGgeDFFgY34YWXAdZHYaHRhANFRMOV0CZmBfVExTWh9lZlVpSnx6eQURb2poa2RkQVJ0cmF0bwJbQgB6RlRbQHRQaQFKBHtENwVDSWpgHAlbTU1hXEpwdBh2eBlNY3l2UEhnblx7AmpaQ08JDDAzJUVAbn5IA2d8XX5ZFVlrYWhSXWlYQlEdZlQ/QUwuYwJgTG5GZghSRHdCYk1CWWBjclp0aWo3TWMSQmFaaAdge05FbmFhH3hxCFZuIX1BY01WVW5ABx5jfG1ZbjcZEiwwPFYQVm0sdHV8Xnl7alRuemgKZUwICklweW1heHR5Q3UqYVoSR3BCaldIc3Z8SmJOS212CAY5AmMkYmMaRn5UXEthZFsHYFx7ZHRnYV5tcFBZeHocQxUXXU0bYk0VFUZ0ZgFrSWcMRksCAwdJEBBncF12fGUVdnFNQnl4ZQB9WUclYGMRe04TQUZMf0FEbEthW357HEN2aVhAdHAMH0NPdWFicm1YbzNRBSkWMDUAOVdXbBlfRz51ah54YG5iVX9sR2t6RF1pR1RGU20MABBWQy55T3dQfmlUfmFrA35gY2AdDiBWMWVlP1hqHEVAZ3NzfE9/c1pCZWErYXQSB2BKcENjew1baXB9Rm1aG1VBCAkJY01aWW1NbklgZH5Oek1rTX9FFEB7RHNGEG9pKH1eRgFSZGJJdkcMQHUSY0IRQRkzUmFgBG90cklvVwNZThIHQXYABjFJaApCWh1qUEhnWVpiBHxDRDlAHg8kFVcCY1dCUk8VRm9obEN9e21EdnluWxN7eWt8RnFOekRTRXZKXkNPWH40YGMRXHwfRHZ7Z1JKS2R9XG1XR09qCGlaZmZ/QXwnfloWTQxIflxbSVNdSUZgHBRLKCwpQwwmXzB2NFRMOVxUTFNfH3BoRVhfWkcBYghVaSh0ZWMFeG9qBWp5eENNeGNldncHR0wBezVPTjdlSGcOTndjVkAUVl99YQFkRUE2YlNKe3ppeml2V2lvYkhGHjtbNHIALywsMScPEjEFO3Q1MQ0UGDYvK148ETYxIzEcD0gzchNcLSs+LAJxJiEQKBd5MCsXCRclFA0gBRg3axk1HTkBGyoUPRhwCwI2OAIRB2gUBRcjATt6ORQ9JDANOHFlEQITIC8VOS4GAC49GDscBBQMNQ4hDQtQZHYMHmk3BRFHeHZvcXNvd01+WXxPFF9pN2ZaSmR3Z0RkQkl7YmlHbzMsSS8HEy4PPggxGAAYBBcuJREBEQA7LAMANgEiNiZgFR5Mchs0eH83ERFsGCceZTESe2MeEgQSGwgXIgIbb38FFBAWEC1GFC42OQ0CCwcudSIpOwY6MRw7IjwYAgAYD3UbOA8AaHoHPiUkBgQmTA4FUxgAOCoJKxNmVSoANDIzAjdlDxA6ISIOKhQDEhwLPS82IT4CUFIsO
yIwLD4+BBsDAww1AnMqHAIlMiMTGT0oAQlUE3QDQhIUACMxDwhGLxEXHQsSIV0FLgMaAgJ2LgsEHyEPLBcKOBtfUhg9MiAXPT5fHhA1Wg8+BxoPLgYcGS0WRSsELjIZKg8EJw4lFQAoUCcTcxASLS9BOTsZD3ERGRUhOD1YUjJxWBEBdnc9PwkQNytyED0zAQtaG3Y2ACsWXSsoPV4+DBQ2DyQ+bg0MHxVHKhAqNh8QPVkNET5fAis5Jh0uGxACKA8kOyo6IBkHIgkKdx0sAgA8SAQVHCkCLwcoBnQHGRAeAxAXOQAdKxhrNxMLJQYrKwAxHnFcOA4HIlEEAVkVDigqAwMoORQQKFkaOy0pISMoRmYDPyFLCRIqVhwCImITET04Gx8QPTMWWRQDcgstAioLGSkBTjw7ECYLeSgraxFoazw2CQcrJgU1cQ0fAB4YEykpIQMEPgJ0NUY0Lhc8IBEEWQtyNSkeECEmHitRFhsULgUrASkfO3E6XDsqLTAVcg8pFCwUaT8rPiMALzskFQQNJBkfKgUxBwscAj4YWhYHDxoXEBRwHgUUMx4gCxsCGBRJAz5yABsCAxIPFSo2AQILLSs7NS4EAGEnFBANJBgTOV0FLWJSKAUQeRkDKyAjCjYqIwEUBwAUPT5iBgohDzYmBAEBJS4pCSspGgUQBDsuD3wvKFd7HwE/EQ8ZFQgRICYEAgUuRhovHFYdM15eNwIgZBgmBVIoJGBnACRXChIKQR8lDVh2CicfKTIBcxwzNionIg4PEVI0FyMQOTkaABI3JSoAByVTKAItJn1ULjcEOG4gBjoqDnAQDjsGHzA2cF92CTIlAhMdchoJABA6KQEyajcgBAM+IhwyE292OTQ0IzUsAVY8EBcxMRxoKgEhBRQSGTMLfQsgFDp1PDQsCgEFKAkIASA8EhF4IgpjIzMJJC4WcyYcEQkPPSMBHlUSfFkuPCQnKiMaAGYWEC80EQIeex9wJjszCSQMFg4iDDcvVxMEBR17Knw0OnMVRyc4fj9ROQpiABoWFxAscR0Na3gBHWdyPjcOBCMleBQgKR4rLQViBhcLGnEgDDZ4ACoPJhQQIH4nHBoDNhkWCyUWDRgVFx4YAwAzFjAELCUPNScjDQ4hDB54Gwg4K2g3BmMBKjkwGggiFAo0Iwp6BBQeDxYwBz4VKCIzeDQmJjYeXTUmHCZpcygrAQt3NAFrBjsmGhtWJz8uUiR3CjorPy4NJXUuOjYIBDoMDGM4MwxxNiMNGg4SES01GHA1O3EIOSo7LQUXHnEeOgIjPXENLjQSfn4OVSkSAgcFBQIxDQUuajUPOj0MFwwcZhMnVzQOCQMDAWBWZBUPPx4oBAA5YA5qBwcrEwQ+IjppEz47Ji4CE2YNKTEzAUcjBgAoFFwyKHwbCz8pARUrDgIIMgg1H2MXGTUBFx0XAgMdEj0HOQ4MIionOyE2cUcxHAA7Iw0sNTkBDUU9GRsbPgkzOBwNKD9hHBdVJipxVTYRAgMmGAIVKxc2JREoNxgtMysDHggNExYWBh8FHwUfBQ8/KQYONiUrLjkfIwpxHDgYCTw1MDEMMBU2JRErK2crDzZdCy94UjAOC00MMgFCKTJxZw8mdgoSCzQMcAtzDC8hMBw7CHJ/GjQ+Cw4aDAVyMTMwEi8gHhUfNB8sDi4hWTQ0GDdJdSEVNggXAhY7Knd3MQ4KGhoZDm11DysqLxI8NXYZCXMDMngaMQg5PSsYKjYxJRJzdx8jOzQlIwklEwgtDhEMdwskLAs3Izg7LQscJi4IeyE3GiAbDAYrHzEzEjcxKicAdSteCTMqJHsUMSEXMT0kJD4Ga3V2Kk4rMSUZHS8qMAsqHTsEPR8RXzArXzc2OgYQOy4oPXc1AQM+DhpuMDFRFTMrBn8pCQkCdCE/MDILKG8uGllRNRlGRy0NGjsyFGoTKSUsOiwkAi8sNRJUNgQ0czEuFgUNMShjBAsBDDErbywzKBoKKzkeOncPDR42HCskN
Gg7BjEMVgAvOyApLQ5WPgAVHiM+Jz8eOA8BOSI7Xwo4JGIJNjYdCz0MFmAuPhEbLzc3VjUQAGwoHjATcSAGdwUVCjIqMDA1OyQNUB5gGRw6UwpkNS0eECoqbCt2KzQEdD1jBzEZOxQdIjBoMxVqCyoEBToSDB5xPz44LA9MCDAKMAZhLgZZACwMKAYDPWgHODIGHiwMIDUpZ2YEMA04By8INQl3ClQLLC8wCDIIXG8/PSARMDYQLxQyeh8qFTg7MhhUDzkLKwNzDT8RPQ84JC0dDTAqGDA7KxkoKDAcPzh1KQo9LzkeN3YMIxc4HzsBNxorAj0jQX90CCMlPQ4FMTYPfDgwDA0sMyoJHyw6EigMCwULUBsDcnsAdQUAKRAMFBIqLQwCGCkLLmoOJQIEOSU/JQ0JFQgmDx02LwgrIjMLHQQ9DCw+cgoRJREWZAQkCyoyNgskJip0JDg5cy1BXXIzJAl3GCQCdggwZXEbBmcPNAwwCAV9fAkGDDUUBhBmKTgyKAo0KRklcRc/IxY5KQ8SACIKEgg4FVUuDx0FUVoiK3IuEiQEGQkkYToJDhcPJhVTfA8zMiMhFgxnAystCycgLTweB1A0GAMuACIBVEUKHSYiCR0UJA0ENQsRBwUPCgEpMCcvGyUKdxcvH3U5OAwRegMnCiE1IxYiOgsGEGoOAhg/DxJ9IggHCzESCgMsJgJ9awodFDksDRAyCyA1NwodDCwJOFcWCw0yNwokfTUKLwt3IwolIwwocTcbRRAeCwoMHiUZOWkeCRclHihWMyVVcTcfVQEkJjAyMyReOT0jEFwMC1UPPyMwATQnO1oxHz8DNSIoAScYMBMtDi8iFgwgHwwKMAxnDjsXDQooCx4YHSY4JQYYPgQ0Cz0PVkQEEQYqKCIWPTELLBsxElgUMBcENhMKPQQRbyQVRhJdREdUW0tUYB4MX2BjeAU8bxEfZUVYW1VHTF5OSQV/f1xBMU5Jamd7QX9fbWd4H3p1ZhNuYmRFVHRyZHRnBltCCnxGV1YxeEQcDUp3ZlJAFFhafWEKFUlQQ25cOW9iHm90Yk5teXpaSGdhXHsBYStPTR1fdG5wHUIAZ0ZuZWVTeFQVWWliaFxSGFRQOARhQlRVQFVpBmBObEZmAUlKdU9gW0VFbHJkXW0Ffko6cmVTfEx3CXdvV1x+eWMDE2h1IXlJZ0J1VkNKe1cGBnZkcE1gdFJbbXdsWntMECo=",
266 | "gAAAAACWzMwMzIsIlRodSBKdWwgMTEgMjAyNCAwMzoxMDo0NiBHTVQrMDgwMCAo5Lit5Zu95qCH5YeG5pe26Ze0KSIsNDI5NDcwNTE1MiwxLCJNb3ppbGxhLzUuMCAoV2luZG93cyBOVCAxMC4wOyBXaW42NDsgeDY0KSBBcHBsZVdlYktpdC81MzcuMzYgKEtIVE1MLCBsaWtlIEdlY2tvKSBDaHJvbWUvMTI2LjAuMC4wIFNhZmFyaS81MzcuMzYgRWRnLzEyNi4wLjAuMCIsImh0dHBzOi8vY2RuLm9haXN0YXRpYy5jb20vX25leHQvc3RhdGljL2NodW5rcy9wYWdlcy9fYXBwLWMwOWZmNWY0MjQwMjcwZjguanMiLCJjL1pGWGkxeTNpMnpaS0EzSVQwNzRzMy9fIiwiemgtQ04iLCJ6aC1DTixlbixlbi1HQixlbi1VUyIsMTM1LCJ3ZWJraXRUZW1wb3JhcnlTdG9yYWdl4oiSW29iamVjdCBEZXByZWNhdGVkU3RvcmFnZVF1b3RhXSIsIl9yZWFjdExpc3RlbmluZ3NxZjF0ejFzNmsiLCJmZXRjaCIsMzY1NCwiNWU1NDUzNzItMzcyNy00ZDAyLTkwMDYtMzMwMDRjMWJmYTQ2Il0="
267 | )
268 | print(result)
269 |
--------------------------------------------------------------------------------
/chatgpt/proofofWork.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | import json
3 | import random
4 | import time
5 | import uuid
6 | from datetime import datetime, timedelta, timezone
7 | from html.parser import HTMLParser
8 |
9 | import pybase64
10 |
11 | from utils.Logger import logger
12 | from utils.config import conversation_only
13 |
# Plausible hardware values used to fake a browser fingerprint in get_config().
cores = [16, 24, 32]  # candidate navigator.hardwareConcurrency values
screens = [3000, 4000, 6000]  # candidate screen-size contributions (summed with a core count)
timeLayout = "%a %b %d %Y %H:%M:%S"  # JS Date.toString()-like timestamp layout

# Module-level cache populated by get_dpl() / ScriptSrcParser and consumed by
# get_config(); cached_time gates a 15-minute refresh window.
cached_scripts = []
cached_dpl = ""
cached_time = 0
cached_require_proof = ""
22 |
23 | navigator_key = [
24 | "registerProtocolHandler−function registerProtocolHandler() { [native code] }",
25 | "storage−[object StorageManager]",
26 | "locks−[object LockManager]",
27 | "appCodeName−Mozilla",
28 | "permissions−[object Permissions]",
29 | "appVersion−5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
30 | "share−function share() { [native code] }",
31 | "webdriver−false",
32 | "managed−[object NavigatorManagedData]",
33 | "canShare−function canShare() { [native code] }",
34 | "vendor−Google Inc.",
35 | "vendor−Google Inc.",
36 | "mediaDevices−[object MediaDevices]",
37 | "vibrate−function vibrate() { [native code] }",
38 | "storageBuckets−[object StorageBucketManager]",
39 | "mediaCapabilities−[object MediaCapabilities]",
40 | "getGamepads−function getGamepads() { [native code] }",
41 | "bluetooth−[object Bluetooth]",
42 | "share−function share() { [native code] }",
43 | "cookieEnabled−true",
44 | "virtualKeyboard−[object VirtualKeyboard]",
45 | "product−Gecko",
46 | "mediaDevices−[object MediaDevices]",
47 | "canShare−function canShare() { [native code] }",
48 | "getGamepads−function getGamepads() { [native code] }",
49 | "product−Gecko",
50 | "xr−[object XRSystem]",
51 | "clipboard−[object Clipboard]",
52 | "storageBuckets−[object StorageBucketManager]",
53 | "unregisterProtocolHandler−function unregisterProtocolHandler() { [native code] }",
54 | "productSub−20030107",
55 | "login−[object NavigatorLogin]",
56 | "vendorSub−",
57 | "login−[object NavigatorLogin]",
58 | "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
59 | "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
60 | "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
61 | "mediaDevices−[object MediaDevices]",
62 | "locks−[object LockManager]",
63 | "webkitGetUserMedia−function webkitGetUserMedia() { [native code] }",
64 | "vendor−Google Inc.",
65 | "xr−[object XRSystem]",
66 | "mediaDevices−[object MediaDevices]",
67 | "virtualKeyboard−[object VirtualKeyboard]",
68 | "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
69 | "virtualKeyboard−[object VirtualKeyboard]",
70 | "appName−Netscape",
71 | "storageBuckets−[object StorageBucketManager]",
72 | "presentation−[object Presentation]",
73 | "onLine−true",
74 | "mimeTypes−[object MimeTypeArray]",
75 | "credentials−[object CredentialsContainer]",
76 | "presentation−[object Presentation]",
77 | "getGamepads−function getGamepads() { [native code] }",
78 | "vendorSub−",
79 | "virtualKeyboard−[object VirtualKeyboard]",
80 | "serviceWorker−[object ServiceWorkerContainer]",
81 | "xr−[object XRSystem]",
82 | "product−Gecko",
83 | "keyboard−[object Keyboard]",
84 | "gpu−[object GPU]",
85 | "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
86 | "webkitPersistentStorage−[object DeprecatedStorageQuota]",
87 | "doNotTrack",
88 | "clearAppBadge−function clearAppBadge() { [native code] }",
89 | "presentation−[object Presentation]",
90 | "serial−[object Serial]",
91 | "locks−[object LockManager]",
92 | "requestMIDIAccess−function requestMIDIAccess() { [native code] }",
93 | "locks−[object LockManager]",
94 | "requestMediaKeySystemAccess−function requestMediaKeySystemAccess() { [native code] }",
95 | "vendor−Google Inc.",
96 | "pdfViewerEnabled−true",
97 | "language−zh-CN",
98 | "setAppBadge−function setAppBadge() { [native code] }",
99 | "geolocation−[object Geolocation]",
100 | "userAgentData−[object NavigatorUAData]",
101 | "mediaCapabilities−[object MediaCapabilities]",
102 | "requestMIDIAccess−function requestMIDIAccess() { [native code] }",
103 | "getUserMedia−function getUserMedia() { [native code] }",
104 | "mediaDevices−[object MediaDevices]",
105 | "webkitPersistentStorage−[object DeprecatedStorageQuota]",
106 | "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
107 | "sendBeacon−function sendBeacon() { [native code] }",
108 | "hardwareConcurrency−32",
109 | "appVersion−5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
110 | "credentials−[object CredentialsContainer]",
111 | "storage−[object StorageManager]",
112 | "cookieEnabled−true",
113 | "pdfViewerEnabled−true",
114 | "windowControlsOverlay−[object WindowControlsOverlay]",
115 | "scheduling−[object Scheduling]",
116 | "pdfViewerEnabled−true",
117 | "hardwareConcurrency−32",
118 | "xr−[object XRSystem]",
119 | "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
120 | "webdriver−false",
121 | "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
122 | "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
123 | "bluetooth−[object Bluetooth]"
124 | ]
125 | document_key = ['_reactListeningo743lnnpvdg', 'location']
126 | window_key = [
127 | "0",
128 | "window",
129 | "self",
130 | "document",
131 | "name",
132 | "location",
133 | "customElements",
134 | "history",
135 | "navigation",
136 | "locationbar",
137 | "menubar",
138 | "personalbar",
139 | "scrollbars",
140 | "statusbar",
141 | "toolbar",
142 | "status",
143 | "closed",
144 | "frames",
145 | "length",
146 | "top",
147 | "opener",
148 | "parent",
149 | "frameElement",
150 | "navigator",
151 | "origin",
152 | "external",
153 | "screen",
154 | "innerWidth",
155 | "innerHeight",
156 | "scrollX",
157 | "pageXOffset",
158 | "scrollY",
159 | "pageYOffset",
160 | "visualViewport",
161 | "screenX",
162 | "screenY",
163 | "outerWidth",
164 | "outerHeight",
165 | "devicePixelRatio",
166 | "clientInformation",
167 | "screenLeft",
168 | "screenTop",
169 | "styleMedia",
170 | "onsearch",
171 | "isSecureContext",
172 | "trustedTypes",
173 | "performance",
174 | "onappinstalled",
175 | "onbeforeinstallprompt",
176 | "crypto",
177 | "indexedDB",
178 | "sessionStorage",
179 | "localStorage",
180 | "onbeforexrselect",
181 | "onabort",
182 | "onbeforeinput",
183 | "onbeforematch",
184 | "onbeforetoggle",
185 | "onblur",
186 | "oncancel",
187 | "oncanplay",
188 | "oncanplaythrough",
189 | "onchange",
190 | "onclick",
191 | "onclose",
192 | "oncontentvisibilityautostatechange",
193 | "oncontextlost",
194 | "oncontextmenu",
195 | "oncontextrestored",
196 | "oncuechange",
197 | "ondblclick",
198 | "ondrag",
199 | "ondragend",
200 | "ondragenter",
201 | "ondragleave",
202 | "ondragover",
203 | "ondragstart",
204 | "ondrop",
205 | "ondurationchange",
206 | "onemptied",
207 | "onended",
208 | "onerror",
209 | "onfocus",
210 | "onformdata",
211 | "oninput",
212 | "oninvalid",
213 | "onkeydown",
214 | "onkeypress",
215 | "onkeyup",
216 | "onload",
217 | "onloadeddata",
218 | "onloadedmetadata",
219 | "onloadstart",
220 | "onmousedown",
221 | "onmouseenter",
222 | "onmouseleave",
223 | "onmousemove",
224 | "onmouseout",
225 | "onmouseover",
226 | "onmouseup",
227 | "onmousewheel",
228 | "onpause",
229 | "onplay",
230 | "onplaying",
231 | "onprogress",
232 | "onratechange",
233 | "onreset",
234 | "onresize",
235 | "onscroll",
236 | "onsecuritypolicyviolation",
237 | "onseeked",
238 | "onseeking",
239 | "onselect",
240 | "onslotchange",
241 | "onstalled",
242 | "onsubmit",
243 | "onsuspend",
244 | "ontimeupdate",
245 | "ontoggle",
246 | "onvolumechange",
247 | "onwaiting",
248 | "onwebkitanimationend",
249 | "onwebkitanimationiteration",
250 | "onwebkitanimationstart",
251 | "onwebkittransitionend",
252 | "onwheel",
253 | "onauxclick",
254 | "ongotpointercapture",
255 | "onlostpointercapture",
256 | "onpointerdown",
257 | "onpointermove",
258 | "onpointerrawupdate",
259 | "onpointerup",
260 | "onpointercancel",
261 | "onpointerover",
262 | "onpointerout",
263 | "onpointerenter",
264 | "onpointerleave",
265 | "onselectstart",
266 | "onselectionchange",
267 | "onanimationend",
268 | "onanimationiteration",
269 | "onanimationstart",
270 | "ontransitionrun",
271 | "ontransitionstart",
272 | "ontransitionend",
273 | "ontransitioncancel",
274 | "onafterprint",
275 | "onbeforeprint",
276 | "onbeforeunload",
277 | "onhashchange",
278 | "onlanguagechange",
279 | "onmessage",
280 | "onmessageerror",
281 | "onoffline",
282 | "ononline",
283 | "onpagehide",
284 | "onpageshow",
285 | "onpopstate",
286 | "onrejectionhandled",
287 | "onstorage",
288 | "onunhandledrejection",
289 | "onunload",
290 | "crossOriginIsolated",
291 | "scheduler",
292 | "alert",
293 | "atob",
294 | "blur",
295 | "btoa",
296 | "cancelAnimationFrame",
297 | "cancelIdleCallback",
298 | "captureEvents",
299 | "clearInterval",
300 | "clearTimeout",
301 | "close",
302 | "confirm",
303 | "createImageBitmap",
304 | "fetch",
305 | "find",
306 | "focus",
307 | "getComputedStyle",
308 | "getSelection",
309 | "matchMedia",
310 | "moveBy",
311 | "moveTo",
312 | "open",
313 | "postMessage",
314 | "print",
315 | "prompt",
316 | "queueMicrotask",
317 | "releaseEvents",
318 | "reportError",
319 | "requestAnimationFrame",
320 | "requestIdleCallback",
321 | "resizeBy",
322 | "resizeTo",
323 | "scroll",
324 | "scrollBy",
325 | "scrollTo",
326 | "setInterval",
327 | "setTimeout",
328 | "stop",
329 | "structuredClone",
330 | "webkitCancelAnimationFrame",
331 | "webkitRequestAnimationFrame",
332 | "chrome",
333 | "caches",
334 | "cookieStore",
335 | "ondevicemotion",
336 | "ondeviceorientation",
337 | "ondeviceorientationabsolute",
338 | "launchQueue",
339 | "documentPictureInPicture",
340 | "getScreenDetails",
341 | "queryLocalFonts",
342 | "showDirectoryPicker",
343 | "showOpenFilePicker",
344 | "showSaveFilePicker",
345 | "originAgentCluster",
346 | "onpageswap",
347 | "onpagereveal",
348 | "credentialless",
349 | "speechSynthesis",
350 | "onscrollend",
351 | "webkitRequestFileSystem",
352 | "webkitResolveLocalFileSystemURL",
353 | "sendMsgToSolverCS",
354 | "webpackChunk_N_E",
355 | "__next_set_public_path__",
356 | "next",
357 | "__NEXT_DATA__",
358 | "__SSG_MANIFEST_CB",
359 | "__NEXT_P",
360 | "_N_E",
361 | "regeneratorRuntime",
362 | "__REACT_INTL_CONTEXT__",
363 | "DD_RUM",
364 | "_",
365 | "filterCSS",
366 | "filterXSS",
367 | "__SEGMENT_INSPECTOR__",
368 | "__NEXT_PRELOADREADY",
369 | "Intercom",
370 | "__MIDDLEWARE_MATCHERS",
371 | "__STATSIG_SDK__",
372 | "__STATSIG_JS_SDK__",
373 | "__STATSIG_RERENDER_OVERRIDE__",
374 | "_oaiHandleSessionExpired",
375 | "__BUILD_MANIFEST",
376 | "__SSG_MANIFEST",
377 | "__intercomAssignLocation",
378 | "__intercomReloadLocation"
379 | ]
380 |
381 |
class ScriptSrcParser(HTMLParser):
    """HTML parser that harvests <script src=...> URLs containing "dpl".

    Every matching URL is appended to the module-level cache
    (cached_scripts / cached_dpl / cached_time) for later use by
    get_config(); cached_dpl keeps the URL tail starting at "dpl".
    """

    def handle_starttag(self, tag, attrs):
        global cached_scripts, cached_dpl, cached_time
        if tag != "script":
            return
        attr_map = dict(attrs)
        if "src" not in attr_map:
            return
        src = attr_map["src"]
        if "dpl" not in src:
            return
        cached_scripts.append(src)
        cached_dpl = src[src.index("dpl"):]
        cached_time = int(time.time())
393 |
394 |
async def get_dpl(service):
    """Refresh the module-level "dpl" script cache from the ChatGPT landing page.

    The cache (cached_scripts / cached_dpl / cached_time) is considered fresh
    for 15 minutes.  On any fetch/parse failure a hard-coded fallback
    script/dpl pair is cached instead.

    Returns True when the cache is fresh or was refreshed from the live page,
    False when the static fallback had to be used.
    """
    global cached_scripts, cached_dpl, cached_time
    if int(time.time()) - cached_time < 15 * 60:
        return True  # cache still fresh; nothing to do
    headers = service.base_headers.copy()
    # NOTE(review): the cache is cleared *before* the conversation_only early
    # return below, so in that mode cached_scripts stays empty — confirm that
    # get_config() (which random.choice()s from it) is never reached then.
    cached_scripts.clear()
    try:
        if conversation_only:
            return True
        r = await service.s.get(f"{service.host_url}/?oai-dm=1", headers=headers, timeout=5)
        r.raise_for_status()
        # ScriptSrcParser fills the module-level caches as a side effect.
        parser = ScriptSrcParser()
        parser.feed(r.text)
        if len(cached_scripts) == 0:
            raise Exception("No scripts found")
        else:
            return True
    except Exception:
        # Fall back to a known-good static asset so PoW generation still works.
        cached_scripts.append(
            "https://cdn.oaistatic.com/_next/static/cXh69klOLzS0Gy2joLDRS/_ssgManifest.js?dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3")
        cached_dpl = "453ebaec0d44c2decab71692e1bfe39be35a24b3"
        cached_time = int(time.time())
        return False
418 |
419 |
def get_parse_time():
    """Return the current UTC-5 time formatted like a JS Date.toString() value."""
    eastern = timezone(timedelta(hours=-5))
    stamp = datetime.now(eastern).strftime(timeLayout)
    return f"{stamp} GMT-0500 (Eastern Standard Time)"
423 |
424 |
def get_config(user_agent):
    """Build the 15-element fake browser fingerprint array used by the PoW.

    Slot meanings mirror what the ChatGPT frontend collects; slots 3 and 9
    (the zeros) are the counters that generate_answer() varies per attempt.
    """
    # Choose cores first, then screen, to keep the RNG call order stable.
    hw_value = random.choice(cores) + random.choice(screens)
    return [
        hw_value,
        get_parse_time(),
        4294705152,
        0,
        user_agent,
        random.choice(cached_scripts),
        cached_dpl,
        "en-US",
        "en-US,es-US,en,es",
        0,
        random.choice(navigator_key),
        random.choice(document_key),
        random.choice(window_key),
        time.perf_counter(),
        str(uuid.uuid4()),
    ]
446 |
447 |
def get_answer_token(seed, diff, config):
    """Solve the PoW challenge and return ("gAAAAAB"-prefixed token, solved flag).

    Logs how long the brute-force took and whether it actually solved the
    challenge or fell back to the static token.
    """
    started = time.time()
    answer, solved = generate_answer(seed, diff, config)
    finished = time.time()
    logger.info(f'diff: {diff}, time: {int((finished - started) * 1e6) / 1e3}ms, solved: {solved}')
    return "gAAAAAB" + answer, solved
454 |
455 |
def generate_answer(seed, diff, config):
    """Brute-force a proof-of-work answer for *seed* at difficulty *diff*.

    *config* is the 15-element list from get_config(); slots 3 and 9 are
    substituted per-iteration with counters, the resulting JSON is
    base64-encoded, and sha3-512(seed + encoded) must compare <= the
    difficulty target.  Returns (base64_answer, True) on success, or a
    static fallback token wrapping the seed with False after 500000 tries.
    """
    # NOTE(review): diff is a hex string, so diff_len counts hex digits while
    # target_diff is only diff_len/2 bytes; hash_value[:diff_len] therefore
    # slices twice as many bytes as the target, making the comparison slightly
    # stricter than a same-length compare — confirm against upstream intent.
    diff_len = len(diff)
    seed_encoded = seed.encode()
    # Pre-serialize the invariant config pieces once; only the two counter
    # slots change per attempt, so the loop is pure byte concatenation.
    static_config_part1 = (json.dumps(config[:3], separators=(',', ':'), ensure_ascii=False)[:-1] + ',').encode()
    static_config_part2 = (',' + json.dumps(config[4:9], separators=(',', ':'), ensure_ascii=False)[1:-1] + ',').encode()
    static_config_part3 = (',' + json.dumps(config[10:], separators=(',', ':'), ensure_ascii=False)[1:]).encode()

    target_diff = bytes.fromhex(diff)

    for i in range(500000):
        dynamic_json_i = str(i).encode()       # fills config slot 3
        dynamic_json_j = str(i >> 1).encode()  # fills config slot 9
        final_json_bytes = static_config_part1 + dynamic_json_i + static_config_part2 + dynamic_json_j + static_config_part3
        base_encode = pybase64.b64encode(final_json_bytes)
        hash_value = hashlib.sha3_512(seed_encoded + base_encode).digest()
        if hash_value[:diff_len] <= target_diff:
            return base_encode.decode(), True

    # Unsolved within the budget: return the recognised fallback token.
    return "wQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + pybase64.b64encode(f'"{seed}"'.encode()).decode(), False
475 |
476 |
def get_requirements_token(config):
    """Return a "gAAAAAC"-prefixed requirements token at low difficulty.

    Uses a random seed and the easy "0fffff" target, so the PoW loop
    effectively always succeeds on an early iteration.
    """
    seed = format(random.random())
    token, _solved = generate_answer(seed, "0fffff", config)
    return 'gAAAAAC' + token
480 |
481 |
482 | if __name__ == "__main__":
483 | # cached_scripts.append(
484 | # "https://cdn.oaistatic.com/_next/static/cXh69klOLzS0Gy2joLDRS/_ssgManifest.js?dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3")
485 | # cached_dpl = "453ebaec0d44c2decab71692e1bfe39be35a24b3"
486 | # cached_time = int(time.time())
487 | # for i in range(10):
488 | # seed = format(random.random())
489 | # diff = "000032"
490 | # config = get_config("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome")
491 | # answer = get_answer_token(seed, diff, config)
492 | cached_scripts.append(
493 | "https://cdn.oaistatic.com/_next/static/cXh69klOLzS0Gy2joLDRS/_ssgManifest.js?dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3")
494 | cached_dpl = "dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3"
495 | config = get_config("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36")
496 | get_requirements_token(config)
497 |
--------------------------------------------------------------------------------
/chatgpt/chatFormat.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import random
4 | import re
5 | import string
6 | import time
7 | import uuid
8 |
9 | import pybase64
10 | import websockets
11 | from fastapi import HTTPException
12 |
13 | from api.files import get_file_content
14 | from api.models import model_system_fingerprint
15 | from api.tokens import split_tokens_from_content, calculate_image_tokens, num_tokens_from_messages
16 | from utils.Logger import logger
17 |
18 | moderation_message = "I'm sorry, I cannot provide or engage in any content related to pornography, violence, or any unethical material. If you have any other questions or need assistance, please feel free to let me know. I'll do my best to provide support and assistance."
19 |
20 |
async def format_not_stream_response(response, prompt_tokens, max_tokens, model):
    """Drain a streamed completion into one OpenAI-style chat.completion dict.

    Concatenates every delta content from the "data: ..." stream, truncates
    via split_tokens_from_content, and wraps the result in the non-streaming
    response schema.  Raises HTTPException(403) when no content was produced.
    """
    alphabet = string.ascii_letters + string.digits
    chat_id = f"chatcmpl-{''.join(random.choice(alphabet) for _ in range(29))}"
    fingerprints = model_system_fingerprint.get(model, None)
    system_fingerprint = random.choice(fingerprints) if fingerprints else None
    created_time = int(time.time())

    pieces = []
    async for chunk in response:
        try:
            if chunk.startswith("data: [DONE]"):
                break
            if not chunk.startswith("data: "):
                continue
            chunk = json.loads(chunk[6:])
            delta = chunk["choices"][0].get("delta")
            if not delta:
                continue
            pieces.append(delta["content"])
        except Exception as e:
            logger.error(f"Error: {chunk}, error: {str(e)}")
            continue
    all_text = "".join(pieces)

    content, completion_tokens, finish_reason = await split_tokens_from_content(all_text, max_tokens, model)
    if not content:
        raise HTTPException(status_code=403, detail="No content in the message.")
    return {
        "id": chat_id,
        "object": "chat.completion",
        "created": created_time,
        "model": model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": content},
                "logprobs": None,
                "finish_reason": finish_reason
            }
        ],
        "usage": {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens
        },
        "system_fingerprint": system_fingerprint
    }
69 |
70 |
async def wss_stream_response(websocket, conversation_id):
    """Yield decoded message bodies for *conversation_id* from a ChatGPT WSS feed.

    Reads JSON frames from *websocket*, skips frames without a sequenceId or
    belonging to other conversations, acknowledges every 80th sequenceId so
    the server keeps streaming, and yields the base64-decoded "body" bytes.
    Stops on a 10-second receive timeout; yields a final b"data: [DONE]\\n\\n"
    when the socket closes normally (code 1000).
    """
    while not websocket.closed:
        try:
            message = await asyncio.wait_for(websocket.recv(), timeout=10)
            if not message:
                # Fix: was a bare print(); route diagnostics through the
                # module logger like every other branch in this function.
                logger.warning("No message received within the specified time.")
                continue
            resultObj = json.loads(message)
            sequenceId = resultObj.get("sequenceId", None)
            if not sequenceId:
                continue
            data = resultObj.get("data", {})
            if conversation_id != data.get("conversation_id", ""):
                continue
            # Periodic ack keeps the server-side stream alive.
            if sequenceId % 80 == 0:
                await websocket.send(
                    json.dumps(
                        {"type": "sequenceAck", "sequenceId": sequenceId}
                    )
                )
            decoded_bytes = pybase64.b64decode(data.get("body", None))
            yield decoded_bytes
        except asyncio.TimeoutError:
            logger.error("Timeout! No message received within the specified time.")
            break
        except websockets.ConnectionClosed as e:
            if e.code == 1000:
                logger.error("WebSocket closed normally with code 1000 (OK)")
                yield b"data: [DONE]\n\n"
            else:
                logger.error(f"WebSocket closed with error code {e.code}")
        except Exception as e:
            logger.error(f"Error: {str(e)}")
            continue
106 |
107 |
async def stream_response(service, response, model, max_tokens):
    """Translate a raw ChatGPT SSE byte stream into OpenAI chat.completion.chunk SSE.

    Tracks incremental text offsets, citations, code/execution-output fence
    state, image/file asset pointers, and moderation events, emitting one
    OpenAI-format "data: {...}" line per upstream chunk.  Stops with
    "data: [DONE]" on finish, length cut-off, moderation, or upstream error.
    """
    chat_id = f"chatcmpl-{''.join(random.choice(string.ascii_letters + string.digits) for _ in range(29))}"
    system_fingerprint_list = model_system_fingerprint.get(model, None)
    system_fingerprint = random.choice(system_fingerprint_list) if system_fingerprint_list else None
    created_time = int(time.time())
    completion_tokens = -1      # counts emitted chunks, starts at -1 so the first role chunk is free
    len_last_content = 0        # chars of the current part already emitted
    len_last_citation = 0       # citations already rendered
    last_message_id = None
    last_content_type = None    # previous chunk's content_type, drives fence closing
    last_recipient = None
    start = False               # becomes True once an "in_progress" assistant message is seen
    end = False                 # set when a terminal chunk was emitted; next iteration sends [DONE]

    async for chunk in response:
        chunk = chunk.decode("utf-8")
        # chunk = 'data: {"message": null, "conversation_id": "38b8bfcf-9912-45db-a48e-b62fb585c855", "error": "Our systems have detected unusual activity coming from your system. Please try again later."}'
        if end:
            yield "data: [DONE]\n\n"
            break
        try:
            if chunk.startswith("data: {"):
                chunk_old_data = json.loads(chunk[6:])
                finish_reason = None
                message = chunk_old_data.get("message", {})
                role = message.get('author', {}).get('role')
                if role == 'user' or role == 'system':
                    continue

                status = message.get("status")
                # Ignore everything until the first "in_progress" message.
                if start:
                    pass
                elif status == "in_progress":
                    start = True
                else:
                    continue

                conversation_id = chunk_old_data.get("conversation_id")
                message_id = message.get("id")
                content = message.get("content", {})
                recipient = message.get("recipient", "")

                if not message and chunk_old_data.get("type") == "moderation":
                    # Upstream flagged the conversation: substitute the canned refusal.
                    delta = {"role": "assistant", "content": moderation_message}
                    finish_reason = "stop"
                    end = True
                elif status == "in_progress":
                    outer_content_type = content.get("content_type")
                    if outer_content_type == "text":
                        part = content.get("parts", [])[0]
                        if not part:
                            new_text = ""
                        else:
                            # A different message id mid-stream means a side message; skip it.
                            if last_message_id and last_message_id != message_id:
                                continue
                            citation = message.get("metadata", {}).get("citations", [])
                            if len(citation) > len_last_citation:
                                # Render the newest citation as an inline markdown link.
                                inside_metadata = citation[-1].get("metadata", {})
                                citation_title = inside_metadata.get("title", "")
                                citation_url = inside_metadata.get("url", "")
                                new_text = f' **[[""]]({citation_url} "{citation_title}")** '
                                len_last_citation = len(citation)
                            else:
                                # Emit only the suffix not yet sent.
                                new_text = part[len_last_content:]
                                len_last_content = len(part)
                    else:
                        # Non-text content (code / execution output): wrap in fences.
                        text = content.get("text", "")
                        if outer_content_type == "code" and last_content_type != "code":
                            new_text = "\n```" + recipient + "\n" + text[len_last_content:]
                        elif outer_content_type == "execution_output" and last_content_type != "execution_output":
                            new_text = "\n```" + "Output" + "\n" + text[len_last_content:]
                        else:
                            new_text = text[len_last_content:]
                        len_last_content = len(text)
                    # Close a still-open fence when the content type switches away.
                    if last_content_type == "code" and outer_content_type != "code":
                        new_text = "\n```\n" + new_text
                    elif last_content_type == "execution_output" and outer_content_type != "execution_output":
                        new_text = "\n```\n" + new_text
                    if recipient == "dalle.text2im" and last_recipient != "dalle.text2im":
                        new_text = "\n```" + "json" + "\n" + new_text
                    delta = {"content": new_text}
                    last_content_type = outer_content_type
                    last_recipient = recipient
                    if completion_tokens >= max_tokens:
                        delta = {}
                        finish_reason = "length"
                        end = True
                elif status == "finished_successfully":
                    if content.get("content_type") == "multimodal_text":
                        # Generated images: resolve asset pointers to download URLs.
                        parts = content.get("parts", [])
                        delta = {}
                        for part in parts:
                            if isinstance(part, str):
                                continue
                            inner_content_type = part.get('content_type')
                            if inner_content_type == "image_asset_pointer":
                                last_content_type = "image_asset_pointer"
                                file_id = part.get('asset_pointer').replace('file-service://', '')
                                logger.debug(f"file_id: {file_id}")
                                image_download_url = await service.get_download_url(file_id)
                                logger.debug(f"image_download_url: {image_download_url}")
                                if image_download_url:
                                    delta = {"content": f"\n```\n\n"}
                                else:
                                    delta = {"content": f"\n```\nFailed to load the image.\n"}
                    elif message.get("end_turn"):
                        part = content.get("parts", [])[0]
                        new_text = part[len_last_content:]
                        if not new_text:
                            # No unseen text: turn any sandbox file links into download URLs.
                            matches = re.findall(r'\(sandbox:(.*?)\)', part)
                            if matches:
                                file_url_content = ""
                                for i, sandbox_path in enumerate(matches):
                                    file_download_url = await service.get_response_file_url(conversation_id, message_id, sandbox_path)
                                    if file_download_url:
                                        file_url_content += f"\n```\n\n"
                                delta = {"content": file_url_content}
                            else:
                                delta = {}
                        else:
                            delta = {"content": new_text}
                        finish_reason = "stop"
                        end = True
                    else:
                        # Intermediate (e.g. tool) message finished: reset per-message state.
                        last_message_id = None
                        len_last_content = 0
                        continue
                else:
                    continue
                last_message_id = message_id
                # First emitted chunk must carry the assistant role per OpenAI SSE format.
                if not end and not delta.get("content"):
                    delta = {"role": "assistant", "content": ""}
                chunk_new_data = {
                    "id": chat_id,
                    "object": "chat.completion.chunk",
                    "created": created_time,
                    "model": model,
                    "choices": [
                        {
                            "index": 0,
                            "delta": delta,
                            "logprobs": None,
                            "finish_reason": finish_reason
                        }
                    ],
                    "system_fingerprint": system_fingerprint
                }
                if not service.history_disabled:
                    chunk_new_data.update({
                        "message_id": message_id,
                        "conversation_id": conversation_id,
                    })
                completion_tokens += 1
                yield f"data: {json.dumps(chunk_new_data)}\n\n"
            elif chunk.startswith("data: [DONE]"):
                yield "data: [DONE]\n\n"
            else:
                continue
        except Exception as e:
            # An upstream error payload terminates the stream; anything else is skipped.
            if chunk.startswith("data: "):
                chunk_data = json.loads(chunk[6:])
                if chunk_data.get("error"):
                    logger.error(f"Error: {chunk_data.get('error')}")
                    yield "data: [DONE]\n\n"
                    break
            logger.error(f"Error: {chunk}, details: {str(e)}")
            continue
275 |
276 |
def get_url_from_content(content):
    """Split a leading URL off *content*.

    Returns (url, remaining_content); url is None when *content* is not a
    string starting with "http" or the first token is not a valid URL.
    """
    if not (isinstance(content, str) and content.startswith('http')):
        return None, content
    first_token = content.split(' ')[0]
    try:
        url = re.match(
            r'(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
            first_token)[0]
    except Exception:
        return None, content
    return url, content.replace(url, '').strip()
288 |
289 |
def format_messages_with_url(content):
    """Convert leading URLs in *content* into OpenAI multimodal message parts.

    Repeatedly strips URLs off the front of the text.  When none are found
    the original text is returned unchanged; otherwise a list of parts is
    returned: one text entry plus one image_url entry per extracted URL.
    """
    found_urls = []
    url, content = get_url_from_content(content)
    while url:
        found_urls.append(url)
        logger.info(f"Found a file_url from messages: {url}")
        url, content = get_url_from_content(content)
    if not found_urls:
        return content
    parts = [{"type": "text", "text": content}]
    for u in found_urls:
        parts.append({"type": "image_url", "image_url": {"url": u}})
    return parts
315 |
316 |
async def api_messages_to_chat(service, api_messages, upload_by_url=False):
    """Convert OpenAI-style *api_messages* into ChatGPT conversation messages.

    Text messages pass through as-is; list-form (multimodal) content has its
    image_url entries downloaded and re-uploaded through *service*, producing
    asset-pointer parts and attachment metadata.  When *upload_by_url* is
    True, bare URLs inside plain-text messages are first promoted to
    image_url parts via format_messages_with_url().

    Returns (chat_messages, prompt_tokens) where prompt_tokens includes an
    estimate for uploaded files.
    """
    file_tokens = 0
    chat_messages = []
    for api_message in api_messages:
        role = api_message.get('role')
        content = api_message.get('content')
        if upload_by_url:
            if isinstance(content, str):
                content = format_messages_with_url(content)
        if isinstance(content, list):
            parts = []
            attachments = []
            content_type = "multimodal_text"
            for i in content:
                if i.get("type") == "text":
                    parts.append(i.get("text"))
                elif i.get("type") == "image_url":
                    image_url = i.get("image_url")
                    url = image_url.get("url")
                    detail = image_url.get("detail", "auto")
                    # Download the referenced file and re-upload it to ChatGPT.
                    file_content, mime_type = await get_file_content(url)
                    file_meta = await service.upload_file(file_content, mime_type)
                    if file_meta:
                        file_id = file_meta["file_id"]
                        file_size = file_meta["size_bytes"]
                        file_name = file_meta["file_name"]
                        mime_type = file_meta["mime_type"]
                        use_case = file_meta["use_case"]
                        if mime_type.startswith("image/"):
                            # Images become asset-pointer parts and count image tokens.
                            width, height = file_meta["width"], file_meta["height"]
                            file_tokens += await calculate_image_tokens(width, height, detail)
                            parts.append({
                                "content_type": "image_asset_pointer",
                                "asset_pointer": f"file-service://{file_id}",
                                "size_bytes": file_size,
                                "width": width,
                                "height": height
                            })
                            attachments.append({
                                "id": file_id,
                                "size": file_size,
                                "name": file_name,
                                "mime_type": mime_type,
                                "width": width,
                                "height": height
                            })
                        else:
                            # Non-image files only become attachments; the
                            # size//1000 token estimate is a heuristic.
                            if not use_case == "ace_upload":
                                await service.check_upload(file_id)
                            file_tokens += file_size // 1000
                            attachments.append({
                                "id": file_id,
                                "size": file_size,
                                "name": file_name,
                                "mime_type": mime_type,
                            })
            metadata = {
                "attachments": attachments
            }
        else:
            content_type = "text"
            parts = [content]
            metadata = {}
        chat_message = {
            "id": f"{uuid.uuid4()}",
            "author": {"role": role},
            "content": {"content_type": content_type, "parts": parts},
            "metadata": metadata
        }
        chat_messages.append(chat_message)
    text_tokens = await num_tokens_from_messages(api_messages, service.resp_model)
    prompt_tokens = text_tokens + file_tokens
    return chat_messages, prompt_tokens
390 |
--------------------------------------------------------------------------------
/chatgpt/ChatService.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import random
4 | import types
5 | import uuid
6 |
7 | import websockets
8 | from fastapi import HTTPException
9 | from starlette.concurrency import run_in_threadpool
10 |
11 | from api.files import get_image_size, get_file_extension, determine_file_use_case
12 | from api.models import model_proxy
13 | from chatgpt.authorization import get_req_token, verify_token
14 | from chatgpt.chatFormat import api_messages_to_chat, stream_response, wss_stream_response, format_not_stream_response
15 | from chatgpt.chatLimit import check_is_limit, handle_request_limit
16 | from chatgpt.proofofWork import get_config, get_dpl, get_answer_token, get_requirements_token
17 | from chatgpt.turnstile import process_turnstile
18 | from chatgpt.wssClient import token2wss, set_wss
19 | from utils.Client import Client
20 | from utils.Logger import logger
21 | from utils.config import proxy_url_list, chatgpt_base_url_list, arkose_token_url_list, history_disabled, pow_difficulty, \
22 | conversation_only, enable_limit, upload_by_url, check_model, auth_key, user_agents_list
23 |
24 |
class ChatService:
    """One ChatGPT web-backend conversation round-trip.

    Lifecycle (driven by the caller):
      1. ``__init__`` resolves the request token.
      2. ``set_dynamic_data`` verifies auth, applies rate limits, picks
         proxy/base-url, builds the HTTP client and base headers.
      3. ``get_chat_requirements`` negotiates the sentinel gates
         (proof-of-work, turnstile, arkose) and obtains the chat token.
      4. ``prepare_send_conversation`` converts OpenAI-style messages into
         the ChatGPT conversation payload (uploading attachments as needed).
      5. ``send_conversation`` POSTs the conversation and returns either an
         SSE-backed stream or a WebSocket-relayed stream.
      6. ``close_client`` releases the HTTP client and any open WebSocket.
    """

    def __init__(self, origin_token=None):
        # Random UA per service instance; fixed Chrome 127 UA as fallback.
        self.user_agent = random.choice(user_agents_list) if user_agents_list else "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
        self.req_token = get_req_token(origin_token)
        # Placeholder sentinel token; overwritten by get_chat_requirements().
        self.chat_token = "gAAAAAB"
        self.s = None   # HTTP client, created in set_dynamic_data()
        self.ws = None  # WebSocket connection, created lazily in send_conversation()

    async def set_dynamic_data(self, data):
        """Initialize all per-request state from the incoming payload ``data``.

        Resolves the access token (or anonymous "no-auth 3.5" mode), enforces
        the per-token request limit, selects proxy / base URL / arkose service,
        creates the HTTP client, base headers and cookies.

        Raises:
            HTTPException(429): when handle_request_limit rejects the request.
        """
        if self.req_token:
            logger.info(f"Request token: {self.req_token}")
            # Token may be "access_token,account_id" (comma-separated) for
            # workspace/team accounts; a bare token means the default account.
            req_len = len(self.req_token.split(","))
            if req_len == 1:
                self.access_token = await verify_token(self.req_token)
                self.account_id = None
            else:
                self.access_token = await verify_token(self.req_token.split(",")[0])
                self.account_id = self.req_token.split(",")[1]
        else:
            logger.info("Request token is empty, use no-auth 3.5")
            self.access_token = None
            self.account_id = None

        self.data = data
        await self.set_model()
        if enable_limit and self.req_token:
            limit_response = await handle_request_limit(self.req_token, self.req_model)
            if limit_response:
                raise HTTPException(status_code=429, detail=limit_response)

        # A 'Chatgpt-Account-Id' key in the request body overrides the
        # account id parsed from the token above.
        self.account_id = self.data.get('Chatgpt-Account-Id', self.account_id)
        self.parent_message_id = self.data.get('parent_message_id')
        self.conversation_id = self.data.get('conversation_id')
        self.history_disabled = self.data.get('history_disabled', history_disabled)

        self.api_messages = self.data.get("messages", [])
        self.prompt_tokens = 0
        # 2147483647 (INT32_MAX) acts as "unlimited"; non-int values are
        # coerced to the same sentinel below.
        self.max_tokens = self.data.get("max_tokens", 2147483647)
        if not isinstance(self.max_tokens, int):
            self.max_tokens = 2147483647

        self.proxy_url = random.choice(proxy_url_list) if proxy_url_list else None
        self.host_url = random.choice(chatgpt_base_url_list) if chatgpt_base_url_list else "https://chatgpt.com"
        self.arkose_token_url = random.choice(arkose_token_url_list) if arkose_token_url_list else None

        self.s = Client(proxy=self.proxy_url)
        self.ws = None
        if conversation_only:
            # conversation_only skips all sentinel/WSS negotiation.
            self.wss_mode = False
            self.wss_url = None
        else:
            # Reuse a previously cached WSS decision/url for this token.
            self.wss_mode, self.wss_url = await token2wss(self.req_token)

        self.oai_device_id = str(uuid.uuid4())
        self.persona = None          # set by get_chat_requirements (only when check_model is off)
        self.arkose_token = None     # set when the backend requires arkose
        self.proof_token = None      # proof-of-work answer token
        self.turnstile_token = None  # cloudflare turnstile token (best effort)

        self.chat_headers = None
        self.chat_request = None

        # Browser-like headers; kept consistent with the chosen User-Agent.
        self.base_headers = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Accept-Language': 'en-US,en;q=0.9',
            'Content-Type': 'application/json',
            'Oai-Device-Id': self.oai_device_id,
            'Oai-Language': 'en-US',
            'Origin': self.host_url,
            'Priority': 'u=1, i',
            'Referer': f'{self.host_url}/',
            'Sec-Ch-Ua': '"Chromium";v="124", "Microsoft Edge";v="124", "Not-A.Brand";v="99"',
            'Sec-Ch-Ua-Mobile': '?0',
            'Sec-Ch-Ua-Platform': '"Windows"',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': self.user_agent
        }
        # Authenticated requests hit /backend-api; anonymous ones /backend-anon.
        if self.access_token:
            self.base_url = self.host_url + "/backend-api"
            self.base_headers['Authorization'] = f'Bearer {self.access_token}'
            if self.account_id:
                self.base_headers['Chatgpt-Account-Id'] = self.account_id
        else:
            self.base_url = self.host_url + "/backend-anon"

        # Optional shared secret for a fronting proxy/gateway.
        if auth_key:
            self.base_headers['authkey'] = auth_key

        # Primes device/session cookies needed by later endpoints.
        await get_dpl(self)
        self.s.session.cookies.set("__Secure-next-auth.callback-url", "https%3A%2F%2Fchatgpt.com;",
                                   domain=self.host_url.split("://")[1], secure=True)

    async def set_model(self):
        """Map the requested model name to the backend slug (``req_model``).

        ``origin_model`` is what the caller asked for, ``resp_model`` is what
        we echo back in responses (after the model_proxy aliasing table), and
        ``req_model`` is the slug actually sent to ChatGPT.  Matching is by
        substring, most specific first; anything unmatched falls back to the
        GPT-3.5 slug.
        """
        self.origin_model = self.data.get("model", "gpt-3.5-turbo-0125")
        self.resp_model = model_proxy.get(self.origin_model, self.origin_model)
        if "gpt-4o-mini" in self.origin_model:
            self.req_model = "gpt-4o-mini"
        elif "gpt-4o" in self.origin_model:
            self.req_model = "gpt-4o"
        elif "gpt-4-mobile" in self.origin_model:
            self.req_model = "gpt-4-mobile"
        elif "gpt-4-gizmo" in self.origin_model:
            # Gizmos (custom GPTs) ride on the gpt-4o slug; the gizmo id is
            # carried separately in conversation_mode (see prepare_send_conversation).
            self.req_model = "gpt-4o"
        elif "gpt-4" in self.origin_model:
            self.req_model = "gpt-4"
        else:
            self.req_model = "text-davinci-002-render-sha"

    async def get_wss_url(self):
        """Register a WebSocket with the backend and return its ``wss_url``.

        Raises:
            HTTPException: with the upstream status code on any failure.
        """
        url = f'{self.base_url}/register-websocket'
        headers = self.base_headers.copy()
        r = await self.s.post(url, headers=headers, data='', timeout=5)
        try:
            if r.status_code == 200:
                resp = r.json()
                logger.info(f'register-websocket response:{resp}')
                wss_url = resp.get('wss_url')
                return wss_url
            raise Exception(r.text)
        except Exception as e:
            logger.error(f"get_wss_url error: {str(e)}")
            raise HTTPException(status_code=r.status_code, detail=f"Failed to get wss url: {str(e)}")

    async def get_chat_requirements(self):
        """Negotiate /sentinel/chat-requirements and return the chat token.

        Handles, in order: optional model availability check, turnstile
        (best effort — failures are logged and ignored), arkose (delegated to
        the external ``arkose_token_url`` service), and proof-of-work (solved
        in a thread pool).  Returns None when ``conversation_only`` is set.

        Raises:
            HTTPException(403/404/429/...): on any unmet requirement or
                upstream error; unexpected errors surface as 500.
        """
        if conversation_only:
            return None
        url = f'{self.base_url}/sentinel/chat-requirements'
        headers = self.base_headers.copy()
        try:
            config = get_config(self.user_agent)
            # 'p' is the client-side requirements token derived from config.
            p = get_requirements_token(config)
            data = {'p': p}
            r = await self.s.post(url, headers=headers, json=data, timeout=5)
            if r.status_code == 200:
                resp = r.json()

                if check_model:
                    # Verify the slug is actually offered to this account.
                    # NOTE: 'r' is rebound here, so later r.text refers to the
                    # /models response, not the chat-requirements one.
                    r = await self.s.get(f'{self.base_url}/models', headers=headers, timeout=5)
                    if r.status_code == 200:
                        models = r.json().get('models')
                        if not any(self.req_model in model.get("slug", "") for model in models):
                            logger.error(f"Model {self.req_model} not support.")
                            raise HTTPException(status_code=404, detail={
                                "message": f"The model `{self.origin_model}` does not exist or you do not have access to it.",
                                "type": "invalid_request_error",
                                "param": None,
                                "code": "model_not_found"
                            })
                    else:
                        raise HTTPException(status_code=404, detail="Failed to get models")
                else:
                    # Persona gate: only paid accounts may use plain gpt-4.
                    # NOTE(review): self.persona is only set on this branch, so
                    # with check_model enabled it stays None for the arkose
                    # check below — confirm that is intended.
                    self.persona = resp.get("persona")
                    if self.persona != "chatgpt-paid":
                        if self.req_model == "gpt-4":
                            logger.error(f"Model {self.resp_model} not support for {self.persona}")
                            raise HTTPException(status_code=404, detail={
                                "message": f"The model `{self.origin_model}` does not exist or you do not have access to it.",
                                "type": "invalid_request_error",
                                "param": None,
                                "code": "model_not_found"
                            })

                # Turnstile: best effort — a failed solve is logged, not fatal.
                turnstile = resp.get('turnstile', {})
                turnstile_required = turnstile.get('required')
                if turnstile_required:
                    turnstile_dx = turnstile.get("dx")
                    try:
                        self.turnstile_token = process_turnstile(turnstile_dx, p)
                    except Exception as e:
                        logger.info(f"Turnstile ignored: {e}")
                        # raise HTTPException(status_code=403, detail="Turnstile required")

                # Arkose: must be solved by the external arkose service.
                arkose = resp.get('arkose', {})
                arkose_required = arkose.get('required')
                if arkose_required and self.persona != "chatgpt-freeaccount":
                    # logger.info("Arkose required: ignore")
                    if not self.arkose_token_url:
                        raise HTTPException(status_code=403, detail="Arkose service required")
                    arkose_dx = arkose.get("dx")
                    # Fresh un-proxied client for the arkose service call.
                    arkose_client = Client()
                    try:
                        r2 = await arkose_client.post(
                            url=self.arkose_token_url,
                            json={"blob": arkose_dx},
                            timeout=15
                        )
                        r2esp = r2.json()
                        logger.info(f"arkose_token: {r2esp}")
                        self.arkose_token = r2esp.get('token')
                        if not self.arkose_token:
                            raise HTTPException(status_code=403, detail="Failed to get Arkose token")
                    except Exception:
                        raise HTTPException(status_code=403, detail="Failed to get Arkose token")
                    finally:
                        await arkose_client.close()

                # Proof-of-work: CPU-bound, so solved in a thread pool.
                proofofwork = resp.get('proofofwork', {})
                proofofwork_required = proofofwork.get('required')
                if proofofwork_required:
                    proofofwork_diff = proofofwork.get("difficulty")
                    # NOTE(review): difficulty strings compare lexicographically;
                    # values <= pow_difficulty are rejected as too hard — confirm
                    # the threshold semantics against proofofWork.get_answer_token.
                    if proofofwork_diff <= pow_difficulty:
                        raise HTTPException(status_code=403,
                                            detail=f"Proof of work difficulty too high: {proofofwork_diff}")
                    proofofwork_seed = proofofwork.get("seed")
                    self.proof_token, solved = await run_in_threadpool(get_answer_token, proofofwork_seed,
                                                                       proofofwork_diff, config)
                    if not solved:
                        raise HTTPException(status_code=403, detail="Failed to solve proof of work")

                self.chat_token = resp.get('token')
                if not self.chat_token:
                    raise HTTPException(status_code=403, detail=f"Failed to get chat token: {r.text}")
                return self.chat_token
            else:
                # Non-200: surface a concise, classified error.
                if "application/json" == r.headers.get("Content-Type", ""):
                    detail = r.json().get("detail", r.json())
                else:
                    detail = r.text
                if "cf-please-wait" in detail:
                    raise HTTPException(status_code=r.status_code, detail="cf-please-wait")
                if r.status_code == 429:
                    raise HTTPException(status_code=r.status_code, detail="rate-limit")
                raise HTTPException(status_code=r.status_code, detail=detail)
        except HTTPException as e:
            # Re-raise HTTPExceptions as-is; everything else becomes a 500.
            raise HTTPException(status_code=e.status_code, detail=e.detail)
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def prepare_send_conversation(self):
        """Build ``chat_headers`` and ``chat_request`` for /conversation.

        Converts the OpenAI-style message list into ChatGPT's format (which
        may upload attachments and sets ``prompt_tokens``), attaches the
        sentinel tokens gathered earlier, and assembles the request body.

        Raises:
            HTTPException(400): if message formatting fails.
        """
        try:
            chat_messages, self.prompt_tokens = await api_messages_to_chat(self, self.api_messages, upload_by_url)
        except Exception as e:
            logger.error(f"Failed to format messages: {str(e)}")
            raise HTTPException(status_code=400, detail="Failed to format messages.")
        self.chat_headers = self.base_headers.copy()
        self.chat_headers.update({
            'Accept': 'text/event-stream',
            'Openai-Sentinel-Chat-Requirements-Token': self.chat_token,
            'Openai-Sentinel-Proof-Token': self.proof_token,
        })
        if self.arkose_token:
            self.chat_headers['Openai-Sentinel-Arkose-Token'] = self.arkose_token

        if self.turnstile_token:
            self.chat_headers['Openai-Sentinel-Turnstile-Token'] = self.turnstile_token

        # conversation_only mode sends no sentinel headers at all.
        if conversation_only:
            self.chat_headers.pop('Openai-Sentinel-Chat-Requirements-Token', None)
            self.chat_headers.pop('Openai-Sentinel-Proof-Token', None)
            self.chat_headers.pop('Openai-Sentinel-Arkose-Token', None)
            self.chat_headers.pop('Openai-Sentinel-Turnstile-Token', None)

        # Custom GPTs embed the gizmo id in the model name after "gpt-4-gizmo-".
        if "gpt-4-gizmo" in self.origin_model:
            gizmo_id = self.origin_model.split("gpt-4-gizmo-")[-1]
            conversation_mode = {"kind": "gizmo_interaction", "gizmo_id": gizmo_id}
        else:
            conversation_mode = {"kind": "primary_assistant"}

        logger.info(f"Model mapping: {self.origin_model} -> {self.req_model}")
        self.chat_request = {
            "action": "next",
            "conversation_mode": conversation_mode,
            "force_nulligen": False,
            "force_paragen": False,
            "force_paragen_model_slug": "",
            "force_rate_limit": False,
            "force_use_sse": True,
            "history_and_training_disabled": self.history_disabled,
            "messages": chat_messages,
            "model": self.req_model,
            # A fresh parent id starts a new thread when none was supplied.
            "parent_message_id": self.parent_message_id if self.parent_message_id else f"{uuid.uuid4()}",
            "reset_rate_limits": False,
            "suggestions": [],
            "timezone_offset_min": -480,
            "variant_purpose": "comparison_implicit",
            "websocket_request_id": f"{uuid.uuid4()}"
        }
        if self.conversation_id:
            self.chat_request['conversation_id'] = self.conversation_id
        return self.chat_request

    async def send_conversation(self):
        """POST the conversation and return the response stream.

        Depending on the backend's reply this returns either:
          * an async generator of SSE chunks (stream=True), or
          * a fully-formatted non-stream completion dict, with the data
            arriving over SSE or a relayed WebSocket.

        Also records the token's SSE/WSS preference via set_wss for reuse.

        Raises:
            HTTPException: on connection failures (502), upstream errors
                (propagated status), or unexpected errors (500).
        """
        try:
            # Pre-connect the cached WSS endpoint if this token used one before.
            try:
                if self.wss_mode:
                    if not self.wss_url:
                        self.wss_url = await self.get_wss_url()
                    self.ws = await websockets.connect(self.wss_url, ping_interval=None, subprotocols=["json.reliable.webpubsub.azure.v1"])
            except Exception as e:
                logger.error(f"Failed to connect to wss: {str(e)}", )
                raise HTTPException(status_code=502, detail="Failed to connect to wss")
            url = f'{self.base_url}/conversation'
            stream = self.data.get("stream", False)
            r = await self.s.post_stream(url, headers=self.chat_headers, json=self.chat_request, timeout=10,
                                         stream=True)
            if r.status_code != 200:
                rtext = await r.atext()
                if "application/json" == r.headers.get("Content-Type", ""):
                    detail = json.loads(rtext).get("detail", json.loads(rtext))
                    if r.status_code == 429:
                        # Record rate-limit metadata for this token/model.
                        check_is_limit(detail, token=self.req_token, model=self.req_model)
                else:
                    if "cf-please-wait" in rtext:
                        logger.error(f"Failed to send conversation: cf-please-wait")
                        raise HTTPException(status_code=r.status_code, detail="cf-please-wait")
                    if r.status_code == 429:
                        logger.error(f"Failed to send conversation: rate-limit")
                        raise HTTPException(status_code=r.status_code, detail="rate-limit")
                    detail = r.text[:100]
                logger.error(f"Failed to send conversation: {detail}")
                raise HTTPException(status_code=r.status_code, detail=detail)

            content_type = r.headers.get("Content-Type", "")
            if "text/event-stream" in content_type and stream:
                # Direct SSE stream: remember this token does not need WSS.
                await set_wss(self.req_token, False)
                return stream_response(self, r.aiter_lines(), self.resp_model, self.max_tokens)
            elif "text/event-stream" in content_type and not stream:
                await set_wss(self.req_token, False)
                return await format_not_stream_response(
                    stream_response(self, r.aiter_lines(), self.resp_model, self.max_tokens), self.prompt_tokens,
                    self.max_tokens, self.resp_model)
            elif "application/json" in content_type:
                # WSS relay mode: the JSON body hands us a wss_url to consume.
                rtext = await r.atext()
                resp = json.loads(rtext)
                self.wss_url = resp.get('wss_url')
                conversation_id = resp.get('conversation_id')
                await set_wss(self.req_token, True, self.wss_url)
                logger.info(f"next wss_url: {self.wss_url}")
                if not self.ws:
                    try:
                        self.ws = await websockets.connect(self.wss_url, ping_interval=None, subprotocols=["json.reliable.webpubsub.azure.v1"])
                    except Exception as e:
                        logger.error(f"Failed to connect to wss: {str(e)}", )
                        raise HTTPException(status_code=502, detail="Failed to connect to wss")
                wss_r = wss_stream_response(self.ws, conversation_id)
                try:
                    if stream and isinstance(wss_r, types.AsyncGeneratorType):
                        return stream_response(self, wss_r, self.resp_model, self.max_tokens)
                    else:
                        return await format_not_stream_response(
                            stream_response(self, wss_r, self.resp_model, self.max_tokens), self.prompt_tokens,
                            self.max_tokens, self.resp_model)
                finally:
                    # A generator keeps the socket open for the consumer;
                    # anything else means we are done with it here.
                    if not isinstance(wss_r, types.AsyncGeneratorType):
                        await self.ws.close()
            else:
                raise HTTPException(status_code=r.status_code, detail="Unsupported Content-Type")
        except HTTPException as e:
            raise HTTPException(status_code=e.status_code, detail=e.detail)
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def get_download_url(self, file_id):
        """Return the download URL for an uploaded file, or "" on failure."""
        url = f"{self.base_url}/files/{file_id}/download"
        headers = self.base_headers.copy()
        try:
            r = await self.s.get(url, headers=headers, timeout=5)
            if r.status_code == 200:
                download_url = r.json().get('download_url')
                return download_url
            else:
                return ""
        except HTTPException:
            return ""

    async def get_download_url_from_upload(self, file_id):
        """Mark ``file_id`` as uploaded and return its download URL ("" on failure)."""
        url = f"{self.base_url}/files/{file_id}/uploaded"
        headers = self.base_headers.copy()
        try:
            r = await self.s.post(url, headers=headers, json={}, timeout=5)
            if r.status_code == 200:
                download_url = r.json().get('download_url')
                return download_url
            else:
                return ""
        except HTTPException:
            return ""

    async def get_upload_url(self, file_name, file_size, use_case="multimodal"):
        """Reserve a file slot and return ``(file_id, upload_url)``.

        Returns ("", "") on any failure.
        """
        url = f'{self.base_url}/files'
        headers = self.base_headers.copy()
        try:
            r = await self.s.post(url, headers=headers, json={
                "file_name": file_name,
                "file_size": file_size,
                "timezone_offset_min": -480,
                "use_case": use_case
            }, timeout=5)
            if r.status_code == 200:
                res = r.json()
                file_id = res.get('file_id')
                upload_url = res.get('upload_url')
                logger.info(f"file_id: {file_id}, upload_url: {upload_url}")
                return file_id, upload_url
            else:
                return "", ""
        except HTTPException:
            return "", ""

    async def upload(self, upload_url, file_content, mime_type):
        """PUT ``file_content`` to the (Azure blob) ``upload_url``.

        Returns True on a 201 Created response, False otherwise.
        """
        headers = self.base_headers.copy()
        headers.update({
            'Accept': 'application/json, text/plain, */*',
            'Content-Type': mime_type,
            'X-Ms-Blob-Type': 'BlockBlob',
            'X-Ms-Version': '2020-04-08'
        })
        # The blob store uses a pre-signed URL; the ChatGPT bearer token
        # must not be forwarded to it.
        headers.pop('Authorization', None)
        try:
            r = await self.s.put(upload_url, headers=headers, data=file_content)
            if r.status_code == 201:
                return True
            return False
        except Exception:
            return False

    async def upload_file(self, file_content, mime_type):
        """Upload raw bytes end-to-end and return a file_meta dict.

        Probes image dimensions for image mime types (falling back to
        text/plain when the bytes are not a valid image), reserves an upload
        slot, uploads, and confirms via the download URL.  Returns None on
        any failure (the specific step is logged).
        """
        if not file_content or not mime_type:
            return None

        width, height = None, None
        if mime_type.startswith("image/"):
            try:
                width, height = await get_image_size(file_content)
            except Exception as e:
                logger.error(f"Error image mime_type, change to text/plain: {e}")
                mime_type = 'text/plain'
        file_size = len(file_content)
        file_extension = await get_file_extension(mime_type)
        file_name = f"{uuid.uuid4()}{file_extension}"
        use_case = await determine_file_use_case(mime_type)

        file_id, upload_url = await self.get_upload_url(file_name, file_size, use_case)
        if file_id and upload_url:
            if await self.upload(upload_url, file_content, mime_type):
                download_url = await self.get_download_url_from_upload(file_id)
                if download_url:
                    file_meta = {
                        "file_id": file_id,
                        "file_name": file_name,
                        "size_bytes": file_size,
                        "mime_type": mime_type,
                        "width": width,
                        "height": height,
                        "use_case": use_case
                    }
                    logger.info(f"File_meta: {file_meta}")
                    return file_meta
                else:
                    logger.error("Failed to get download url")
            else:
                logger.error("Failed to upload file")
        else:
            logger.error("Failed to get upload url")

    async def check_upload(self, file_id):
        """Poll until the file's retrieval index reports "success".

        Polls at most 30 times at 1-second intervals.
        NOTE(review): returns True even when the loop times out without
        reaching "success" — confirm callers treat True as best-effort.
        """
        url = f'{self.base_url}/files/{file_id}'
        headers = self.base_headers.copy()
        try:
            for i in range(30):
                r = await self.s.get(url, headers=headers, timeout=5)
                if r.status_code == 200:
                    res = r.json()
                    retrieval_index_status = res.get('retrieval_index_status', '')
                    if retrieval_index_status == "success":
                        break
                await asyncio.sleep(1)
            return True
        except HTTPException:
            return False

    async def get_response_file_url(self, conversation_id, message_id, sandbox_path):
        """Return the download URL for a code-interpreter sandbox file.

        Returns None on any non-200 response or error.
        """
        try:
            url = f"{self.base_url}/conversation/{conversation_id}/interpreter/download"
            params = {
                "message_id": message_id,
                "sandbox_path": sandbox_path
            }
            headers = self.base_headers.copy()
            r = await self.s.get(url, headers=headers, params=params, timeout=10)
            if r.status_code != 200:
                return None
            else:
                return r.json().get("download_url")
        except Exception:
            logger.info("Failed to get response file url")
            return None

    async def close_client(self):
        """Release the HTTP client and any open WebSocket connection."""
        if self.s:
            await self.s.close()
        if self.ws:
            await self.ws.close()
            # Drop the attribute so a second close_client() skips the ws branch.
            del self.ws
522 |
--------------------------------------------------------------------------------