├── src └── pandora │ ├── py.typed │ ├── bots │ ├── __init__.py │ └── server.py │ ├── exts │ ├── __init__.py │ ├── config.py │ ├── hooks.py │ └── token.py │ ├── openai │ ├── __init__.py │ ├── token.py │ ├── utils.py │ ├── auth.py │ └── api.py │ ├── turbo │ ├── __init__.py │ ├── base.py │ └── chat.py │ ├── migrations │ ├── __init__.py │ ├── database.py │ ├── migrate.py │ ├── scripts │ │ └── 20230308_01_7ctOr.sql │ └── models.py │ ├── __init__.py │ ├── __main__.py │ ├── flask │ ├── static │ │ ├── favicon-16x16.png │ │ ├── favicon-32x32.png │ │ ├── _next │ │ │ └── static │ │ │ │ ├── olf4sv64FWIcQ_zCGl90t │ │ │ │ ├── _ssgManifest.js │ │ │ │ └── _buildManifest.js │ │ │ │ └── chunks │ │ │ │ ├── pages │ │ │ │ └── _error-786d27d84962122a.js │ │ │ │ ├── 68a27ff6-1185184b61bc22d0.js │ │ │ │ ├── bd26816a-981e1ddc27b37cc6.js │ │ │ │ ├── 949.1a6eb804b5e91f61.js │ │ │ │ ├── 554.9b8bfd0762461d74.js │ │ │ │ ├── webpack-c9a868e8e0796ec6.js │ │ │ │ ├── 174-bd28069f281ef76f.js │ │ │ │ ├── 762-222df1028c0c1555.js │ │ │ │ ├── 264-13e92c51b0315184.js │ │ │ │ └── 14-0cb0d20affbd720d.js │ │ ├── apple-touch-icon.png │ │ ├── fonts │ │ │ ├── Sohne-Buch.otf │ │ │ ├── Sohne-Halbfett.otf │ │ │ ├── SohneMono-Buch.otf │ │ │ ├── KaTeX_Main-Bold.woff │ │ │ ├── KaTeX_Fraktur-Bold.woff │ │ │ ├── KaTeX_Main-Italic.woff │ │ │ ├── KaTeX_Main-Regular.woff │ │ │ ├── KaTeX_Math-Italic.woff │ │ │ ├── Signifier-Regular.otf │ │ │ ├── SohneMono-Halbfett.otf │ │ │ ├── KaTeX_SansSerif-Bold.woff │ │ │ ├── KaTeX_Script-Regular.woff │ │ │ ├── KaTeX_Size1-Regular.woff │ │ │ ├── KaTeX_Size2-Regular.woff │ │ │ ├── KaTeX_Size3-Regular.woff │ │ │ ├── KaTeX_Size4-Regular.woff │ │ │ ├── KaTeX_Caligraphic-Bold.woff │ │ │ ├── KaTeX_Fraktur-Regular.woff │ │ │ ├── KaTeX_Main-BoldItalic.woff │ │ │ ├── KaTeX_Math-BoldItalic.woff │ │ │ ├── KaTeX_SansSerif-Italic.woff │ │ │ ├── KaTeX_SansSerif-Regular.woff │ │ │ ├── KaTeX_Caligraphic-Regular.woff │ │ │ └── KaTeX_Typewriter-Regular.woff │ │ └── images │ │ │ └── 2022 │ │ │ └── 11 │ 
│ │ └── ChatGPT.jpg │ └── templates │ │ └── chat.html │ ├── cloud_launcher.py │ └── launcher.py ├── requirements_api.txt ├── doc ├── images │ ├── s01.png │ ├── s02.png │ ├── s03.png │ ├── s04.png │ ├── s05.png │ ├── s06.png │ ├── s11.jpeg │ ├── s12.jpeg │ ├── t0.png │ ├── t1.1.png │ ├── t1.png │ ├── t2.png │ ├── t3.png │ ├── t4.png │ └── t7.png ├── HTTP-API.md ├── wiki_en.md ├── wiki.md └── fakeopen.md ├── .dockerignore ├── MANIFEST.in ├── requirements.txt ├── Dockerfile ├── .github └── workflows │ ├── python-publish.yml │ └── docker-publish.yml ├── bin └── startup.sh ├── .gitignore ├── setup.py ├── README.md └── LICENSE /src/pandora/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/pandora/bots/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /src/pandora/exts/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /src/pandora/openai/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /src/pandora/turbo/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /src/pandora/migrations/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /src/pandora/__init__.py: 
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | __version__ = '1.3.4' 4 | -------------------------------------------------------------------------------- /requirements_api.txt: -------------------------------------------------------------------------------- 1 | pandora-tiktoken~=0.3.1 2 | sqlalchemy~=2.0.7 3 | yoyo-migrations~=8.2.0 -------------------------------------------------------------------------------- /doc/images/s01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/s01.png -------------------------------------------------------------------------------- /doc/images/s02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/s02.png -------------------------------------------------------------------------------- /doc/images/s03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/s03.png -------------------------------------------------------------------------------- /doc/images/s04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/s04.png -------------------------------------------------------------------------------- /doc/images/s05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/s05.png -------------------------------------------------------------------------------- /doc/images/s06.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/s06.png -------------------------------------------------------------------------------- /doc/images/s11.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/s11.jpeg -------------------------------------------------------------------------------- /doc/images/s12.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/s12.jpeg -------------------------------------------------------------------------------- /doc/images/t0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/t0.png -------------------------------------------------------------------------------- /doc/images/t1.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/t1.1.png -------------------------------------------------------------------------------- /doc/images/t1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/t1.png -------------------------------------------------------------------------------- /doc/images/t2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/t2.png -------------------------------------------------------------------------------- /doc/images/t3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/doc/images/t3.png 
# -*- coding: utf-8 -*-
"""Entry point for ``python -m pandora``: delegates to the CLI launcher."""

from pandora import launcher

if __name__ == '__main__':
    # Hand control to the launcher module (defined in src/pandora/launcher.py,
    # outside this view) — presumably it parses CLI args and starts the app.
    launcher.run()
-------------------------------------------------------------------------------- 1 | self.__SSG_MANIFEST=new Set,self.__SSG_MANIFEST_CB&&self.__SSG_MANIFEST_CB(); -------------------------------------------------------------------------------- /src/pandora/flask/static/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/apple-touch-icon.png -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/Sohne-Buch.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/Sohne-Buch.otf -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/Sohne-Halbfett.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/Sohne-Halbfett.otf -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/SohneMono-Buch.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/SohneMono-Buch.otf -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Main-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Main-Bold.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Fraktur-Bold.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Fraktur-Bold.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Main-Italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Main-Italic.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Main-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Main-Regular.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Math-Italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Math-Italic.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/Signifier-Regular.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/Signifier-Regular.otf -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/SohneMono-Halbfett.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/SohneMono-Halbfett.otf -------------------------------------------------------------------------------- 
/src/pandora/flask/static/fonts/KaTeX_SansSerif-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_SansSerif-Bold.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Script-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Script-Regular.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Size1-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Size1-Regular.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Size2-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Size2-Regular.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Size3-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Size3-Regular.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Size4-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Size4-Regular.woff 
-------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Caligraphic-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Caligraphic-Bold.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Fraktur-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Fraktur-Regular.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Main-BoldItalic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Main-BoldItalic.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Math-BoldItalic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Math-BoldItalic.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_SansSerif-Italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_SansSerif-Italic.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_SansSerif-Regular.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_SansSerif-Regular.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Caligraphic-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Caligraphic-Regular.woff -------------------------------------------------------------------------------- /src/pandora/flask/static/fonts/KaTeX_Typewriter-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/passcrad/Pandora-ChatGPT/HEAD/src/pandora/flask/static/fonts/KaTeX_Typewriter-Regular.woff -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements.txt 2 | include requirements_api.txt 3 | include src/pandora/py.typed 4 | recursive-include src/pandora/flask * 5 | recursive-include src/pandora/migrations/scripts * 6 | -------------------------------------------------------------------------------- /src/pandora/flask/static/_next/static/chunks/pages/_error-786d27d84962122a.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[820],{17917:function(n,_,u){(window.__NEXT_P=window.__NEXT_P||[]).push(["/_error",function(){return u(14902)}])}},function(n){n.O(0,[774,888,179],function(){return n(n.s=17917)}),_N_E=n.O()}]); -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | certifi 2 | pyreadline3 ~= 3.4.0; platform_system == 'Windows' 3 | httpx[socks]~=0.23.3 4 | requests[socks]~=2.28.2 5 | 
# -*- coding: utf-8 -*-
"""SQLAlchemy bootstrap: engine, session factory, and a shared session."""

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from ..exts.config import DATABASE_URI

# DATABASE_URI defaults to a per-user SQLite file (see exts/config.py) but can
# be overridden via the DATABASE_URI environment variable (e.g. MySQL).
# echo=False keeps SQL statement logging off.
engine = create_engine(DATABASE_URI, echo=False)

# Session factory bound to the engine above.
Session = sessionmaker(bind=engine)

# NOTE(review): one module-level session is shared by every importer —
# presumably the app uses it from a single thread; confirm before relying
# on it concurrently (SQLAlchemy sessions are not thread-safe).
session = Session()
# -*- coding: utf-8 -*-

import tiktoken


def gpt_num_tokens(messages, model='gpt-3.5-turbo'):
    """Estimate the number of prompt tokens consumed by chat ``messages``.

    Implements the OpenAI cookbook counting recipe for gpt-3.5-turbo-style
    chat models: a fixed per-message overhead, plus the encoded length of
    every field value, plus a fixed suffix that primes the reply.

    :param messages: iterable of message dicts, e.g.
        ``{'role': 'user', 'content': '...'}`` (optionally with ``'name'``).
    :param model: model name used to select the tokenizer.
    :return: estimated token count as an ``int``.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Model names missing from tiktoken's registry (e.g. newer snapshots)
        # raise KeyError; fall back to the gpt-3.5/gpt-4 family encoding,
        # as the OpenAI cookbook recommends.
        encoding = tiktoken.get_encoding('cl100k_base')

    num_tokens = 0
    for message in messages:
        # Every message follows <im_start>{role/name}\n{content}<im_end>\n.
        num_tokens += 4
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if 'name' == key:
                # When 'name' is present, the role token is omitted.
                num_tokens -= 1

    # Every reply is primed with <im_start>assistant.
    num_tokens += 2

    return num_tokens
# -*- coding: utf-8 -*-
"""Process-level hooks: a quiet Ctrl-C exit and a bridge that routes
standard-library ``logging`` records into loguru."""

import logging
import sys

from loguru import logger


def __exception_handle(e_type, e_value, e_traceback):
    # Replacement for sys.excepthook: exit cleanly on Ctrl-C, defer every
    # other uncaught exception to the interpreter's default hook.
    if issubclass(e_type, KeyboardInterrupt):
        print('\nBye...')
        sys.exit(0)

    sys.__excepthook__(e_type, e_value, e_traceback)


class __InterceptHandler(logging.Handler):
    # Standard loguru intercept recipe: forward stdlib logging records to
    # loguru while preserving level names and the original call site.

    def emit(self, record):
        # Map the stdlib level name onto a loguru level; fall back to the
        # numeric level for custom levels loguru does not know about.
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            level = record.levelno

        # Walk up the stack past frames that live inside the logging module
        # so loguru reports the caller's file/line, not logging internals.
        frame, depth = logging.currentframe(), 2
        while frame.f_code.co_filename == logging.__file__:
            frame = frame.f_back
            depth += 1

        logger.opt(depth=depth, exception=record.exc_info).log(
            level, record.getMessage()
        )


def hook_except_handle():
    # Install the Ctrl-C-friendly excepthook defined above.
    sys.excepthook = __exception_handle


def hook_logging(**kwargs):
    # Route all stdlib logging through loguru; kwargs pass straight to
    # logging.basicConfig (e.g. level=, format=).
    logging.basicConfig(handlers=[__InterceptHandler()], **kwargs)
PANDORA_ARGS="${PANDORA_ARGS} --threads ${PANDORA_THREADS}" 39 | fi 40 | 41 | if [ -n "${PANDORA_CLOUD}" ]; then 42 | PANDORA_COMMAND="pandora-cloud" 43 | fi 44 | 45 | export USER_CONFIG_DIR 46 | 47 | # shellcheck disable=SC2086 48 | $(command -v ${PANDORA_COMMAND}) ${PANDORA_ARGS} 49 | -------------------------------------------------------------------------------- /src/pandora/flask/static/images/2022/11/ChatGPT.jpg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 |
# -*- coding: utf-8 -*-

from jwt import decode

from ..openai.utils import Console

# OpenAI's Auth0 RS256 public key, used to verify access-token JWT signatures.
__public_key = b'-----BEGIN PUBLIC KEY-----\n' \
               b'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA27rOErDOPvPc3mOADYtQ\n' \
               b'BeenQm5NS5VHVaoO/Zmgsf1M0Wa/2WgLm9jX65Ru/K8Az2f4MOdpBxxLL686ZS+K\n' \
               b'7eJC/oOnrxCRzFYBqQbYo+JMeqNkrCn34yed4XkX4ttoHi7MwCEpVfb05Qf/ZAmN\n' \
               b'I1XjecFYTyZQFrd9LjkX6lr05zY6aM/+MCBNeBWp35pLLKhiq9AieB1wbDPcGnqx\n' \
               b'lXuU/bLgIyqUltqLkr9JHsf/2T4VrXXNyNeQyBq5wjYlRkpBQDDDNOcdGpx1buRr\n' \
               b'Z2hFyYuXDRrMcR6BQGC0ur9hI5obRYlchDFhlb0ElsJ2bshDDGRk5k3doHqbhj2I\n' \
               b'gQIDAQAB\n' \
               b'-----END PUBLIC KEY-----'


def check_access_token(access_token, api=False):
    """Validate an access token and return its identity payload.

    Share tokens (``fk-``) — and, in API mode, secret/pool keys
    (``sk-``/``pk-``) — are accepted as-is. Anything else is treated as an
    OpenAI JWT: its signature is verified against OpenAI's public key and
    the required claims are checked.

    :param access_token: token string to validate.
    :param api: when True, also accept ``sk-``/``pk-`` API keys.
    :return: ``True`` for opaque keys, otherwise the decoded JWT payload.
    :raises Exception: when required claims are missing or invalid; PyJWT
        exceptions propagate for bad signature/audience/issuer/expiry.
    """
    if access_token.startswith('fk-'):
        return True

    # str.startswith accepts a tuple of prefixes — one call covers both.
    if api and access_token.startswith(('sk-', 'pk-')):
        return True

    # PyJWT documents `algorithms` as a list; the previous bare string
    # 'RS256' only passed the "alg in algorithms" check by substring
    # coincidence.
    payload = decode(access_token, key=__public_key, algorithms=['RS256'], audience=[
        "https://api.openai.com/v1",
        "https://openai.openai.auth0app.com/userinfo"
    ], issuer='https://auth0.openai.com/')

    if 'scope' not in payload:
        raise Exception('miss scope')

    scope = payload['scope']
    if 'model.read' not in scope or 'model.request' not in scope:
        raise Exception('invalid scope')

    if 'https://api.openai.com/auth' not in payload or 'https://api.openai.com/profile' not in payload:
        raise Exception('belonging to an unregistered user.')

    return payload


def check_access_token_out(access_token, api=False):
    """Non-raising wrapper around :func:`check_access_token`.

    Logs validation failures to the console and returns ``False`` instead
    of raising, for call sites that only need a yes/no answer.
    """
    try:
        return check_access_token(access_token, api)
    except Exception as e:
        Console.error('### Invalid access token: {}'.format(str(e)))
        return False
-------------------------------------------------------------------------------- /doc/HTTP-API.md: -------------------------------------------------------------------------------- 1 | # Pandora HTTP API 2 | 3 | ### 特殊说明:如果有多个`Access Token`,可以使用`X-Use-Token: token_name` 头指定使用哪个。 4 | 5 | ### `/api/models` 6 | 7 | * **HTTP方法:** `GET` 8 | * **URL参数:** `无` 9 | * **接口描述:** 列出账号可用的模型。 10 | 11 | ### `/api/conversations` 12 | 13 | * **HTTP方法:** `GET` 14 | * **URL参数:** 15 | * `offset` 数字类型,默认为:`1`。 16 | * `limit` 数字类型,默认为:`20`。 17 | * **接口描述:** 以分页方式列出会话列表。 18 | 19 | ### `/api/conversations` 20 | 21 | * **HTTP方法:** `DELETE` 22 | * **URL参数:** `无` 23 | * **接口描述:** 删除所有会话。 24 | 25 | ### `/api/conversation/` 26 | 27 | * **HTTP方法:** `GET` 28 | * **URL参数:** `无` 29 | * **接口描述:** 通过会话ID获取指定会话详情。 30 | 31 | ### `/api/conversation/` 32 | 33 | * **HTTP方法:** `DELETE` 34 | * **URL参数:** `无` 35 | * **接口描述:** 通过会话ID删除指定会话。 36 | 37 | ### `/api/conversation/` 38 | 39 | * **HTTP方法:** `PATCH` 40 | * **JSON字段:** 41 | * `title` 新标题。 42 | * **接口描述:** 通过会话ID设置指定的会话标题。 43 | 44 | ### `/api/conversation/gen_title/` 45 | 46 | * **HTTP方法:** `POST` 47 | * **JSON字段:** 48 | * `model` 对话所使用的模型。 49 | * `message_id` `ChatGPT`回复的那条消息的ID。 50 | * **接口描述:** 自动生成指定新会话的标题,通常首次问答后调用。 51 | 52 | ### `/api/conversation/talk` 53 | 54 | * **HTTP方法:** `POST` 55 | * **JSON字段:** 56 | * `prompt` 提问的内容。 57 | * `model` 对话使用的模型,通常整个会话中保持不变。 58 | * `message_id` 消息ID,通常使用`str(uuid.uuid4())`来生成一个。 59 | * `parent_message_id` 父消息ID,首次同样需要生成。之后获取上一条回复的消息ID即可。 60 | * `conversation_id` 首次对话可不传。`ChatGPT`回复时可获取。 61 | * `stream` 是否使用流的方式输出内容,默认为:`True` 62 | * **接口描述:** 向`ChatGPT`提问,等待其回复。 63 | 64 | ### `/api/conversation/regenerate` 65 | 66 | * **HTTP方法:** `POST` 67 | * **JSON字段:** 68 | * `prompt` 提问的内容。 69 | * `model` 对话使用的模型,通常整个会话中保持不变。 70 | * `message_id` 上一条用户发送消息的ID。 71 | * `parent_message_id` 上一条用户发送消息的父消息ID。 72 | * `conversation_id` 会话ID,在这个接口不可不传。 73 | * `stream` 是否使用流的方式输出内容,默认为:`True` 74 | * **接口描述:** 让`ChatGPT`重新生成回复。 75 | 76 | 
### `/api/conversation/goon` 77 | 78 | * **HTTP方法:** `POST` 79 | * **JSON字段:** 80 | * `model` 对话使用的模型,通常整个会话中保持不变。 81 | * `parent_message_id` 父消息ID,上一次`ChatGPT`应答的消息ID。 82 | * `conversation_id` 会话ID。 83 | * `stream` 是否使用流的方式输出内容,默认为:`True` 84 | * **接口描述:** 让`ChatGPT`讲之前的恢复继续下去。 85 | 86 | -------------------------------------------------------------------------------- /src/pandora/flask/static/_next/static/olf4sv64FWIcQ_zCGl90t/_buildManifest.js: -------------------------------------------------------------------------------- 1 | self.__BUILD_MANIFEST=function(a,c,s,t,e,u,n,h){return{__rewrites:{beforeFiles:[],afterFiles:[],fallback:[]},"/":[t,a,e,"static/chunks/pages/index-f5560462cc9a5a86.js"],"/_error":["static/chunks/pages/_error-786d27d84962122a.js"],"/account/cancel":["static/chunks/pages/account/cancel-1c0d62f7e98cea5a.js"],"/account/manage":["static/chunks/pages/account/manage-515510f9fdcd7b92.js"],"/account/upgrade":[a,c,u,s,n,"static/chunks/pages/account/upgrade-0c6c79f25e11ac2f.js"],"/aip/[pluginId]/oauth/callback":[a,c,s,"static/chunks/pages/aip/[pluginId]/oauth/callback-8bae9003d60a57e1.js"],"/auth/error":["static/chunks/pages/auth/error-31ef8e5e6df49f9f.js"],"/auth/ext_callback":["static/chunks/pages/auth/ext_callback-8a3dba350878939d.js"],"/auth/ext_callback_refresh":["static/chunks/pages/auth/ext_callback_refresh-4cd753ececf58a64.js"],"/auth/login":[c,s,h,"static/chunks/pages/auth/login-7495d2c866b44897.js"],"/auth/logout":[a,"static/chunks/pages/auth/logout-0abf409a2bbf22c9.js"],"/auth/mocked_login":[a,"static/chunks/pages/auth/mocked_login-28119a8b1a5c2bce.js"],"/bypass":["static/chunks/pages/bypass-df27e0f8a3e360f1.js"],"/chat/[[...chatId]]":[t,"static/chunks/1f110208-44a6f43ddc5e9011.js","static/chunks/bd26816a-981e1ddc27b37cc6.js",a,c,e,u,"static/chunks/113-23682f80a24dd00d.js",s,n,"static/chunks/pages/chat/[[...chatId]]-76751174916fa3f8.js"],"/error":[t,a,e,"static/chunks/pages/error-1e886758cd4aa96f.js"],"/payments/success":[a,c,"static/chunks/
882-025bd3b738a692ef.js",s,"static/chunks/pages/payments/success-5424184b119a4b94.js"],"/status":[c,s,h,"static/chunks/pages/status-696d2aa0dcc8e977.js"],sortedPages:["/","/_app","/_error","/account/cancel","/account/manage","/account/upgrade","/aip/[pluginId]/oauth/callback","/auth/error","/auth/ext_callback","/auth/ext_callback_refresh","/auth/login","/auth/logout","/auth/mocked_login","/bypass","/chat/[[...chatId]]","/error","/payments/success","/status"]}}("static/chunks/762-222df1028c0c1555.js","static/chunks/424-d1d3bfe6a3ca6c4a.js","static/chunks/264-13e92c51b0315184.js","static/chunks/68a27ff6-1185184b61bc22d0.js","static/chunks/174-bd28069f281ef76f.js","static/chunks/360-442b869f1ba4bb1b.js","static/chunks/14-0cb0d20affbd720d.js","static/chunks/417-2ccfee5029bb2a8b.js"),self.__BUILD_MANIFEST_CB&&self.__BUILD_MANIFEST_CB(); -------------------------------------------------------------------------------- /src/pandora/cloud_launcher.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import argparse 4 | 5 | from loguru import logger 6 | 7 | from . import __version__ 8 | from .exts.hooks import hook_except_handle 9 | from .openai.utils import Console 10 | 11 | __show_verbose = False 12 | 13 | 14 | def main(): 15 | global __show_verbose 16 | 17 | Console.debug_b( 18 | ''' 19 | Pandora-Cloud - A web interface to ChatGPT 20 | Github: https://github.com/zhile-io/pandora 21 | Version: {}, Mode: cloud, Engine: free 22 | '''.format(__version__) 23 | ) 24 | 25 | parser = argparse.ArgumentParser() 26 | parser.add_argument( 27 | '-p', 28 | '--proxy', 29 | help='Use a proxy. Format: protocol://user:pass@ip:port', 30 | required=False, 31 | type=str, 32 | default=None, 33 | ) 34 | parser.add_argument( 35 | '-s', 36 | '--server', 37 | help='Specific server bind. 
Format: ip:port, default: 127.0.0.1:8018', 38 | required=False, 39 | type=str, 40 | default='127.0.0.1:8018', 41 | ) 42 | parser.add_argument( 43 | '--threads', 44 | help='Define the number of server workers, default: 4', 45 | required=False, 46 | type=int, 47 | default=4, 48 | ) 49 | parser.add_argument( 50 | '-l', 51 | '--local', 52 | help='Login locally. Pay attention to the risk control of the login ip!', 53 | action='store_true', 54 | ) 55 | parser.add_argument( 56 | '-v', 57 | '--verbose', 58 | help='Show exception traceback.', 59 | action='store_true', 60 | ) 61 | args, _ = parser.parse_known_args() 62 | __show_verbose = args.verbose 63 | 64 | try: 65 | from pandora_cloud.server import ChatBot as CloudServer 66 | 67 | return CloudServer(args.proxy, args.verbose, login_local=args.local).run(args.server, args.threads) 68 | except (ImportError, ModuleNotFoundError): 69 | Console.error_bh('### You need `pip install Pandora-ChatGPT[cloud]` to support cloud mode.') 70 | 71 | 72 | def run(): 73 | hook_except_handle() 74 | 75 | try: 76 | main() 77 | except Exception as e: 78 | Console.error_bh('### Error occurred: ' + str(e)) 79 | 80 | if __show_verbose: 81 | logger.exception('Exception occurred.') 82 | -------------------------------------------------------------------------------- /src/pandora/flask/static/_next/static/chunks/949.1a6eb804b5e91f61.js: -------------------------------------------------------------------------------- 1 | "use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[949],{93949:function(e,n,t){t.r(n),t.d(n,{SANDBOX_LINK_PREFIX:function(){return s},default:function(){return f},handleSandboxLinkClick:function(){return d}});var r=t(61706),a=t(45813),i=t(35250),c=t(70079),l=t(33264);function o(e){var n=e.accept,t=e.children,r=e.onFilePicked,a=(0,c.useRef)(null),l=(0,c.useCallback)(function(){var e;null===(e=a.current)||void 0===e||e.click()},[]),o=(0,c.useCallback)(function(e){var n,t=null===(n=e.target.files)||void 0===n?void 
0:n[0];t&&(r(t),e.target.value="")},[r]);return(0,i.jsxs)(i.Fragment,{children:["function"==typeof t?t({onClick:l}):(0,i.jsx)("span",{role:"button",onClick:l,children:t}),(0,i.jsx)("input",{type:"file",accept:n,ref:a,className:"hidden",onChange:o})]})}var u=t(74516),s="sandbox:";function d(e,n,t){var i;return i=(0,r.Z)(function(r){var i,c,o,s,d,f,h,p,b,v;return(0,a.__generator)(this,function(a){switch(a.label){case 0:if(!(e.has("tools2")&&(null==n?void 0:null===(i=n.enabledTools)||void 0===i?void 0:i.includes("tools2"))))return[2];return c=r.substring(8),[4,l.ZP.checkFile(t.current,c)];case 1:if((o=a.sent()).exists)return[3,2];return u.m.warning("File does not exist: ".concat(c)),[3,6];case 2:if(!o.too_large)return[3,3];return s=(o.size/1024/1024).toFixed(0),d="100",u.m.warning("File is larger than download limit: ".concat(s," MB vs ").concat(d," MB")),[3,6];case 3:return[4,l.ZP.fetchFileForDownload(t.current,c)];case 4:return[4,a.sent().blob()];case 5:h=a.sent(),p=window.URL.createObjectURL(h),(b=document.createElement("a")).href=p,v=c.split("/").pop(),b.download=v,b.click(),a.label=6;case 6:return[2]}})}),function(e){return i.apply(this,arguments)}}function f(e){var n,t=e.onFileUpload,s=e.threadId,d=e.currentLeafId,f=e.modelBackend,h=e.disabled,p=e.children,b=(0,c.useState)(!1),v=b[0],k=b[1],g=(0,c.useCallback)((n=(0,r.Z)(function(e){var n,r,i;return(0,a.__generator)(this,function(a){switch(a.label){case 0:k(!0),a.label=1;case 1:if(a.trys.push([1,,3,4]),e.size>104857600)return n=(e.size/1024/1024).toFixed(0),r="100",u.m.warning("File is larger than upload limit: ".concat(n," MB vs ").concat(r," MB"),{hasCloseButton:!0,duration:15}),[2];return[4,l.ZP.upload(d,s,f,e)];case 2:return t(a.sent()),[3,4];case 3:return k(!1),[7];case 4:return[2]}})}),function(e){return n.apply(this,arguments)}),[t,s,d,f]);return h||v?p(v):(0,i.jsx)(o,{onFilePicked:g,children:p(v)})}}}]); -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | #Usually these files are written by a python script from a template 32 | #before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | # Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # JetBrains IDEs configuration 132 | .idea/ 133 | 134 | # macOS 135 | .DS_Store 136 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from setuptools import setup, find_packages 4 | 5 | from src.pandora import __version__ 6 | 7 | with open('README.md', 'r', encoding='utf-8') as f: 8 | long_description = f.read() 9 | 10 | with open('requirements.txt', 'r', encoding='utf-8') as f: 11 | requirements = f.read().split('\n') 12 | 13 | with open('requirements_api.txt', 'r', encoding='utf-8') as f: 14 | requirements_api = f.read().split('\n') 15 | 16 | setup( 17 | name='Pandora-ChatGPT', 18 | version=__version__, 19 | python_requires='>=3.7', 20 | author='Neo Peng', 21 | author_email='admin@zhile.io', 22 | keywords='OpenAI ChatGPT ChatGPT-Plus gpt-3.5-turbo gpt-3.5-turbo-0301', 23 | description='A command-line interface to ChatGPT', 24 | long_description=long_description, 25 | long_description_content_type='text/markdown', 26 | url='https://github.com/zhile-io/pandora', 27 | packages=find_packages('src'), 28 | package_dir={'pandora': 'src/pandora'}, 29 | include_package_data=True, 30 | install_requires=requirements, 31 | extras_require={ 32 | 'api': requirements_api, 33 | 'cloud': ['pandora-cloud~=0.7.2'], 34 | }, 35 | 
entry_points={ 36 | 'console_scripts': [ 37 | 'pandora = pandora.launcher:run', 38 | 'pandora-cloud = pandora.cloud_launcher:run', 39 | ] 40 | }, 41 | project_urls={ 42 | 'Source': 'https://github.com/zhile-io/pandora', 43 | 'Tracker': 'https://github.com/zhile-io/pandora/issues', 44 | }, 45 | classifiers=[ 46 | 'Development Status :: 5 - Production/Stable', 47 | 48 | 'Environment :: Console', 49 | 'Environment :: Web Environment', 50 | 51 | 'Framework :: Flask', 52 | 53 | 'Intended Audience :: Developers', 54 | 'Intended Audience :: End Users/Desktop', 55 | 56 | 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 57 | 58 | 'Natural Language :: English', 59 | 'Natural Language :: Chinese (Simplified)', 60 | 61 | 'Operating System :: MacOS', 62 | 'Operating System :: Microsoft :: Windows', 63 | 'Operating System :: POSIX :: Linux', 64 | 65 | 'Programming Language :: SQL', 66 | 'Programming Language :: JavaScript', 67 | 'Programming Language :: Python :: 3.7', 68 | 'Programming Language :: Python :: 3.8', 69 | 'Programming Language :: Python :: 3.9', 70 | 'Programming Language :: Python :: 3.10', 71 | 'Programming Language :: Python :: 3.11', 72 | 73 | 'Topic :: Communications :: Chat', 74 | 'Topic :: Internet :: WWW/HTTP', 75 | ], 76 | ) 77 | -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Push Docker Image 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | release: 7 | types: [ published ] 8 | 9 | permissions: 10 | packages: write 11 | contents: read 12 | 13 | env: 14 | IMAGE_NAME: pandora 15 | PLATFORMS: linux/amd64,linux/arm64 16 | 17 | jobs: 18 | get-tags: 19 | runs-on: ubuntu-latest 20 | outputs: 21 | image_version: ${{ steps.get_image_version.outputs.image_version }} 22 | steps: 23 | - uses: actions/checkout@v3 24 | 25 | - name: Get Image Version 26 | id: get_image_version 27 | run: | 28 | # 
Strip git ref prefix from version 29 | IMAGE_VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') 30 | 31 | # Strip "v" prefix from tag name 32 | [[ "${{ github.ref }}" == "refs/tags/"* ]] && IMAGE_VERSION=$(echo $IMAGE_VERSION | sed -e 's/^v//') 33 | 34 | echo VERSION=$IMAGE_VERSION 35 | echo "image_version=${IMAGE_VERSION}" >> $GITHUB_OUTPUT 36 | 37 | push-ghcr: 38 | needs: get-tags 39 | runs-on: ubuntu-latest 40 | env: 41 | REGISTRY: ghcr.io 42 | 43 | steps: 44 | - uses: actions/checkout@v3 45 | 46 | - name: Docker Setup QEMU 47 | uses: docker/setup-qemu-action@v2 48 | 49 | - name: Set up Docker BuildX 50 | uses: docker/setup-buildx-action@v2 51 | 52 | - name: Docker Login 53 | uses: docker/login-action@v2 54 | with: 55 | registry: ${{ env.REGISTRY }} 56 | username: ${{ github.repository_owner }} 57 | password: ${{ secrets.GITHUB_TOKEN }} 58 | 59 | - name: Build and Push 60 | uses: docker/build-push-action@v4 61 | with: 62 | push: true 63 | platforms: ${{ env.PLATFORMS }} 64 | tags: | 65 | ${{ env.REGISTRY }}/${{ github.repository_owner }}/${{ env.IMAGE_NAME }}:${{ needs.get-tags.outputs.image_version }} 66 | ${{ env.REGISTRY }}/${{ github.repository_owner }}/${{ env.IMAGE_NAME }}:latest 67 | 68 | push-docker-hub: 69 | needs: get-tags 70 | runs-on: ubuntu-latest 71 | 72 | steps: 73 | - uses: actions/checkout@v3 74 | 75 | - name: Docker Setup QEMU 76 | uses: docker/setup-qemu-action@v2 77 | 78 | - name: Set up Docker BuildX 79 | uses: docker/setup-buildx-action@v2 80 | 81 | - name: Docker Login 82 | uses: docker/login-action@v2 83 | with: 84 | username: ${{ secrets.DOCKERHUB_USERNAME }} 85 | password: ${{ secrets.DOCKERHUB_TOKEN }} 86 | 87 | - name: Build and Push 88 | uses: docker/build-push-action@v4 89 | with: 90 | push: true 91 | platforms: ${{ env.PLATFORMS }} 92 | tags: | 93 | ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.IMAGE_NAME }}:${{ needs.get-tags.outputs.image_version }} 94 | ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.IMAGE_NAME }}:latest 95 | 
-------------------------------------------------------------------------------- /src/pandora/flask/static/_next/static/chunks/554.9b8bfd0762461d74.js: -------------------------------------------------------------------------------- 1 | "use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[554],{76554:function(t,e,n){n.r(e),n.d(e,{getLocalhostPluginHttpApiCallData:function(){return l},makeLocalhostPluginHttpApiCall:function(){return c}});var r=n(61706),a=n(31501),o=n(45813),s=n(8844),i=n(68619);function l(t){var e,n=null===(e=t.metadata)||void 0===e?void 0:e.http_api_call_data;if(void 0!==n){if(t.author.role!==i.uU.Assistant){console.error("Refusing to make localhost plugin HTTP call from non-assistant message",t);return}if("object"!=typeof n||"string"!=typeof n.namespace||0===n.namespace.length||"string"!=typeof n.function_name||0===n.function_name.length||"string"!=typeof n.parent_message_id||0===n.parent_message_id.length||"string"!=typeof n.url||0===n.url.length||"string"!=typeof n.method||!["get","post","put","delete","patch"].includes(n.method)||!Array.isArray(n.qs_params)||n.qs_params.some(function(t){return!Array.isArray(t)||2!==t.length||"string"!=typeof t[0]||"string"!=typeof t[1]})||"object"!=typeof n.headers||Object.keys(n.headers).some(function(t){return"string"!=typeof t})||Object.values(n.headers).some(function(t){return"string"!=typeof t})||!(null===n.body||"object"==typeof n.body&&Object.keys(n.body).every(function(t){return"string"==typeof t}))||"string"!=typeof n.api_function_type||!["kwargs","chat"].includes(n.api_function_type)){console.error("Refusing to make localhost plugin HTTP call with invalid metadata",t);return}if(!/^https?:\/\/localhost:/.test(n.url)){console.error("Refusing to make localhost plugin HTTP call with non-localhost URL",t);return}return n}}function c(t){return u.apply(this,arguments)}function u(){return(u=(0,r.Z)(function(t){var e,n;return(0,o.__generator)(this,function(n){switch(n.label){case 0:return 
n.trys.push([0,2,,3]),[4,function(t){return p.apply(this,arguments)}(t)];case 1:return[2,n.sent()];case 2:return e=n.sent(),console.error("Error making localhost plugin HTTP call",e),[2,[{id:(0,s.Z)(),author:{role:i.uU.System},role:i.uU.Tool,content:{content_type:"text",parts:["Error making localhost plugin HTTP call: ".concat(e)]},metadata:{parent_message_id:t.parent_message_id,is_complete:!0}}]];case 3:return[2]}})})).apply(this,arguments)}function p(){return(p=(0,r.Z)(function(t){var e,n,r,l,c,u;function p(t){return Object.keys(t).map(function(t){return t.toLowerCase()})}return(0,o.__generator)(this,function(o){switch(o.label){case 0:var l,u;if(e={"content-type":"application/json"},u=(l=[p(t.headers),p(e)]).flat(),new Set(u).size!==u.length)throw Error("Refusing to make localhost plugin HTTP call with duplicate header keys");return n=t.url,t.qs_params.length>0&&(n=n+"?"+new URLSearchParams(t.qs_params)),r=void 0,null!==t.body&&(r=JSON.stringify(t.body)),[4,fetch(n,{method:t.method,headers:(0,a.Z)({},e,t.headers),body:r})];case 1:return[4,o.sent().text()];case 2:if(c=o.sent(),"chat"===t.api_function_type)return[2,[JSON.parse(c)]];if("kwargs"===t.api_function_type)return[2,[{id:(0,s.Z)(),author:{role:i.uU.Tool,name:"".concat(t.namespace,".").concat(t.function_name)},role:i.uU.Tool,content:{content_type:"text",parts:[c]},metadata:{parent_message_id:t.parent_message_id,is_complete:!0}}]];throw Error("Not implemented")}})})).apply(this,arguments)}}}]); -------------------------------------------------------------------------------- /src/pandora/flask/static/_next/static/chunks/webpack-c9a868e8e0796ec6.js: -------------------------------------------------------------------------------- 1 | !function(){"use strict";var e,t,r,n,o,i,u,c={},a={};function d(e){var t=a[e];if(void 0!==t)return t.exports;var r=a[e]={id:e,loaded:!1,exports:{}},n=!0;try{c[e].call(r.exports,r,r.exports,d),n=!1}finally{n&&delete a[e]}return r.loaded=!0,r.exports}d.m=c,d.amdD=function(){throw 
Error("define cannot be used indirect")},d.amdO={},e=[],d.O=function(t,r,n,o){if(r){o=o||0;for(var i=e.length;i>0&&e[i-1][2]>o;i--)e[i]=e[i-1];e[i]=[r,n,o];return}for(var u=1/0,i=0;i=o&&Object.keys(d.O).every(function(e){return d.O[e](r[a])})?r.splice(a--,1):(c=!1,o 8 | 9 | ![Python version](https://img.shields.io/badge/python-%3E%3D3.7-green) 10 | [![Issues](https://img.shields.io/github/issues-raw/zhile-io/pandora)](https://github.com/zhile-io/pandora/issues) 11 | [![Commits](https://img.shields.io/github/last-commit/zhile-io/pandora/master)](https://github.com/zhile-io/pandora/commits/master) 12 | [![PyPi](https://img.shields.io/pypi/v/pandora-chatgpt.svg)](https://pypi.python.org/pypi/pandora-chatgpt) 13 | [![Downloads](https://static.pepy.tech/badge/pandora-chatgpt)](https://pypi.python.org/pypi/pandora-chatgpt) 14 | [![PyPi workflow](https://github.com/zhile-io/pandora/actions/workflows/python-publish.yml/badge.svg)](https://github.com/zhile-io/pandora/actions/workflows/python-publish.yml) 15 | [![Docker workflow](https://github.com/zhile-io/pandora/actions/workflows/docker-publish.yml/badge.svg)](https://github.com/zhile-io/pandora/actions/workflows/docker-publish.yml) 16 | [![Discord](https://img.shields.io/discord/1098772912242163795?label=Discord)](https://discord.gg/QBkd9JAaWa) 17 | 18 | ## 体验地址 19 | * 点击 https://chat.zhile.io 20 | * 最新拿 `Access Token` 的技术原理,我记录在[这里](https://zhile.io/2023/05/19/how-to-get-chatgpt-access-token-via-pkce.html)了。 21 | * 可以访问 [这里](http://ai-20230626.fakeopen.com/auth) 拿 `Access Token` 22 | * 也可以官方登录,然后访问 [这里](http://chat.openai.com/api/auth/session) 拿 `Access Token` 23 | * `Access Token` 有效期 `14` 天,期间访问**不需要梯子**。这意味着你在手机上也可随意使用。 24 | * 这个页面上还包含一个共享账号的链接,**没有账号**的可以点进去体验一下。 25 | 26 | ## ChatGPT使用时可能会遇到: 27 | 28 | ### 1. Please stand by, while we are checking your browser... 29 | ###    动不动来一下,有时候还不动或者出人机验证。痛! 30 | ![t0](https://github.com/zhile-io/pandora/raw/master/doc/images/t0.png) 31 | 32 | ### 2. Access denied. 
Sorry, you have been blocked 33 | ###    经典问题,只能到处找可用VPN,费时费力,更费钱。移动端访问更难。痛! 34 | ![t1.1](https://github.com/zhile-io/pandora/raw/master/doc/images/t1.1.png) 35 | 36 | ### 3. ChatGPT is at capacity right now 37 | ###    系统负载高,白嫖用户不给用。痛! 38 | ![t2](https://github.com/zhile-io/pandora/raw/master/doc/images/t2.png) 39 | 40 | ### 4. This content may violate our content policy. 41 | ###    道德审查,多触发几次可能就封号了。痛!!! 42 | ![t3](https://github.com/zhile-io/pandora/raw/master/doc/images/t3.png) 43 | 44 | ### 5. Something went wrong. 45 | ###    吃着火锅唱着歌,突然就出故障了。痛! 46 | ![t4](https://github.com/zhile-io/pandora/raw/master/doc/images/t4.png) 47 | 48 | ### 6. 手机和电脑的模型不通用,顾这个就顾不到那个,痛! 49 | ![t7](https://github.com/zhile-io/pandora/raw/master/doc/images/t7.png) 50 | 51 | ### 7. 蹦字慢吞吞,卡顿不流畅,不知道的甚至想换电脑。痛! 52 | ### 8. 想把 `ChatGPT` 接到其他系统,结果只能接个差强人意的 `gpt-3.5-turbo`。痛! 53 | 54 | ### _一次看完上面的噩梦,血压上来了,拳头硬了!太痛了!!!以上痛点,`Pandora` 一次全部解决。_ 55 | 56 | ## 界面截图 57 | 58 |
59 | 60 | 61 | 62 | ![alt Screenshot5](https://github.com/zhile-io/pandora/raw/master/doc/images/s05.png)
63 | ![alt Screenshot10](https://github.com/zhile-io/pandora/raw/master/doc/images/s12.jpeg) 64 | 65 |
66 | 67 | ![alt Screenshot1](https://github.com/zhile-io/pandora/raw/master/doc/images/s01.png)
68 | ![alt Screenshot2](https://github.com/zhile-io/pandora/raw/master/doc/images/s02.png)
69 | ![alt Screenshot3](https://github.com/zhile-io/pandora/raw/master/doc/images/s03.png)
70 | ![alt Screenshot4](https://github.com/zhile-io/pandora/raw/master/doc/images/s04.png)
71 | ![alt Screenshot6](https://github.com/zhile-io/pandora/raw/master/doc/images/s06.png)
72 | ![alt Screenshot11](https://github.com/zhile-io/pandora/raw/master/doc/images/s11.jpeg) 73 | 74 |
75 | 76 | ## 如何搭建运行 77 | 78 | * 访问 [doc/wiki.md](https://github.com/zhile-io/pandora/blob/master/doc/wiki.md) 获得详细指导。 79 | 80 | ## 其他说明 81 | 82 | * `开源项目可以魔改,但请保留原作者信息。确需去除,请联系作者,以免失去技术支持。` 83 | * 项目是站在其他巨人的肩膀上,感谢! 84 | * 报错、BUG之类的提出`Issue`,我会修复。 85 | * 因为之后`ChatGPT`的API变动,我可能不会跟进修复。 86 | * 喜欢的可以给颗星,都是老朋友了。 87 | * 不影响`PHP是世界上最好的编程语言!` 88 | 89 | ## 贡献者们 90 | 91 | > 感谢所有让这个项目变得更好的贡献者们! 92 | 93 | [![Star History Chart](https://contrib.rocks/image?repo=zhile-io/pandora)](https://github.com/zhile-io/pandora/graphs/contributors) 94 | 95 | ## Star历史 96 | 97 | ![Star History Chart](https://api.star-history.com/svg?repos=zhile-io/pandora&type=Date) 98 | -------------------------------------------------------------------------------- /src/pandora/migrations/models.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from datetime import datetime as dt 4 | 5 | from sqlalchemy import func, Column, Text, Integer 6 | from sqlalchemy.orm import DeclarativeBase 7 | 8 | from ..migrations.database import session 9 | 10 | 11 | class Base(DeclarativeBase): 12 | pass 13 | 14 | 15 | class ConversationOfficial(Base): 16 | __tablename__ = 'conversation_official' 17 | 18 | conversation_id = Column(Text, primary_key=True, autoincrement=False) 19 | title = Column(Text, nullable=False) 20 | create_time = Column(Integer, nullable=False) 21 | 22 | @staticmethod 23 | def get_list(offset, limit): 24 | total = session.query(func.count(ConversationOfficial.conversation_id)).scalar() 25 | return total, session.query(ConversationOfficial).order_by(ConversationOfficial.create_time.desc()).limit( 26 | limit).offset(offset).all() 27 | 28 | @staticmethod 29 | def get(conversation_id): 30 | return session.query(ConversationOfficial).get(conversation_id) 31 | 32 | def save(self): 33 | session.commit() 34 | return self 35 | 36 | def new(self): 37 | session.add(self) 38 | session.commit() 39 | 40 | return self 41 | 42 | @staticmethod 43 | 
def delete(conversation_id): 44 | session.query(ConversationOfficial).filter(ConversationOfficial.conversation_id == conversation_id).delete() 45 | session.commit() 46 | 47 | @staticmethod 48 | def clear(): 49 | session.query(ConversationOfficial).delete() 50 | session.commit() 51 | 52 | @staticmethod 53 | def new_conversation(conversation_id, title=None): 54 | conv = ConversationOfficial.get(conversation_id) 55 | 56 | if not conv: 57 | conv = ConversationOfficial() 58 | conv.conversation_id = conversation_id 59 | conv.title = title or 'New chat' 60 | conv.create_time = dt.now().timestamp() 61 | conv.new() 62 | else: 63 | conv.title = title or 'New chat' 64 | conv.save() 65 | 66 | @staticmethod 67 | def wrap_conversation_list(offset, limit): 68 | total, items = ConversationOfficial.get_list(offset, limit) 69 | 70 | stripped = [] 71 | for item in items: 72 | stripped.append({ 73 | 'id': item.conversation_id, 74 | 'title': item.title, 75 | 'create_time': dt.utcfromtimestamp(item.create_time).isoformat(), 76 | }) 77 | 78 | return {'items': stripped, 'total': total, 'limit': limit, 'offset': offset} 79 | 80 | 81 | class ConversationInfo(Base): 82 | __tablename__ = 'conversation_info' 83 | 84 | conversation_id = Column(Text, primary_key=True, autoincrement=False) 85 | title = Column(Text, nullable=False) 86 | create_time = Column(Integer, nullable=False) 87 | current_node = Column(Text, nullable=True) 88 | 89 | @staticmethod 90 | def get_list(offset, limit): 91 | total = session.query(func.count(ConversationInfo.conversation_id)).scalar() 92 | return total, session.query(ConversationInfo).order_by(ConversationInfo.create_time.desc()).limit( 93 | limit).offset(offset).all() 94 | 95 | @staticmethod 96 | def get(conversation_id): 97 | return session.query(ConversationInfo).get(conversation_id) 98 | 99 | def new(self): 100 | session.add(self) 101 | session.commit() 102 | 103 | return self 104 | 105 | @staticmethod 106 | def delete(conversation_id): 107 | 
session.query(ConversationInfo).filter(ConversationInfo.conversation_id == conversation_id).delete() 108 | session.commit() 109 | 110 | @staticmethod 111 | def clear(): 112 | session.query(ConversationInfo).delete() 113 | session.commit() 114 | 115 | 116 | class PromptInfo(Base): 117 | __tablename__ = 'prompt_info' 118 | 119 | prompt_id = Column(Text, primary_key=True, autoincrement=False) 120 | conversation_id = Column(Text, primary_key=True, autoincrement=False) 121 | model = Column(Text, nullable=True) 122 | parent_id = Column(Text, nullable=True) 123 | role = Column(Text, nullable=True) 124 | content = Column(Text, nullable=True) 125 | create_time = Column(Integer, nullable=False) 126 | 127 | @staticmethod 128 | def list_by_conversation_id(conversation_id): 129 | return session.query(PromptInfo).filter(PromptInfo.conversation_id == conversation_id).all() 130 | 131 | def new(self): 132 | session.add(self) 133 | session.commit() 134 | 135 | return self 136 | 137 | @staticmethod 138 | def clear(): 139 | session.query(PromptInfo).delete() 140 | session.commit() 141 | -------------------------------------------------------------------------------- /doc/wiki_en.md: -------------------------------------------------------------------------------- 1 |
2 | 3 |

4 |

Pandora

5 |

6 | Pandora, talking with ChatGPT in command lines, and with more surprises. 7 |
8 | Wiki in 中文 » 9 |
10 |
11 | Demo View 12 | · 13 | Bug Report 14 | · 15 | Feature Request 16 |

17 |

18 | 19 | ## Table of Contents 20 | 21 | - [Make it run](#make-it-run) 22 | - [Start parameters](#start-parameters) 23 | - [Docker](#docker) 24 | - [Access Token things](#access-token-things) 25 | - [HTTP RESTful API](#http-restful-api) 26 | - [Commands](#commands) 27 | - [Cloud mode](#cloud-mode) 28 | 29 | ## Make it run 30 | 31 | * Python version no less than `3.7` 32 | 33 | * install from `pip` 34 | 35 | ```shell 36 | pip install pandora-chatgpt 37 | pandora 38 | ``` 39 | * `gpt-3.5-turbo` mode: 40 | 41 | ```shell 42 | pip install 'pandora-chatgpt[api]' 43 | // OR 44 | pip install pandora-chatgpt[api] 45 | pandora 46 | ``` 47 | * `cloud` mode: 48 | 49 | ```shell 50 | pip install 'pandora-chatgpt[cloud]' 51 | // OR 52 | pip install pandora-chatgpt[cloud] 53 | pandora-cloud 54 | ``` 55 | 56 | * install from source 57 | 58 | ```shell 59 | pip install . 60 | pandora 61 | ``` 62 | 63 | * `gpt-3.5-turbo` mode: 64 | 65 | ```shell 66 | pip install '.[api]' 67 | // OR 68 | pip install .[api] 69 | pandora 70 | ``` 71 | 72 | * `cloud` mode: 73 | 74 | ```shell 75 | pip install '.[cloud]' 76 | // OR 77 | pip install .[cloud] 78 | pandora-cloud 79 | ``` 80 | 81 | * Docker Hub 82 | 83 | ```shell 84 | docker pull pengzhile/pandora 85 | docker run -it --rm pengzhile/pandora 86 | ``` 87 | 88 | * Docker build 89 | 90 | ```shell 91 | docker build -t pandora . 92 | docker run -it --rm pandora 93 | ``` 94 | 95 | * Serverless deploy:[pandora-cloud-serverless](https://github.com/zhile-io/pandora-cloud-serverless) 96 | 97 | * login with your credentials 98 | 99 | * stay simple, stay naive, stay elegant 100 | 101 | ## Start parameters 102 | 103 | * `pandora --help` for help text. 104 | * `-p` or `--proxy` for setting the proxy. the value should be`protocol://user:pass@ip:port`. 105 | * `-t` or `--token_file` for indicating the file that stores `Access Token`. You will login with access token if this option is in use. 
106 | * `-s` or `--server` starts the HTTP server, by which you could open a web page and interact with it in a fancy UI. the value should be`ip:port`. 107 | * `-a` or `--api` use `gpt-3.5-turbo` API in backend. **NOTICE: you will be charged if this option is in use.** 108 | * `-l` or `--local` login using the local environment. **You may need a suitable proxy IP to avoid account restrictions!** 109 | * `--tokens_file` indicating a file storing multiple `Access Token`s. The file content should be like`{"key": "token"}`. 110 | * `--threads` specify the number of server workers, default is `8`, and for cloud mode, it is `4`. 111 | * `-v` or `--verbose` for verbose debugging messages. 112 | 113 | ## Docker 114 | 115 | These docker environment variables will override start parameters. 116 | 117 | * `PANDORA_ACCESS_TOKEN` =`Access Token` string. 118 | * `PANDORA_TOKENS_FILE` = the path of file which keeps `Access Token`s. 119 | * `PANDORA_PROXY` =`protocol://user:pass@ip:port`. 120 | * `PANDORA_SERVER` =`ip:port`. 121 | * `PANDORA_API` for using `gpt-3.5-turbo` API. **NOTICE: you will be charged if this option is in use.** 122 | * `PANDORA_LOGIN_LOCAL` login using the local environment. **You may need a suitable proxy IP to avoid account restrictions!** 123 | * `PANDORA_VERBOSE` for verbose debugging messages. 124 | * `PANDORA_THREADS` specify the number of server workers, default is `8`, and for cloud mode, it is `4`. 125 | 126 | ## Access Token things 127 | 128 | * no need for proxy if login with `Access Token`. 129 | * you could obtain your access token safely with [this service](https://ai.fakeopen.com/auth). 130 | * `Access Token` has a expiration time as `14 days`, you could save it and keep using within this period. 131 | * leaking your `Access Token` will lead to loss of your account. 132 | 133 | ## HTTP RESTFUL API 134 | 135 | * if you start Pandora with `-s`/`--server`/`PANDORA_SERVER`, you could access a web UI with `http://ip:port`. 
136 | * you could switch access token by passing a different one with `http://ip:port/?token=xxx`. 137 | * API documents: [doc/HTTP-API.md](https://github.com/zhile-io/pandora/blob/master/doc/HTTP-API.md) 138 | 139 | ## Commands 140 | 141 | * **double** `Enter` to send prompt to `ChatGPT`. 142 | * `/?` for help text. 143 | * `/title` for setting the title of current conversation. 144 | * `/select` back to conversation choosing page. 145 | * `/reload` for refreshing. 146 | * `/regen` for regenerating answers if you are not satisfied with the last one. 147 | * `/continue` make `ChatGPT` to append responses. 148 | * `/edit` for editing your previous prompt. 149 | * `/new` to start a new conversation. 150 | * `/del` to delete current conversation and back to conversation choosing page. 151 | * `/token` for printing current access token. 152 | * `/copy` for copying the last response of `ChatGPT` to pasteboard. 153 | * `/copy_code` for copying the code in the last response of `ChatGPT` to pasteboard. 154 | * `/clear` for cleaning the screen. 155 | * `/version` for printing the version of Pandora. 156 | * `/exit` to exit Pandora. 157 | 158 | ## Cloud mode 159 | 160 | - setting up a service just like official `ChatGPT` website. it's so same as only jesus could tell it apart. 161 | 162 | * you need to use `pandora-cloud` instead of `pandora` to start Pandora. 163 | * enabling `PANDORA_CLOUD` if you are using Docker to start Pandora. 164 | * Other parameters are same as these guys in normal mode. 165 | -------------------------------------------------------------------------------- /doc/wiki.md: -------------------------------------------------------------------------------- 1 |
2 | 3 |

4 |

潘多拉 Pandora

5 |

6 | 一个不只是命令行的 "ChatGPT" 7 |
8 | Wiki in English » 9 |
10 |
11 | 查看Demo 12 | · 13 | 报告Bug 14 | · 15 | 提出新特性 16 |

17 |

18 | 19 | ## 目录 20 | 21 | - [如何运行](#如何运行) 22 | - [程序参数](#程序参数) 23 | - [Docker环境变量](#docker环境变量) 24 | - [关于 Access Token](#关于-access-token) 25 | - [HTTP服务文档](#http服务文档) 26 | - [操作命令](#操作命令) 27 | - [高阶设置](#高阶设置) 28 | - [Cloud模式](#cloud模式) 29 | - [使用Cloudflare Workers代理](#使用cloudflare-workers代理) 30 | 31 | ## 如何运行 32 | 33 | * Python版本目测起码要`3.7` 34 | 35 | * pip安装运行 36 | 37 | ```shell 38 | pip install pandora-chatgpt 39 | pandora 40 | ``` 41 | * 如果你想支持`gpt-3.5-turbo`模式: 42 | 43 | ```shell 44 | pip install 'pandora-chatgpt[api]' 45 | // 或者 46 | pip install pandora-chatgpt[api] 47 | pandora 48 | ``` 49 | * 如果你想启用`cloud`模式: 50 | 51 | ```shell 52 | pip install 'pandora-chatgpt[cloud]' 53 | // 或者 54 | pip install pandora-chatgpt[cloud] 55 | pandora-cloud 56 | ``` 57 | 58 | * 编译运行 59 | 60 | ```shell 61 | pip install . 62 | pandora 63 | ``` 64 | 65 | * 如果你想支持`gpt-3.5-turbo`模式: 66 | 67 | ```shell 68 | pip install '.[api]' 69 | // 或者 70 | pip install .[api] 71 | pandora 72 | ``` 73 | 74 | * 如果你想启用`cloud`模式: 75 | 76 | ```shell 77 | pip install '.[cloud]' 78 | // 或者 79 | pip install .[cloud] 80 | pandora-cloud 81 | ``` 82 | 83 | * Docker Hub运行 84 | 85 | ```shell 86 | docker pull pengzhile/pandora 87 | docker run -it --rm pengzhile/pandora 88 | ``` 89 | 90 | * Docker编译运行 91 | 92 | ```shell 93 | docker build -t pandora . 
94 | docker run -it --rm pandora 95 | ``` 96 | 97 | * Serverless部署见项目:[pandora-cloud-serverless](https://github.com/zhile-io/pandora-cloud-serverless) 98 | 99 | * 输入用户名密码登录即可,登录密码理论上不显示出来,莫慌。 100 | * 简单而粗暴,不失优雅。 101 | 102 | ## 程序参数 103 | 104 | * 可通过 `pandora --help` 查看。 105 | * `-p` 或 `--proxy` 指定代理,格式:`protocol://user:pass@ip:port`。 106 | * `-t` 或 `--token_file` 指定一个存放`Access Token`的文件,使用`Access Token`登录。 107 | * `-s` 或 `--server` 以`http`服务方式启动,格式:`ip:port`。 108 | * `-a` 或 `--api` 使用`gpt-3.5-turbo`API请求,**你可能需要向`OpenAI`支付费用**。 109 | * `-l` 或 `--local` 使用本地环境登录,**你可能需要一个合适的代理IP以避免账号被风控!** 110 | * `--tokens_file` 指定一个存放多`Access Token`的文件,内容为`{"key": "token"}`的形式。 111 | * `--threads` 指定服务启动的线程数,默认为 `8`,Cloud模式为 `4`。 112 | * `-v` 或 `--verbose` 显示调试信息,且出错时打印异常堆栈信息,供查错使用。 113 | 114 | ## Docker环境变量 115 | 116 | * `PANDORA_ACCESS_TOKEN` 指定`Access Token`字符串。 117 | * `PANDORA_TOKENS_FILE` 指定一个存放多`Access Token`的文件路径。 118 | * `PANDORA_PROXY` 指定代理,格式:`protocol://user:pass@ip:port`。 119 | * `PANDORA_SERVER` 以`http`服务方式启动,格式:`ip:port`。 120 | * `PANDORA_API` 使用`gpt-3.5-turbo`API请求,**你可能需要向`OpenAI`支付费用**。 121 | * `PANDORA_LOGIN_LOCAL` 使用本地环境登录,**你可能需要一个合适的代理IP以避免账号被风控!** 122 | * `PANDORA_VERBOSE` 显示调试信息,且出错时打印异常堆栈信息,供查错使用。 123 | * `PANDORA_THREADS` 指定服务启动的线程数,默认为 `8`,Cloud模式为 `4`。 124 | * 使用Docker方式,设置环境变量即可,无视上述`程序参数`。 125 | 126 | ## 关于 Access Token 127 | 128 | * 使用`Access Token`方式登录,可以无代理直连。 129 | * [这个服务](https://ai-20230626.fakeopen.com/auth) 可以帮你安全有效拿到`Access Token`,无论是否第三方登录。 130 | * 其中`accessToken`字段的那一长串内容即是`Access Token`。 131 | * `Access Token`可以复制保存,其有效期目前为`14天`。 132 | * 不要泄露你的`Access Token`,使用它可以操纵你的账号。 133 | 134 | ## HTTP服务文档 135 | 136 | * 如果你以`http`服务方式启动,现在你可以打开一个极简版的`ChatGPT`了。通过你指定的`http://ip:port`来访问。 137 | * 通过`http://ip:port/?token=xxx`,传递一个Token的名字,可以切换到对应的`Access Token`。 138 | * API文档见:[doc/HTTP-API.md](https://github.com/zhile-io/pandora/blob/master/doc/HTTP-API.md) 139 | 140 | ## 操作命令 141 | 142 | * 对话界面**连敲两次**`Enter`发送你的输入给`ChatGPT`。 143 | * 
对话界面使用`/?`可以打印支持的操作命令。 144 | * `/title` 重新设置当前对话的标题。 145 | * `/select` 回到选择会话界面。 146 | * `/reload` 重新加载当前会话所有内容,`F5`你能懂吧。 147 | * `/regen` 如果对`ChatGPT`当前回答不满意,可以让它重新回答。 148 | * `/continue` 让`ChatGPT`继续输出回复的剩余部分。 149 | * `/edit` 编辑你之前的一个提问。 150 | * `/new` 直接开启一个新会话。 151 | * `/del` 删除当前会话,回到会话选择界面。 152 | * `/token` 打印当前的`Access Token`,也许你用得上,但不要泄露。 153 | * `/copy` 复制`ChatGPT`上一次回复的内容到剪贴板。 154 | * `/copy_code` 复制`ChatGPT`上一次回复的代码到剪贴板 155 | * `/clear` 清屏,应该不用解释。 156 | * `/version` 打印`Pandora`的版本信息。 157 | * `/exit` 退出`潘多拉`。 158 | 159 | ## 高阶设置 160 | 161 | * 本部分内容不理解的朋友,**请勿擅动!** 162 | * 环境变量 `OPENAI_API_PREFIX` 可以替换OpenAI Api的前缀`https://api.openai.com`。 163 | * 环境变量 `CHATGPT_API_PREFIX` 可以替换ChatGPT Api的前缀`https://ai.fakeopen.com`。 164 | * 如果你想持久存储`Docker`中`Pandora`产生的数据,你可以挂载宿主机目录至`/data`。 165 | * 如果你在国内使用`pip`安装缓慢,可以考虑切换至腾讯的源:```pip config set global.index-url https://mirrors.cloud.tencent.com/pypi/simple``` 166 | * 镜像同步版本可能不及时,如果出现这种情况建议切换至官方源:```pip config set global.index-url https://pypi.org/simple``` 167 | * 默认使用`sqlite3`存储会话数据,如果你希望更换至`mysql`,可以这么做: 168 | * 执行```pip install PyMySQL```安装驱动。 169 | * 设置环境变量:`DATABASE_URI`为类似`mysql+pymysql://user:pass@localhost/dbname`的连接字符串。 170 | * 环境变量指定`OPENAI_EMAIL`可以替代登录输入用户名,`OPENAI_PASSWORD`则可以替代输入密码, `OPENAI_MFA_CODE`则可以替代输入二次验证。 171 | * 环境变量`API_SYSTEM_PROMPT`可以替换`api`模式下的系统`prompt`。 172 | 173 | ## Cloud模式 174 | 175 | * 搭建一个跟官方很像的`ChatGPT`服务,不能说很像,只能说一样。 176 | * 该模式使用`pandora-cloud`启动,前提是你如前面所说安装好了。 177 | * Docker环境变量:`PANDORA_CLOUD` 启动`cloud`模式。 178 | * 该模式参数含义与普通模式相同,可`--help`查看。 179 | 180 | ## 使用Cloudflare Workers代理 181 | 182 | * 如果你感觉默认的`https://ai.fakeopen.com`在你那里可能被墙了,可以使用如下方法自行代理。 183 | * 你需要一个`Cloudflare`账号,如果没有,可以[注册](https://dash.cloudflare.com/sign-up)一个。 184 | * 登录后,点击`Workers`,然后点击`Create a Worker`,填入服务名称后点击`创建服务`。 185 | * 点开你刚才创建的服务,点击`快速编辑`按钮,贴入下面的代码,然后点击`保存并部署`。 186 | 187 | ```javascript 188 | export default { 189 | async fetch(request, env) { 190 | const url = new URL(request.url); 191 | url.host = 
'ai.fakeopen.com'; 192 | return fetch(new Request(url, request)) 193 | } 194 | } 195 | ``` 196 | 197 | * 点击`触发器`选项卡,可以添加自定义访问域名。 198 | * 参考`高阶设置`中的环境变量使用你的服务地址进行替换。 199 | 200 | ## 日抛版代理地址 201 | 202 | * 每日凌晨`1`点,将会同时生成一个当日子域名,如 `ai-20230625.fakeopen.com`。 203 | * 子域名使用效果完全等同于 `ai.fakeopen.com`。至于作用,湖北的你肯定能懂。 204 | * 可将环境变量替换成子域,如 `CHATGPT_API_PREFIX=https://ai-20230625.fakeopen.com`。 -------------------------------------------------------------------------------- /src/pandora/turbo/base.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import uuid 4 | from datetime import datetime as dt 5 | 6 | 7 | class Prompt: 8 | def __init__(self, prompt_id=None, role=None, content=None, parent=None): 9 | self.prompt_id = prompt_id or str(uuid.uuid4()) 10 | self.parent_id = None 11 | self.role = role 12 | self.content = content 13 | self.children = [] 14 | self.create_time = dt.now().timestamp() 15 | 16 | if parent: 17 | self.parent_id = parent.prompt_id 18 | parent.add_child(self.prompt_id) 19 | 20 | def add_child(self, prompt_id): 21 | self.children.append(prompt_id) 22 | 23 | def get_message(self, end=True): 24 | return None 25 | 26 | def get_info(self): 27 | return { 28 | 'id': self.prompt_id, 29 | 'message': self.get_message(), 30 | 'parent': self.parent_id, 31 | 'children': self.children 32 | } 33 | 34 | 35 | class SystemPrompt(Prompt): 36 | def __init__(self, content, parent): 37 | super().__init__(role='system', content=content, parent=parent) 38 | 39 | def get_message(self, end=True): 40 | return { 41 | 'id': self.prompt_id, 42 | 'author': { 43 | 'role': self.role, 44 | 'name': None, 45 | 'metadata': {} 46 | }, 47 | 'create_time': self.create_time, 48 | 'update_time': None, 49 | 'content': { 50 | 'content_type': 'text', 51 | 'parts': [''] 52 | }, 53 | 'end_turn': True, 54 | 'weight': 1.0, 55 | 'metadata': {}, 56 | 'recipient': 'all' 57 | } 58 | 59 | 60 | class UserPrompt(Prompt): 61 | def 
class GptPrompt(Prompt):
    """An assistant-authored prompt whose text is streamed in piecewise."""

    def __init__(self, parent, model):
        # Assistant messages always start empty; chunks arrive via append_content().
        super().__init__(role='assistant', content='', parent=parent)
        self.model = model

    def append_content(self, content):
        """Append a streamed chunk to the message body; returns self for chaining."""
        self.content += content
        return self

    def get_message(self, end=True):
        """Render this prompt as a ChatGPT-style message mapping.

        When *end* is falsy the message is reported as still streaming:
        ``end_turn`` and ``finish_details`` are left as ``None``.
        """
        finish_details = {'type': 'stop'} if end else None
        return {
            'id': self.prompt_id,
            'author': {
                'role': self.role,
                'name': None,
                'metadata': {}
            },
            'create_time': self.create_time,
            'update_time': None,
            'content': {
                'content_type': 'text',
                'parts': [self.content]
            },
            'end_turn': False if end else None,
            'weight': 1.0,
            'metadata': {
                'message_type': None,
                'model_slug': self.model,
                'finish_details': finish_details,
                'timestamp_': 'absolute'
            },
            'recipient': 'all'
        }
get_prompts(self): 144 | return self.prompts 145 | 146 | def set_title(self, title): 147 | self.title = title 148 | 149 | def get_title(self): 150 | return self.title 151 | 152 | def get_messages_directly(self, message_id): 153 | messages = [] 154 | while True: 155 | prompt = self.get_prompt(message_id) 156 | if not prompt.parent_id: 157 | break 158 | 159 | messages.insert(0, { 160 | 'role': prompt.role, 161 | 'content': prompt.content 162 | }) 163 | message_id = prompt.parent_id 164 | 165 | return messages 166 | 167 | def get_messages(self, message_id, model): 168 | messages = [] 169 | user_prompt = None 170 | while True: 171 | prompt = self.get_prompt(message_id) 172 | if not prompt.parent_id: 173 | break 174 | 175 | if not user_prompt and isinstance(prompt, UserPrompt): 176 | user_prompt = prompt 177 | 178 | messages.insert(0, { 179 | 'role': prompt.role, 180 | 'content': prompt.content 181 | }) 182 | message_id = prompt.parent_id 183 | 184 | return user_prompt, self.add_prompt(GptPrompt(user_prompt, model)), messages 185 | 186 | def get_info(self): 187 | mapping = {} 188 | for prompt_id in self.prompts: 189 | mapping[prompt_id] = self.prompts[prompt_id].get_info() 190 | 191 | return { 192 | 'title': self.title, 193 | 'create_time': self.create_time, 194 | 'mapping': mapping, 195 | 'moderation_results': [], 196 | 'current_node': self.current_node, 197 | } 198 | 199 | 200 | class Conversations: 201 | def __init__(self): 202 | self.__data = [] 203 | 204 | def list(self, offset, limit): 205 | return len(self.__data), self.__data[offset: limit] 206 | 207 | def clear(self): 208 | self.__data = [] 209 | 210 | def delete(self, conversation): 211 | self.__data = [x for x in self.__data if conversation.conversation_id != x.conversation_id] 212 | 213 | def new(self): 214 | conversation = Conversation() 215 | self.__data.insert(0, conversation) 216 | 217 | return conversation 218 | 219 | def get(self, conversation_id): 220 | for x in self.__data: 221 | if x.conversation_id 
== conversation_id: 222 | return x 223 | 224 | return None 225 | 226 | def guard_get(self, conversation_id): 227 | conversation = self.get(conversation_id) 228 | if not conversation: 229 | raise Exception('Can\'t load conversation {}'.format(conversation_id)) 230 | 231 | return conversation 232 | -------------------------------------------------------------------------------- /src/pandora/flask/templates/chat.html: -------------------------------------------------------------------------------- 1 | New chat
def read_access_token(token_file):
    """Return the access token stored in *token_file*.

    Surrounding whitespace (including the trailing newline) is stripped.
    """
    with open(token_file, 'r') as stream:
        raw = stream.read()
    return raw.strip()
def parse_access_tokens(tokens_file, api=False):
    """Load and validate a JSON file of named access tokens.

    The file must contain an object of the form ``{"name": "token", ...}``.
    Tokens that fail validation are reported via Console and dropped.
    Returns ``None`` when no token survives, otherwise a dict of valid ones.
    """
    if not os.path.isfile(tokens_file):
        raise Exception('Error: {} is not a file.'.format(tokens_file))

    import json
    with open(tokens_file, 'r') as stream:
        candidates = json.load(stream)

    survivors = {}
    for name, token in candidates.items():
        if check_access_token_out(token, api=api):
            survivors[name] = token
        else:
            # Report the failing entry but keep processing the rest.
            Console.error('### Access token id: {}'.format(name))

    if not survivors:
        Console.error('### No valid access tokens.')
        return None

    return survivors
Format: protocol://user:pass@ip:port', 125 | required=False, 126 | type=str, 127 | default=None, 128 | ) 129 | parser.add_argument( 130 | '-t', 131 | '--token_file', 132 | help='Specify an access token file and login with your access token.', 133 | required=False, 134 | type=str, 135 | default=None, 136 | ) 137 | parser.add_argument( 138 | '--tokens_file', 139 | help='Specify an access tokens json file.', 140 | required=False, 141 | type=str, 142 | default=None, 143 | ) 144 | parser.add_argument( 145 | '-s', 146 | '--server', 147 | help='Start as a proxy server. Format: ip:port, default: 127.0.0.1:8008', 148 | required=False, 149 | type=str, 150 | default=None, 151 | action='store', 152 | nargs='?', 153 | const='127.0.0.1:8008', 154 | ) 155 | parser.add_argument( 156 | '--threads', 157 | help='Define the number of server workers, default: 8', 158 | required=False, 159 | type=int, 160 | default=8, 161 | ) 162 | parser.add_argument( 163 | '-a', 164 | '--api', 165 | help='Use gpt-3.5-turbo chat api. Note: OpenAI will bill you.', 166 | action='store_true', 167 | ) 168 | parser.add_argument( 169 | '-l', 170 | '--local', 171 | help='Login locally. 
Pay attention to the risk control of the login ip!', 172 | action='store_true', 173 | ) 174 | parser.add_argument( 175 | '-v', 176 | '--verbose', 177 | help='Show exception traceback.', 178 | action='store_true', 179 | ) 180 | args, _ = parser.parse_known_args() 181 | __show_verbose = args.verbose 182 | 183 | Console.debug_b(''', Mode: {}, Engine: {} 184 | '''.format('server' if args.server else 'cli', 'turbo' if args.api else 'free')) 185 | 186 | if args.api: 187 | try: 188 | from .openai.token import gpt_num_tokens 189 | from .migrations.migrate import do_migrate 190 | 191 | do_migrate() 192 | except (ImportError, ModuleNotFoundError): 193 | Console.error_bh('### You need `pip install Pandora-ChatGPT[api]` to support API mode.') 194 | return 195 | 196 | access_tokens = parse_access_tokens(args.tokens_file, args.api) if args.tokens_file else None 197 | 198 | if not access_tokens: 199 | access_token, need_save = confirm_access_token(args.token_file, args.server, args.api) 200 | if not access_token: 201 | Console.info_b('Please enter your email and password to log in ChatGPT!') 202 | if not args.local: 203 | Console.warn('We login via {}'.format(api_prefix)) 204 | 205 | email = getenv('OPENAI_EMAIL') or Prompt.ask(' Email') 206 | password = getenv('OPENAI_PASSWORD') or Prompt.ask(' Password', password=True) 207 | mfa = getenv('OPENAI_MFA_CODE') or Prompt.ask(' MFA Code(Optional if not set)') 208 | Console.warn('### Do login, please wait...') 209 | access_token = Auth0(email, password, args.proxy, mfa=mfa).auth(args.local) 210 | 211 | if not check_access_token_out(access_token, args.api): 212 | return 213 | 214 | if need_save: 215 | if args.server or Confirm.ask('Do you want to save your access token for the next login?', default=True): 216 | save_access_token(access_token) 217 | 218 | access_tokens = {'default': access_token} 219 | 220 | if args.api: 221 | from .turbo.chat import TurboGPT 222 | 223 | chatgpt = TurboGPT(access_tokens, args.proxy) 224 | else: 225 | 
chatgpt = ChatGPT(access_tokens, args.proxy) 226 | 227 | if args.server: 228 | return ChatBotServer(chatgpt, args.verbose).run(args.server, args.threads) 229 | 230 | ChatBotLegacy(chatgpt).run() 231 | 232 | 233 | def run(): 234 | hook_except_handle() 235 | 236 | try: 237 | main() 238 | except Exception as e: 239 | Console.error_bh('### Error occurred: ' + str(e)) 240 | 241 | if __show_verbose: 242 | logger.exception('Exception occurred.') 243 | -------------------------------------------------------------------------------- /src/pandora/flask/static/_next/static/chunks/174-bd28069f281ef76f.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[174],{63561:function(e,t){"use strict";t.Z=function(e,t,i){return t in e?Object.defineProperty(e,t,{value:i,enumerable:!0,configurable:!0,writable:!0}):e[t]=i,e}},68561:function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(63561).Z,o=i(95781).Z,a=i(89478).Z;Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){var t,i,l=e.src,c=e.sizes,h=e.unoptimized,p=void 0!==h&&h,w=e.priority,k=void 0!==w&&w,E=e.loading,I=e.lazyRoot,R=e.lazyBoundary,_=e.className,L=e.quality,q=e.width,C=e.height,O=e.style,N=e.objectFit,P=e.objectPosition,W=e.onLoadingComplete,B=e.placeholder,M=void 0===B?"empty":B,Z=e.blurDataURL,D=s(e,["src","sizes","unoptimized","priority","loading","lazyRoot","lazyBoundary","className","quality","width","height","style","objectFit","objectPosition","onLoadingComplete","placeholder","blurDataURL"]),U=d.useContext(m.ImageConfigContext),V=d.useMemo(function(){var e=y||U||f.imageConfigDefault,t=a(e.deviceSizes).concat(a(e.imageSizes)).sort(function(e,t){return e-t}),i=e.deviceSizes.sort(function(e,t){return e-t});return r({},e,{allSizes:t,deviceSizes:i})},[U]),F=c?"responsive":"intrinsic";"layout"in D&&(D.layout&&(F=D.layout),delete D.layout);var H=x;if("loader"in D){if(D.loader){var 
G=D.loader;H=function(e){e.config;var t=s(e,["config"]);return G(t)}}delete D.loader}var T="";if(function(e){var t;return"object"==typeof e&&(z(e)||void 0!==e.src)}(l)){var J=z(l)?l.default:l;if(!J.src)throw Error("An object should only be passed to the image component src parameter if it comes from a static image import. It must include src. Received ".concat(JSON.stringify(J)));if(Z=Z||J.blurDataURL,T=J.src,(!F||"fill"!==F)&&(C=C||J.height,q=q||J.width,!J.height||!J.width))throw Error("An object should only be passed to the image component src parameter if it comes from a static image import. It must include height and width. Received ".concat(JSON.stringify(J)))}l="string"==typeof l?l:T;var Q=!k&&("lazy"===E||void 0===E);(l.startsWith("data:")||l.startsWith("blob:"))&&(p=!0,Q=!1),b.has(l)&&(Q=!1),V.unoptimized&&(p=!0);var K=o(d.useState(!1),2),X=K[0],Y=K[1],$=o(g.useIntersection({rootRef:void 0===I?null:I,rootMargin:R||"200px",disabled:!Q}),3),ee=$[0],et=$[1],ei=$[2],en=!Q||et,eo={boxSizing:"border-box",display:"block",overflow:"hidden",width:"initial",height:"initial",background:"none",opacity:1,border:0,margin:0,padding:0},ea={boxSizing:"border-box",display:"block",width:"initial",height:"initial",background:"none",opacity:1,border:0,margin:0,padding:0},er=!1,el=A(q),ec=A(C),es=A(L),ed=Object.assign({},O,{position:"absolute",top:0,left:0,bottom:0,right:0,boxSizing:"border-box",padding:0,border:"none",margin:"auto",display:"block",width:0,height:0,minWidth:"100%",maxWidth:"100%",minHeight:"100%",maxHeight:"100%",objectFit:N,objectPosition:P}),eu="blur"!==M||X?{}:{backgroundSize:N||"cover",backgroundPosition:P||"0% 0%",filter:"blur(20px)",backgroundImage:'url("'.concat(Z,'")')};if("fill"===F)eo.display="block",eo.position="absolute",eo.top=0,eo.left=0,eo.bottom=0,eo.right=0;else if(void 0!==el&&void 0!==ec){var 
ef=ec/el,eg=isNaN(ef)?"100%":"".concat(100*ef,"%");"responsive"===F?(eo.display="block",eo.position="relative",er=!0,ea.paddingTop=eg):"intrinsic"===F?(eo.display="inline-block",eo.position="relative",eo.maxWidth="100%",er=!0,ea.maxWidth="100%",t="data:image/svg+xml,%3csvg%20xmlns=%27http://www.w3.org/2000/svg%27%20version=%271.1%27%20width=%27".concat(el,"%27%20height=%27").concat(ec,"%27/%3e")):"fixed"===F&&(eo.display="inline-block",eo.position="relative",eo.width=el,eo.height=ec)}var em={src:v,srcSet:void 0,sizes:void 0};en&&(em=S({config:V,src:l,unoptimized:p,layout:F,width:el,quality:es,sizes:c,loader:H}));var eh=l,ep="imagesizes";ep="imageSizes";var ey=(n(i={},"imageSrcSet",em.srcSet),n(i,ep,em.sizes),n(i,"crossOrigin",D.crossOrigin),i),eb=d.default.useLayoutEffect,ev=d.useRef(W),ew=d.useRef(l);d.useEffect(function(){ev.current=W},[W]),eb(function(){ew.current!==l&&(ei(),ew.current=l)},[ei,l]);var ez=r({isLazy:Q,imgAttributes:em,heightInt:ec,widthInt:el,qualityInt:es,layout:F,className:_,imgStyle:ed,blurStyle:eu,loading:E,config:V,unoptimized:p,placeholder:M,loader:H,srcString:eh,onLoadingCompleteRef:ev,setBlurComplete:Y,setIntersection:ee,isVisible:en,noscriptSizes:c},D);return d.default.createElement(d.default.Fragment,null,d.default.createElement("span",{style:eo},er?d.default.createElement("span",{style:ea},t?d.default.createElement("img",{style:{display:"block",maxWidth:"100%",width:"initial",height:"initial",background:"none",opacity:1,border:0,margin:0,padding:0},alt:"","aria-hidden":!0,src:t}):null):null,d.default.createElement(j,Object.assign({},ez))),k?d.default.createElement(u.default,null,d.default.createElement("link",Object.assign({key:"__nimg-"+em.src+em.srcSet+em.sizes,rel:"preload",as:"image",href:em.srcSet?void 0:em.src},ey))):null)};var r=i(17858).Z,l=i(16922).Z,c=i(86905).Z,s=i(31080).Z,d=c(i(70079)),u=l(i(76109)),f=i(60239),g=i(26790),m=i(94136);i(13279);var h=i(5189);function p(e){return"/"===e[0]?e.slice(1):e}var 
y={deviceSizes:[640,750,828,1080,1200,1920,2048,3840],imageSizes:[16,32,48,64,96,128,256,384],path:"/_next/image",loader:"default",dangerouslyAllowSVG:!1,unoptimized:!1},b=new Set,v="data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7",w=new Map([["default",function(e){var t=e.config,i=e.src,n=e.width,o=e.quality;return i.endsWith(".svg")&&!t.dangerouslyAllowSVG?i:"".concat(h.normalizePathTrailingSlash(t.path),"?url=").concat(encodeURIComponent(i),"&w=").concat(n,"&q=").concat(o||75)}],["imgix",function(e){var t=e.config,i=e.src,n=e.width,o=e.quality,a=new URL("".concat(t.path).concat(p(i))),r=a.searchParams;return r.set("auto",r.getAll("auto").join(",")||"format"),r.set("fit",r.get("fit")||"max"),r.set("w",r.get("w")||n.toString()),o&&r.set("q",o.toString()),a.href}],["cloudinary",function(e){var t,i=e.config,n=e.src,o=e.width,a=["f_auto","c_limit","w_"+o,"q_"+(e.quality||"auto")].join(",")+"/";return"".concat(i.path).concat(a).concat(p(n))}],["akamai",function(e){var t=e.config,i=e.src,n=e.width;return"".concat(t.path).concat(p(i),"?imwidth=").concat(n)}],["custom",function(e){var t=e.src;throw Error('Image with src "'.concat(t,'" is missing "loader" prop.')+"\nRead more: https://nextjs.org/docs/messages/next-image-missing-loader")}],]);function z(e){return void 0!==e.default}function S(e){var t=e.config,i=e.src,n=e.unoptimized,o=e.layout,r=e.width,l=e.quality,c=e.sizes,s=e.loader;if(n)return{src:i,srcSet:void 0,sizes:void 0};var d=function(e,t,i,n){var o=e.deviceSizes,r=e.allSizes;if(n&&("fill"===i||"responsive"===i)){for(var l=/(^|\s)(1?\d?\d)vw/g,c=[];s=l.exec(n);s)c.push(parseInt(s[2]));if(c.length){var s,d,u=.01*(d=Math).min.apply(d,a(c));return{widths:r.filter(function(e){return e>=o[0]*u}),kind:"w"}}return{widths:r,kind:"w"}}return"number"!=typeof t||"fill"===i||"responsive"===i?{widths:o,kind:"w"}:{widths:a(new Set([t,2*t].map(function(e){return r.find(function(t){return 
t>=e})||r[r.length-1]}))),kind:"x"}}(t,r,o,c),u=d.widths,f=d.kind,g=u.length-1;return{sizes:c||"w"!==f?c:"100vw",srcSet:u.map(function(e,n){return"".concat(s({config:t,src:i,quality:l,width:e})," ").concat("w"===f?e:n+1).concat(f)}).join(", "),src:s({config:t,src:i,quality:l,width:u[g]})}}function A(e){return"number"==typeof e?e:"string"==typeof e?parseInt(e,10):void 0}function x(e){var t,i=(null==(t=e.config)?void 0:t.loader)||"default",n=w.get(i);if(n)return n(e);throw Error('Unknown "loader" found in "next.config.js". Expected: '.concat(f.VALID_LOADERS.join(", "),". Received: ").concat(i))}function k(e,t,i,n,o,a){e&&e.src!==v&&e["data-loaded-src"]!==t&&(e["data-loaded-src"]=t,("decode"in e?e.decode():Promise.resolve()).catch(function(){}).then(function(){if(e.parentNode&&(b.add(t),"blur"===n&&a(!0),null==o?void 0:o.current)){var i=e.naturalWidth,r=e.naturalHeight;o.current({naturalWidth:i,naturalHeight:r})}}))}var j=function(e){var t=e.imgAttributes,i=(e.heightInt,e.widthInt),n=e.qualityInt,o=e.layout,a=e.className,l=e.imgStyle,c=e.blurStyle,u=e.isLazy,f=e.placeholder,g=e.loading,m=e.srcString,h=e.config,p=e.unoptimized,y=e.loader,b=e.onLoadingCompleteRef,v=e.setBlurComplete,w=e.setIntersection,z=e.onLoad,A=e.onError,x=(e.isVisible,e.noscriptSizes),j=s(e,["imgAttributes","heightInt","widthInt","qualityInt","layout","className","imgStyle","blurStyle","isLazy","placeholder","loading","srcString","config","unoptimized","loader","onLoadingCompleteRef","setBlurComplete","setIntersection","onLoad","onError","isVisible","noscriptSizes"]);return g=u?"lazy":g,d.default.createElement(d.default.Fragment,null,d.default.createElement("img",Object.assign({},j,t,{decoding:"async","data-nimg":o,className:a,style:r({},l,c),ref:d.useCallback(function(e){w(e),(null==e?void 
0:e.complete)&&k(e,m,o,f,b,v)},[w,m,o,f,b,v,]),onLoad:function(e){k(e.currentTarget,m,o,f,b,v),z&&z(e)},onError:function(e){"blur"===f&&v(!0),A&&A(e)}})),(u||"blur"===f)&&d.default.createElement("noscript",null,d.default.createElement("img",Object.assign({},j,S({config:h,src:m,unoptimized:p,layout:o,width:i,quality:n,sizes:x,loader:y}),{decoding:"async","data-nimg":o,style:l,className:a,loading:g}))))};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},96424:function(e,t,i){e.exports=i(68561)}}]); -------------------------------------------------------------------------------- /src/pandora/openai/auth.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import datetime 4 | import re 5 | from datetime import datetime as dt 6 | from urllib.parse import urlparse, parse_qs 7 | 8 | import requests 9 | from certifi import where 10 | 11 | from ..exts.config import default_api_prefix 12 | 13 | 14 | class Auth0: 15 | def __init__(self, email: str, password: str, proxy: str = None, use_cache: bool = True, mfa: str = None): 16 | self.session_token = None 17 | self.email = email 18 | self.password = password 19 | self.use_cache = use_cache 20 | self.mfa = mfa 21 | self.session = requests.Session() 22 | self.req_kwargs = { 23 | 'proxies': { 24 | 'http': proxy, 25 | 'https': proxy, 26 | } if proxy else None, 27 | 'verify': where(), 28 | 'timeout': 100, 29 | } 30 | self.access_token = None 31 | self.refresh_token = None 32 | self.expires = None 33 | self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) ' \ 34 | 'Chrome/109.0.0.0 Safari/537.36' 35 | 36 | @staticmethod 37 | def __check_email(email: str): 38 | regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,7}\b' 39 | return 
re.fullmatch(regex, email) 40 | 41 | def auth(self, login_local=False) -> str: 42 | if self.use_cache and self.access_token and self.expires and self.expires > dt.now(): 43 | return self.access_token 44 | 45 | if not self.__check_email(self.email) or not self.password: 46 | raise Exception('invalid email or password.') 47 | 48 | return self.__part_one() if login_local else self.get_access_token_proxy() 49 | 50 | def get_refresh_token(self): 51 | return self.refresh_token 52 | 53 | def __part_one(self) -> str: 54 | url = '{}/auth/preauth'.format(default_api_prefix()) 55 | resp = self.session.get(url, allow_redirects=False, **self.req_kwargs) 56 | 57 | if resp.status_code == 200: 58 | json = resp.json() 59 | if 'preauth_cookie' not in json or not json['preauth_cookie']: 60 | raise Exception('Get preauth cookie failed.') 61 | 62 | return self.__part_two(json['preauth_cookie']) 63 | else: 64 | raise Exception('Error request preauth.') 65 | 66 | def __part_two(self, preauth: str) -> str: 67 | code_challenge = 'w6n3Ix420Xhhu-Q5-mOOEyuPZmAsJHUbBpO8Ub7xBCY' 68 | code_verifier = 'yGrXROHx_VazA0uovsxKfE263LMFcrSrdm4SlC-rob8' 69 | 70 | url = 'https://auth0.openai.com/authorize?client_id=pdlLIX2Y72MIl2rhLhTE9VV9bN905kBh&audience=https%3A%2F' \ 71 | '%2Fapi.openai.com%2Fv1&redirect_uri=com.openai.chat%3A%2F%2Fauth0.openai.com%2Fios%2Fcom.openai.chat' \ 72 | '%2Fcallback&scope=openid%20email%20profile%20offline_access%20model.request%20model.read' \ 73 | '%20organization.read%20offline&response_type=code&code_challenge={}' \ 74 | '&code_challenge_method=S256&prompt=login&preauth_cookie={}'.format(code_challenge, preauth) 75 | return self.__part_three(code_verifier, url) 76 | 77 | def __part_three(self, code_verifier, url: str) -> str: 78 | headers = { 79 | 'User-Agent': self.user_agent, 80 | 'Referer': 'https://ios.chat.openai.com/', 81 | } 82 | resp = self.session.get(url, headers=headers, allow_redirects=True, **self.req_kwargs) 83 | 84 | if resp.status_code == 200: 85 | try: 
86 | url_params = parse_qs(urlparse(resp.url).query) 87 | state = url_params['state'][0] 88 | return self.__part_four(code_verifier, state) 89 | except IndexError as exc: 90 | raise Exception('Rate limit hit.') from exc 91 | else: 92 | raise Exception('Error request login url.') 93 | 94 | def __part_four(self, code_verifier: str, state: str) -> str: 95 | url = 'https://auth0.openai.com/u/login/identifier?state=' + state 96 | headers = { 97 | 'User-Agent': self.user_agent, 98 | 'Referer': url, 99 | 'Origin': 'https://auth0.openai.com', 100 | } 101 | data = { 102 | 'state': state, 103 | 'username': self.email, 104 | 'js-available': 'true', 105 | 'webauthn-available': 'true', 106 | 'is-brave': 'false', 107 | 'webauthn-platform-available': 'false', 108 | 'action': 'default', 109 | } 110 | resp = self.session.post(url, headers=headers, data=data, allow_redirects=False, **self.req_kwargs) 111 | 112 | if resp.status_code == 302: 113 | return self.__part_five(code_verifier, state) 114 | else: 115 | raise Exception('Error check email.') 116 | 117 | def __part_five(self, code_verifier: str, state: str) -> str: 118 | url = 'https://auth0.openai.com/u/login/password?state=' + state 119 | headers = { 120 | 'User-Agent': self.user_agent, 121 | 'Referer': url, 122 | 'Origin': 'https://auth0.openai.com', 123 | } 124 | data = { 125 | 'state': state, 126 | 'username': self.email, 127 | 'password': self.password, 128 | 'action': 'default', 129 | } 130 | 131 | resp = self.session.post(url, headers=headers, data=data, allow_redirects=False, **self.req_kwargs) 132 | if resp.status_code == 302: 133 | location = resp.headers['Location'] 134 | if not location.startswith('/authorize/resume?'): 135 | raise Exception('Login failed.') 136 | 137 | return self.__part_six(code_verifier, location, url) 138 | 139 | if resp.status_code == 400: 140 | raise Exception('Wrong email or password.') 141 | else: 142 | raise Exception('Error login.') 143 | 144 | def __part_six(self, code_verifier: str, 
location: str, ref: str) -> str: 145 | url = 'https://auth0.openai.com' + location 146 | headers = { 147 | 'User-Agent': self.user_agent, 148 | 'Referer': ref, 149 | } 150 | 151 | resp = self.session.get(url, headers=headers, allow_redirects=False, **self.req_kwargs) 152 | if resp.status_code == 302: 153 | location = resp.headers['Location'] 154 | if location.startswith('/u/mfa-otp-challenge?'): 155 | if not self.mfa: 156 | raise Exception('MFA required.') 157 | return self.__part_seven(code_verifier, location) 158 | 159 | if not location.startswith('com.openai.chat://auth0.openai.com/ios/com.openai.chat/callback?'): 160 | raise Exception('Login callback failed.') 161 | 162 | return self.get_access_token(code_verifier, resp.headers['Location']) 163 | 164 | raise Exception('Error login.') 165 | 166 | def __part_seven(self, code_verifier: str, location: str) -> str: 167 | url = 'https://auth0.openai.com' + location 168 | data = { 169 | 'state': parse_qs(urlparse(url).query)['state'][0], 170 | 'code': self.mfa, 171 | 'action': 'default', 172 | } 173 | headers = { 174 | 'User-Agent': self.user_agent, 175 | 'Referer': url, 176 | 'Origin': 'https://auth0.openai.com', 177 | } 178 | 179 | resp = self.session.post(url, headers=headers, data=data, allow_redirects=False, **self.req_kwargs) 180 | if resp.status_code == 302: 181 | location = resp.headers['Location'] 182 | if not location.startswith('/authorize/resume?'): 183 | raise Exception('MFA failed.') 184 | 185 | return self.__part_six(code_verifier, location, url) 186 | 187 | if resp.status_code == 400: 188 | raise Exception('Wrong MFA code.') 189 | else: 190 | raise Exception('Error login.') 191 | 192 | def __parse_access_token(self, resp): 193 | if resp.status_code == 200: 194 | json = resp.json() 195 | if 'access_token' not in json: 196 | raise Exception('Get access token failed, maybe you need a proxy.') 197 | 198 | if 'refresh_token' in json: 199 | self.refresh_token = json['refresh_token'] 200 | 201 | 
self.access_token = json['access_token'] 202 | self.expires = dt.utcnow() + datetime.timedelta(seconds=json['expires_in']) - datetime.timedelta(minutes=5) 203 | return self.access_token 204 | else: 205 | raise Exception(resp.text) 206 | 207 | def get_access_token(self, code_verifier: str, callback_url: str) -> str: 208 | url_params = parse_qs(urlparse(callback_url).query) 209 | 210 | if 'error' in url_params: 211 | error = url_params['error'][0] 212 | error_description = url_params['error_description'][0] if 'error_description' in url_params else '' 213 | raise Exception('{}: {}'.format(error, error_description)) 214 | 215 | if 'code' not in url_params: 216 | raise Exception('Error get code from callback url.') 217 | 218 | url = 'https://auth0.openai.com/oauth/token' 219 | headers = { 220 | 'User-Agent': self.user_agent, 221 | } 222 | data = { 223 | 'redirect_uri': 'com.openai.chat://auth0.openai.com/ios/com.openai.chat/callback', 224 | 'grant_type': 'authorization_code', 225 | 'client_id': 'pdlLIX2Y72MIl2rhLhTE9VV9bN905kBh', 226 | 'code': url_params['code'][0], 227 | 'code_verifier': code_verifier, 228 | } 229 | resp = self.session.post(url, headers=headers, json=data, allow_redirects=False, **self.req_kwargs) 230 | 231 | return self.__parse_access_token(resp) 232 | 233 | def get_access_token_proxy(self) -> str: 234 | url = '{}/auth/login'.format(default_api_prefix()) 235 | headers = { 236 | 'User-Agent': self.user_agent, 237 | } 238 | data = { 239 | 'username': self.email, 240 | 'password': self.password, 241 | 'mfa_code': self.mfa, 242 | } 243 | resp = self.session.post(url=url, headers=headers, data=data, allow_redirects=False, **self.req_kwargs) 244 | 245 | return self.__parse_access_token(resp) 246 | -------------------------------------------------------------------------------- /doc/fakeopen.md: -------------------------------------------------------------------------------- 1 | # fakeopen api 文档 2 | 3 | ## 目录 4 | 5 | - 
[基本信息](#%E5%9F%BA%E6%9C%AC%E4%BF%A1%E6%81%AF) 6 | - [ChatGPT 相关](#chatgpt-%E7%9B%B8%E5%85%B3) 7 | - [基础信息](#%E5%9F%BA%E7%A1%80%E4%BF%A1%E6%81%AF) 8 | - [接口列表](#%E6%8E%A5%E5%8F%A3%E5%88%97%E8%A1%A8) 9 | - [1. /api/conversation](#1-apiconversation) 10 | - [2. /api/models](#2-apimodels) 11 | - [3. /api/conversations](#3-apiconversations) 12 | - [4. /api/conversation/\](#4-apiconversationconversation-id) 13 | - [5. /api/conversation/\](#5-apiconversationconversation-id) 14 | - [6. /api/conversations](#6-apiconversations) 15 | - [OpenAI API 相关](#openai-api-%E7%9B%B8%E5%85%B3) 16 | - [基础信息](#%E5%9F%BA%E7%A1%80%E4%BF%A1%E6%81%AF-1) 17 | - [接口列表](#%E6%8E%A5%E5%8F%A3%E5%88%97%E8%A1%A8-1) 18 | - [1. /v1/chat/completions](#1-v1chatcompletions) 19 | - [登录相关](#%E7%99%BB%E5%BD%95%E7%9B%B8%E5%85%B3) 20 | - [基础信息](#%E5%9F%BA%E7%A1%80%E4%BF%A1%E6%81%AF-2) 21 | - [接口列表](#%E6%8E%A5%E5%8F%A3%E5%88%97%E8%A1%A8-2) 22 | - [1. /auth/preauth](#1-authpreauth) 23 | - [2. /auth/login](#2-authlogin) 24 | - [3. /auth/refresh](#3-authrefresh) 25 | - [4. /auth/platform/login](#4-authplatformlogin) 26 | - [5. /auth/platform/refresh](#5-authplatformrefresh) 27 | - [Share Token 相关](#share-token-%E7%9B%B8%E5%85%B3) 28 | - [基础信息](#%E5%9F%BA%E7%A1%80%E4%BF%A1%E6%81%AF-3) 29 | - [接口列表](#%E6%8E%A5%E5%8F%A3%E5%88%97%E8%A1%A8-3) 30 | - [1. /token/info/\](#1-tokeninfoshare-token) 31 | - [2. /token/register](#2-tokenregister) 32 | - [Pool Token 相关](#pool-token-%E7%9B%B8%E5%85%B3) 33 | - [基础信息](#%E5%9F%BA%E7%A1%80%E4%BF%A1%E6%81%AF-4) 34 | - [接口列表](#%E6%8E%A5%E5%8F%A3%E5%88%97%E8%A1%A8-4) 35 | - [1. 
/pool/update](#1-poolupdate) 36 | 37 | 38 | ## 基本信息 39 | * Base URL: `https://ai.fakeopen.com` 40 | * 本服务也提供每日域名,格式为:`ai-.fakeopen.com`,如:`ai-20230913.fakeopen.com` 。 41 | * 本服务为 `Pandora` 的一部分,用于提供 `ChatGPT` / `OpenAI` 相关的接口。 42 | * **本服务完全免费,服务器及带宽均为自掏腰包,请大户手下留情,不要滥用。我和网友们谢谢你们了!** 43 | 44 | ## `ChatGPT` 相关 45 | 46 | ### 基础信息 47 | * 请求接口需要提供 `Authorization` 或 `X-Authorization` 头,值为 `Bearer ` 。 48 | * `Token` 可以是 `Access Token` 或 `Share Token` 。 49 | * **请求字段:** 请自行使用浏览器开发者工具查看。 50 | * **返回类型:** `application/json` 51 | * **返回字段:** 请自行使用浏览器开发者工具查看。 52 | * **报错信息:** 内容包含在 `detail` 字段中。 53 | 54 | ### 接口列表 55 | 56 | #### 1. `/api/conversation` 57 | * 对应 `https://chat.openai.com/backend-api/conversation` 的用法。 58 | * **接口描述:** 发送对话,获取回复。 59 | * **HTTP方法:** `POST` 60 | * **请求类型:** `application/json` 61 | * **返回字段:** 返回 `text/event-stream` 流式数据,需要自行解析。 62 | * **频率控制:** 根据 `IP` 地址 `3/10s` 限制,被限制时返回 `429` 错误码。 63 | 64 | #### 2. `/api/models` 65 | * 对应 `https://chat.openai.com/backend-api/models` 的用法。 66 | * **接口描述:** 列出账号可用的模型。 67 | * **HTTP方法:** `GET` 68 | * **频率控制:** 无。 69 | * **特别说明:** 可根据其中是否有 `GPT-4` 模型来判断账号是否为 `ChatGPT Plus` 。 70 | 71 | #### 3. `/api/conversations` 72 | * 对应 `https://chat.openai.com/backend-api/conversations` 的用法。 73 | * **接口描述:** 以分页方式列出会话列表。 74 | * **HTTP方法:** `GET` 75 | * **频率控制:** 无。 76 | * **特别说明:** 使用隔离会话 `Share Token` 时,会话列表中只会显示 `Share Token` 所属的会话。 77 | 78 | #### 4. `/api/conversation/` 79 | * 对应 `https://chat.openai.com/backend-api/conversation/` 的用法。 80 | * **接口描述:** 根据 `conversation id` 删除指定会话。 81 | * **HTTP方法:** `PUT` 82 | * **请求类型:** `application/json` 83 | * **请求字段:** ```{"is_visible": false}``` 84 | * **频率控制:** 无。 85 | 86 | #### 5. `/api/conversation/` 87 | * 对应 `https://chat.openai.com/backend-api/conversation/` 的用法。 88 | * **接口描述:** 根据 `conversation id` 修改会话标题。 89 | * **HTTP方法:** `PUT` 90 | * **请求类型:** `application/json` 91 | * **请求字段:** ```{"title": "New Title"}``` 92 | * **频率控制:** 无。 93 | 94 | #### 6. 
`/api/conversations` 95 | * 对应 `https://chat.openai.com/backend-api/conversations` 的用法。 96 | * **接口描述:** 清除所有会话。 97 | * **HTTP方法:** `PUT` 98 | * **请求类型:** `application/json` 99 | * **请求字段:** ```{"is_visible": false}``` 100 | * **频率控制:** 无。 101 | * **特别说明:** 使用隔离会话 `Share Token` 时,不可调用本接口。 102 | 103 | #### 更多接口请自行使用浏览器开发者工具查看。可在官方查看,也可以部署 `Pandora Cloud` 后查看。 104 | 105 | ## `OpenAI API` 相关 106 | 107 | ### 基础信息 108 | * 请求接口需要提供 `Authorization` 或 `X-Authorization` 头,值为 `Bearer ` 。 109 | * `Token` 可以是 `Access Token` 或 `sk-` 开头的官方 `API Key` ,**会扣官方额度**。 110 | * `Token` 可以是 `Share Token` 或 `Pool Token` ,此时为模拟接口,**不会扣官方额度**。 111 | * **官方文档:** https://platform.openai.com/docs/api-reference 112 | 113 | ### 接口列表 114 | 115 | #### 1. `/v1/chat/completions` 116 | * 对应 `https://api.openai.com/v1/chat/completions` 的用法。 117 | * **官方文档:** https://platform.openai.com/docs/api-reference/chat/create 118 | * **接口描述:** 发送对话,获取回复。可使用 `Share Token` 或 `Pool Token` 模拟免费调用。绝大多数 `OpenAI API` 客户端均支持。 119 | * **模型映射:** 120 | * `gpt-3.5-turbo` -> `text-davinci-002-render-sha`,真实长度为:`8K` 。 121 | * `gpt-3.5-turbo-0301` -> `text-davinci-002-render-sha`,真实长度为:`8K` 。 122 | * `gpt-3.5-turbo-0613` -> `text-davinci-002-render-sha`,真实长度为:`8K` 。 123 | * `gpt-3.5-turbo-16k` -> `text-davinci-002-render-sha`,真实长度为:`8K` 。 124 | * `gpt-3.5-turbo-16k-0613` -> `text-davinci-002-render-sha`,真实长度为:`8K` 。 125 | * `gpt-4` -> `gpt-4`,真实长度为:`4K` 。 126 | * `gpt-4-0314` -> `gpt-4`,真实长度为:`4K` 。 127 | * `gpt-4-0613` -> `gpt-4`,真实长度为:`4K` 。 128 | * `gpt-4-32k` -> `gpt-4-plugins`,真实长度为:`8K` 。 129 | * `gpt-4-32k-0314` -> `gpt-4-plugins`,真实长度为:`8K` 。 130 | * `gpt-4-32k-0613` -> `gpt-4-plugins`,真实长度为:`8K` 。 131 | * **请求字段:** 同官方,但不保证支持以下字段: 132 | * `functions` 133 | * `function_call` 134 | * `temperature` 135 | * `top_p` 136 | * `n` 137 | * `stop` 138 | * `max_tokens` 139 | * `presence_penalty` 140 | * `frequency_penalty` 141 | * `logit_bias` 142 | * `user` 143 | * **返回字段:** 返回 `text/event-stream` 流式数据,需要自行解析。 144 | 
**频率控制:** 根据 `IP` 地址 `3/10s` 限制,被限制时返回 `429` 错误码。 145 | * **特别说明:** 146 | * 扣官方额度时,只存在官方频率限制。 147 | * 官方 `ChatGPT` 存在同时只能有 `1` 个会话的限制,建议使用多个账号组合 `Pool Token` 来解决。 148 | 149 | #### 其余官方接口均直接转发到官方,不做任何处理。 150 | 151 | ## 登录相关 152 | 153 | ### 基础信息 154 | * 登录相关接口并不支持 `Google` / `Microsoft` 等第三方登录。 155 | * 可以在 https://ai.fakeopen.com/auth 操作界面中进行同等操作。 156 | * **请务必仅用来操作自己的账号!!!**,撞号后果自负,且会导致拉黑 `IP` 或 `ASN` 。 157 | * **返回类型:** `application/json` 158 | * **报错信息:** 内容包含在 `detail` 字段中。 159 | 160 | ### 接口列表 161 | 162 | #### 1. `/auth/preauth` 163 | * **接口描述:** 获取登录预授权,具体见:https://zhile.io/2023/05/19/how-to-get-chatgpt-access-token-via-pkce.html 164 | * **HTTP方法:** `GET` 165 | * **频率控制:** 无。 166 | 167 | #### 2. `/auth/login` 168 | * **接口描述:** 使用账号信息登录,获取供 [ChatGPT](https://chat.openai.com) 使用的 `Access Token` 等信息。 169 | * **HTTP方法:** `POST` 170 | * **请求类型:** `application/x-www-form-urlencoded` 171 | * **请求字段:** 172 | * `username`:`ChatGPT` 账号。 173 | * `password`:`ChatGPT` 密码。 174 | * `mfa_code`:开启二次验证,需要提供。否则不需要。 175 | * **返回字段:** 返回 `Access Token` 和 `Refresh Token` 等信息。 176 | * **频率控制:** 根据IP地址 `6/1m` 限制,被限制时返回 `429` 错误码。 177 | * **特别说明:** 可直接调用,无需先调用**获取登录预授权**接口。也无需支持国家的梯子。 178 | 179 | #### 3. `/auth/refresh` 180 | * **接口描述:** 使用 `Refresh Token` 获取供 [ChatGPT](https://chat.openai.com) 使用的 `Access Token` 等信息。 181 | * **HTTP方法:** `POST` 182 | * **请求类型:** `application/x-www-form-urlencoded` 183 | * **请求字段:** 184 | * `refresh_token`:`ChatGPT` 的 `Refresh Token`。 185 | * **返回字段:** 返回 `Access Token` 等信息。 186 | * **频率控制:** 无。 187 | 188 | #### 4. 
`/auth/platform/login` 189 | * **接口描述:** 使用账号信息登录,获取供 [Platform](https://platform.openai.com) 使用的 `Access Token` 等信息,用作获取用度、账单等信息。 190 | * **HTTP方法:** `POST` 191 | * **请求类型:** `application/x-www-form-urlencoded` 192 | * **请求字段:** 193 | * `username`:`ChatGPT` 账号。 194 | * `password`:`ChatGPT` 密码。 195 | * `mfa_code`:开启二次验证,需要提供。否则不需要。 196 | * **返回字段:** 返回 `Access Token` 和 `Refresh Token` 等信息。 197 | * **频率控制:** 根据IP地址 `6/1m` 限制,被限制时返回 `429` 错误码。 198 | 199 | #### 5. `/auth/platform/refresh` 200 | * **接口描述:** 使用 `Refresh Token` 获取供 [Platform](https://platform.openai.com) 使用的 `Access Token` 等信息,用作获取用度、账单等信息。 201 | * **HTTP方法:** `POST` 202 | * **请求类型:** `application/x-www-form-urlencoded` 203 | * **请求字段:** 204 | * `refresh_token`:`Platform` 的 `Refresh Token`。 205 | * **返回字段:** 返回 `Access Token` 等信息。 206 | * **频率控制:** 无。 207 | 208 | ## Share Token 相关 209 | 210 | ### 基础信息 211 | * 基本格式为:`fk-[0-9a-zA-Z_\-]{43}` ,长度为:`46` 。 212 | * 使用该功能可以实现多人共享一个账号,可以进行会话隔离。 213 | * 可以在共享账号时隐藏 `邮箱` 等账号信息,防止被撞号。 214 | * 可以在共享账号时隐藏 `Access Token` ,官方 `Access Token` 在有效期内无法吊销,泄露损失很大。 215 | * 可以灵活控制 `Share Token` 的有效期,过期后会自动失效。也可随时手动吊销。 216 | * 可以限制 `Share Token` 使用的站点,防止被滥用。 217 | * 可以在 https://ai.fakeopen.com/token 操作界面中进行同等操作。 218 | * 既可以使用在 `ChatGPT` 上,也可以使用在模拟 `OpenAI API` 的调用上。 219 | * **返回类型:** `application/json` 220 | * **报错信息:** 内容包含在 `detail` 字段中。 221 | 222 | ### 接口列表 223 | 224 | #### 1. `/token/info/` 225 | * **接口描述:** 获取 `Share Token` 的详细信息。 226 | * **HTTP方法:** `GET` 227 | * **返回字段:** 返回 `Share Token` 的详细信息。 228 | * **频率控制:** 无。 229 | * **特别说明:** 230 | * `Authorization` 可选,值为 `Bearer ` 。 231 | * 若提供有效 `Share Token` 注册用户的 `Access Token` ,则返回 `Share Token` 对各个模型的**当日**用度信息。 232 | * 此为开共享 `ChatGPT Plus` 车必备统计数据功能。 233 | 234 | #### 2. 
`/token/register` 235 | * **接口描述:** 注册或更新 `Share Token` 。 236 | * **HTTP方法:** `POST` 237 | * **请求类型:** `application/x-www-form-urlencoded` 238 | * **请求字段:** 239 | * `unique_name`:一个唯一的名字,这里要注意相同 `unique_name` 和 `access_token` **始终生成相同**的 `Share Token` 。 240 | * `access_token`:`ChatGPT` 账号的 `Access Token` 。 241 | * `site_limit`:限制 `Share Token` 使用的站点,格式为:`https://xxx.yyy.com`,可留空不作限制。 242 | * `expires_in`:`Share Token` 的有效期,单位为:`秒`,为 `0` 时表示与 `Access Token` 同效,为 `-1` 时吊销 `Share Token` 。 243 | * `show_conversations`:是否进行会话隔离,`true` 或 `false` ,默认为 `false` 。 244 | * `show_userinfo`:是否隐藏 `邮箱` 等账号信息,`true` 或 `false` ,默认为 `false` 。 245 | * **返回字段:** 返回 `Share Token` 等信息。 246 | * **频率控制:** 无。 247 | 248 | ## Pool Token 相关 249 | 250 | ### 基础信息 251 | * 基本格式为:`pk-[0-9a-zA-Z_\-]{43}` ,长度为:`46` 。 252 | * 使用该功能可以将最多 `100` 个 `Share Token` 组合在一起。 253 | * 使用组合的 `Pool Token` 时会自动轮转,突破 `ChatGPT` 同时只能有 `1` 个会话的限制。 254 | * 可以在 https://ai.fakeopen.com/pool 操作界面中进行同等操作。 255 | * 仅可以使用在模拟 `OpenAI API` 的调用上。 256 | * `pk-this-is-a-real-free-pool-token-for-everyone` 是一个可用的共享 `Pool Token` ,容量有几千个 `Share Token` 。**感谢社区热心人士提供。** 257 | * **返回类型:** `application/json` 258 | * **报错信息:** 内容包含在 `detail` 字段中。 259 | 260 | ### 接口列表 261 | 262 | #### 1. 
`/pool/update` 263 | * **接口描述:** 注册或更新 `Pool Token` 。 264 | * **HTTP方法:** `POST` 265 | * **请求类型:** `application/x-www-form-urlencoded` 266 | * **请求字段:** 267 | * `share_tokens`:`Share Token` 列表,每行 `1` 个,最多 `100` 个。 268 | * `pool_token`:`Pool Token` ,可留空,留空时生成新 `Pool Token` 。不为空则更新 `Pool Token` 。 269 | * **返回字段:** 返回 `Pool Token` 等信息。 270 | * **频率控制:** 无。 271 | * **特别说明:** `share_tokens` 为空,且 `pool_token` 不为空时,会吊销指定的 `Pool Token` 。 272 | -------------------------------------------------------------------------------- /src/pandora/flask/static/_next/static/chunks/762-222df1028c0c1555.js: -------------------------------------------------------------------------------- 1 | "use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[762],{10538:function(t,e,r){/** 2 | * @license React 3 | * use-sync-external-store-shim.production.min.js 4 | * 5 | * Copyright (c) Facebook, Inc. and its affiliates. 6 | * 7 | * This source code is licensed under the MIT license found in the 8 | * LICENSE file in the root directory of this source tree. 
9 | */ var s=r(70079),n="function"==typeof Object.is?Object.is:function(t,e){return t===e&&(0!==t||1/t==1/e)||t!=t&&e!=e},i=s.useState,u=s.useEffect,o=s.useLayoutEffect,c=s.useDebugValue;function l(t){var e=t.getSnapshot;t=t.value;try{var r=e();return!n(t,r)}catch(s){return!0}}function a(t,e){return e()}var h="undefined"==typeof window||void 0===window.document||void 0===window.document.createElement?a:function(t,e){var r=e(),s=i({inst:{value:r,getSnapshot:e}}),n=s[0].inst,a=s[1];return o(function(){n.value=r,n.getSnapshot=e,l(n)&&a({inst:n})},[t,r,e]),u(function(){return l(n)&&a({inst:n}),t(function(){l(n)&&a({inst:n})})},[t]),c(r),r};e.useSyncExternalStore=void 0!==s.useSyncExternalStore?s.useSyncExternalStore:h},31178:function(t,e,r){t.exports=r(10538)},89335:function(t,e,r){r.d(e,{z:function(){return c}});var s=r(49043),n=r(42422),i=r(31406),u=r(94521),o=r(99695);class c extends u.l{constructor(t,e){super(),this.client=t,this.options=e,this.trackedProps=new Set,this.selectError=null,this.bindMethods(),this.setOptions(e)}bindMethods(){this.remove=this.remove.bind(this),this.refetch=this.refetch.bind(this)}onSubscribe(){1===this.listeners.length&&(this.currentQuery.addObserver(this),l(this.currentQuery,this.options)&&this.executeFetch(),this.updateTimers())}onUnsubscribe(){this.listeners.length||this.destroy()}shouldFetchOnReconnect(){return a(this.currentQuery,this.options,this.options.refetchOnReconnect)}shouldFetchOnWindowFocus(){return a(this.currentQuery,this.options,this.options.refetchOnWindowFocus)}destroy(){this.listeners=[],this.clearStaleTimeout(),this.clearRefetchInterval(),this.currentQuery.removeObserver(this)}setOptions(t,e){let r=this.options,n=this.currentQuery;if(this.options=this.client.defaultQueryOptions(t),(0,s.VS)(r,this.options)||this.client.getQueryCache().notify({type:"observerOptionsUpdated",query:this.currentQuery,observer:this}),void 0!==this.options.enabled&&"boolean"!=typeof this.options.enabled)throw Error("Expected enabled to be a 
boolean");this.options.queryKey||(this.options.queryKey=r.queryKey),this.updateQuery();let i=this.hasListeners();i&&h(this.currentQuery,n,this.options,r)&&this.executeFetch(),this.updateResult(e),i&&(this.currentQuery!==n||this.options.enabled!==r.enabled||this.options.staleTime!==r.staleTime)&&this.updateStaleTimeout();let u=this.computeRefetchInterval();i&&(this.currentQuery!==n||this.options.enabled!==r.enabled||u!==this.currentRefetchInterval)&&this.updateRefetchInterval(u)}getOptimisticResult(t){let e=this.client.getQueryCache().build(this.client,t);return this.createResult(e,t)}getCurrentResult(){return this.currentResult}trackResult(t){let e={};return Object.keys(t).forEach(r=>{Object.defineProperty(e,r,{configurable:!1,enumerable:!0,get:()=>(this.trackedProps.add(r),t[r])})}),e}getCurrentQuery(){return this.currentQuery}remove(){this.client.getQueryCache().remove(this.currentQuery)}refetch({refetchPage:t,...e}={}){return this.fetch({...e,meta:{refetchPage:t}})}fetchOptimistic(t){let e=this.client.defaultQueryOptions(t),r=this.client.getQueryCache().build(this.client,e);return r.isFetchingOptimistic=!0,r.fetch().then(()=>this.createResult(r,e))}fetch(t){var e;return this.executeFetch({...t,cancelRefetch:null==(e=t.cancelRefetch)||e}).then(()=>(this.updateResult(),this.currentResult))}executeFetch(t){this.updateQuery();let e=this.currentQuery.fetch(this.options,t);return null!=t&&t.throwOnError||(e=e.catch(s.ZT)),e}updateStaleTimeout(){if(this.clearStaleTimeout(),s.sk||this.currentResult.isStale||!(0,s.PN)(this.options.staleTime))return;let t=(0,s.Kp)(this.currentResult.dataUpdatedAt,this.options.staleTime);this.staleTimeoutId=setTimeout(()=>{this.currentResult.isStale||this.updateResult()},t+1)}computeRefetchInterval(){var t;return"function"==typeof 
this.options.refetchInterval?this.options.refetchInterval(this.currentResult.data,this.currentQuery):null!=(t=this.options.refetchInterval)&&t}updateRefetchInterval(t){this.clearRefetchInterval(),this.currentRefetchInterval=t,!s.sk&&!1!==this.options.enabled&&(0,s.PN)(this.currentRefetchInterval)&&0!==this.currentRefetchInterval&&(this.refetchIntervalId=setInterval(()=>{(this.options.refetchIntervalInBackground||i.j.isFocused())&&this.executeFetch()},this.currentRefetchInterval))}updateTimers(){this.updateStaleTimeout(),this.updateRefetchInterval(this.computeRefetchInterval())}clearStaleTimeout(){this.staleTimeoutId&&(clearTimeout(this.staleTimeoutId),this.staleTimeoutId=void 0)}clearRefetchInterval(){this.refetchIntervalId&&(clearInterval(this.refetchIntervalId),this.refetchIntervalId=void 0)}createResult(t,e){let r=this.currentQuery,n=this.options,i=this.currentResult,u=this.currentResultState,c=this.currentResultOptions,a=t!==r,f=a?t.state:this.currentQueryInitialState,p=a?this.currentResult:this.previousQueryResult,{state:y}=t,{dataUpdatedAt:v,error:R,errorUpdatedAt:S,fetchStatus:b,status:m}=y,E=!1,Q=!1,g;if(e._optimisticResults){let I=this.hasListeners(),C=!I&&l(t,e),O=I&&h(t,r,e,n);(C||O)&&(b=(0,o.Kw)(t.options.networkMode)?"fetching":"paused",v||(m="loading")),"isRestoring"===e._optimisticResults&&(b="idle")}if(e.keepPreviousData&&!y.dataUpdatedAt&&null!=p&&p.isSuccess&&"error"!==m)g=p.data,v=p.dataUpdatedAt,m=p.status,E=!0;else if(e.select&&void 0!==y.data){if(i&&y.data===(null==u?void 0:u.data)&&e.select===this.selectFn)g=this.selectResult;else try{this.selectFn=e.select,g=e.select(y.data),g=(0,s.oE)(null==i?void 0:i.data,g,e),this.selectResult=g,this.selectError=null}catch(T){this.selectError=T}}else g=y.data;if(void 0!==e.placeholderData&&void 0===g&&"loading"===m){let w;if(null!=i&&i.isPlaceholderData&&e.placeholderData===(null==c?void 0:c.placeholderData))w=i.data;else if(w="function"==typeof 
e.placeholderData?e.placeholderData():e.placeholderData,e.select&&void 0!==w)try{w=e.select(w),this.selectError=null}catch(F){this.selectError=F}void 0!==w&&(m="success",g=(0,s.oE)(null==i?void 0:i.data,w,e),Q=!0)}this.selectError&&(R=this.selectError,g=this.selectResult,S=Date.now(),m="error");let U="fetching"===b,k="loading"===m,x="error"===m,P={status:m,fetchStatus:b,isLoading:k,isSuccess:"success"===m,isError:x,isInitialLoading:k&&U,data:g,dataUpdatedAt:v,error:R,errorUpdatedAt:S,failureCount:y.fetchFailureCount,failureReason:y.fetchFailureReason,errorUpdateCount:y.errorUpdateCount,isFetched:y.dataUpdateCount>0||y.errorUpdateCount>0,isFetchedAfterMount:y.dataUpdateCount>f.dataUpdateCount||y.errorUpdateCount>f.errorUpdateCount,isFetching:U,isRefetching:U&&!k,isLoadingError:x&&0===y.dataUpdatedAt,isPaused:"paused"===b,isPlaceholderData:Q,isPreviousData:E,isRefetchError:x&&0!==y.dataUpdatedAt,isStale:d(t,e),refetch:this.refetch,remove:this.remove};return P}updateResult(t){let e=this.currentResult,r=this.createResult(this.currentQuery,this.options);if(this.currentResultState=this.currentQuery.state,this.currentResultOptions=this.options,(0,s.VS)(r,e))return;this.currentResult=r;let n={cache:!0};(null==t?void 0:t.listeners)!==!1&&(()=>{if(!e)return!0;let{notifyOnChangeProps:t}=this.options;if("all"===t||!t&&!this.trackedProps.size)return!0;let r=new Set(null!=t?t:this.trackedProps);return this.options.useErrorBoundary&&r.add("error"),Object.keys(this.currentResult).some(t=>{let s=this.currentResult[t]!==e[t];return s&&r.has(t)})})()&&(n.listeners=!0),this.notify({...n,...t})}updateQuery(){let t=this.client.getQueryCache().build(this.client,this.options);if(t===this.currentQuery)return;let e=this.currentQuery;this.currentQuery=t,this.currentQueryInitialState=t.state,this.previousQueryResult=this.currentResult,this.hasListeners()&&(null==e||e.removeObserver(this),t.addObserver(this))}onQueryUpdate(t){let 
e={};"success"===t.type?e.onSuccess=!t.manual:"error"!==t.type||(0,o.DV)(t.error)||(e.onError=!0),this.updateResult(e),this.hasListeners()&&this.updateTimers()}notify(t){n.V.batch(()=>{var e,r,s,n,i,u,o,c;t.onSuccess?(null==(e=(r=this.options).onSuccess)||e.call(r,this.currentResult.data),null==(s=(n=this.options).onSettled)||s.call(n,this.currentResult.data,null)):t.onError&&(null==(i=(u=this.options).onError)||i.call(u,this.currentResult.error),null==(o=(c=this.options).onSettled)||o.call(c,void 0,this.currentResult.error)),t.listeners&&this.listeners.forEach(t=>{t(this.currentResult)}),t.cache&&this.client.getQueryCache().notify({query:this.currentQuery,type:"observerResultsUpdated"})})}}function l(t,e){var r,s;return!1!==e.enabled&&!t.state.dataUpdatedAt&&!("error"===t.state.status&&!1===e.retryOnMount)||t.state.dataUpdatedAt>0&&a(t,e,e.refetchOnMount)}function a(t,e,r){if(!1!==e.enabled){let s="function"==typeof r?r(t):r;return"always"===s||!1!==s&&d(t,e)}return!1}function h(t,e,r,s){return!1!==r.enabled&&(t!==e||!1===s.enabled)&&(!r.suspense||"error"!==t.state.status)&&d(t,r)}function d(t,e){return t.isStaleByTime(e.staleTime)}},404:function(t,e,r){r.d(e,{_:function(){return u}});var s=r(70079);let n,i=s.createContext((n=!1,{clearReset(){n=!1},reset(){n=!0},isReset:()=>n})),u=()=>s.useContext(i)},60112:function(t,e,r){r.d(e,{JN:function(){return u},KJ:function(){return o},pf:function(){return i}});var s=r(70079),n=r(83793);let i=(t,e)=>{(t.suspense||t.useErrorBoundary)&&!e.isReset()&&(t.retryOnMount=!1)},u=t=>{s.useEffect(()=>{t.clearReset()},[t])},o=({result:t,errorResetBoundary:e,useErrorBoundary:r,query:s})=>t.isError&&!e.isReset()&&!t.isFetching&&(0,n.L)(r,[t.error,s])},17866:function(t,e,r){r.d(e,{S:function(){return i}});var s=r(70079);let n=s.createContext(!1),i=()=>s.useContext(n);n.Provider},86857:function(t,e,r){r.d(e,{Fb:function(){return s},SB:function(){return i},Z$:function(){return n},j8:function(){return u}});let 
s=t=>{t.suspense&&"number"!=typeof t.staleTime&&(t.staleTime=1e3)},n=(t,e)=>t.isLoading&&t.isFetching&&!e,i=(t,e,r)=>(null==t?void 0:t.suspense)&&n(e,r),u=(t,e,r)=>e.fetchOptimistic(t).then(({data:e})=>{null==t.onSuccess||t.onSuccess(e),null==t.onSettled||t.onSettled(e,null)}).catch(e=>{r.clearReset(),null==t.onError||t.onError(e),null==t.onSettled||t.onSettled(void 0,e)})},52696:function(t,e,r){r.d(e,{r:function(){return h}});var s=r(70079),n=r(39858),i=r(42422),u=r(404),o=r(62906),c=r(17866),l=r(60112),a=r(86857);function h(t,e){let r=(0,o.NL)({context:t.context}),h=(0,c.S)(),d=(0,u._)(),f=r.defaultQueryOptions(t);f._optimisticResults=h?"isRestoring":"optimistic",f.onError&&(f.onError=i.V.batchCalls(f.onError)),f.onSuccess&&(f.onSuccess=i.V.batchCalls(f.onSuccess)),f.onSettled&&(f.onSettled=i.V.batchCalls(f.onSettled)),(0,a.Fb)(f),(0,l.pf)(f,d),(0,l.JN)(d);let[p]=s.useState(()=>new e(r,f)),y=p.getOptimisticResult(f);if((0,n.$)(s.useCallback(t=>h?()=>void 0:p.subscribe(i.V.batchCalls(t)),[p,h]),()=>p.getCurrentResult(),()=>p.getCurrentResult()),s.useEffect(()=>{p.setOptions(f,{listeners:!1})},[f,p]),(0,a.SB)(f,y,h))throw(0,a.j8)(f,p,d);if((0,l.KJ)({result:y,errorResetBoundary:d,useErrorBoundary:f.useErrorBoundary,query:p.getCurrentQuery()}))throw y.error;return f.notifyOnChangeProps?y:p.trackResult(y)}},87762:function(t,e,r){r.d(e,{a:function(){return u}});var s=r(49043),n=r(89335),i=r(52696);function u(t,e,r){let u=(0,s._v)(t,e,r);return(0,i.r)(u,n.z)}},39858:function(t,e,r){r.d(e,{$:function(){return n}});var s=r(31178);let n=s.useSyncExternalStore},83793:function(t,e,r){r.d(e,{L:function(){return s}});function s(t,e){return"function"==typeof t?t(...e):!!t}}}]); -------------------------------------------------------------------------------- /src/pandora/bots/server.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import logging 4 | from datetime import timedelta 5 | from os.path import 
join, abspath, dirname

from flask import Flask, jsonify, make_response, request, Response, render_template
from flask_cors import CORS
from waitress import serve
from werkzeug.exceptions import default_exceptions
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.serving import WSGIRequestHandler

from .. import __version__
from ..exts.hooks import hook_logging
from ..openai.api import API


class ChatBot:
    """HTTP front-end that serves the bundled ChatGPT web UI and a JSON API.

    Every ``/api/*`` route is a thin wrapper that delegates to the injected
    ``chatgpt`` client and proxies its response (or SSE stream) back to the
    browser unchanged.
    """

    __default_ip = '127.0.0.1'
    __default_port = 8008

    def __init__(self, chatgpt, debug=False, sentry=False):
        """Wire up logging and remember the backing ChatGPT client.

        :param chatgpt: client object exposing list_models/talk/goon/... methods.
        :param debug: enables DEBUG logging and detailed error messages.
        :param sentry: stored for callers; not used inside this class.
        """
        self.chatgpt = chatgpt
        self.debug = debug
        self.sentry = sentry
        self.log_level = logging.DEBUG if debug else logging.WARN

        hook_logging(level=self.log_level, format='[%(asctime)s] %(levelname)s in %(module)s: %(message)s')
        self.logger = logging.getLogger('waitress')

    def run(self, bind_str, threads=8):
        """Build the Flask app, register all routes and serve it with waitress.

        :param bind_str: ``host:port``, bare ``host`` or bare ``port`` string.
        :param threads: waitress worker thread count.
        """
        host, port = self.__parse_bind(bind_str)

        resource_path = abspath(join(dirname(__file__), '..', 'flask'))
        app = Flask(__name__, static_url_path='',
                    static_folder=join(resource_path, 'static'),
                    template_folder=join(resource_path, 'templates'))
        # Honour X-Forwarded-Port when running behind a reverse proxy.
        app.wsgi_app = ProxyFix(app.wsgi_app, x_port=1)
        app.after_request(self.__after_request)

        CORS(app, resources={r'/api/*': {'supports_credentials': True, 'expose_headers': [
            'Content-Type',
            'Authorization',
            'X-Requested-With',
            'Accept',
            'Origin',
            'Access-Control-Request-Method',
            'Access-Control-Request-Headers',
            'Content-Disposition',
        ], 'max_age': 600}})

        # Render every werkzeug HTTP error as JSON instead of HTML.
        for ex in default_exceptions:
            app.register_error_handler(ex, self.__handle_error)

        app.route('/api/models')(self.list_models)
        app.route('/api/conversations')(self.list_conversations)
        app.route('/api/conversations', methods=['DELETE'])(self.clear_conversations)
        # The <conversation_id> converters are required: the handlers below all
        # take a conversation_id argument, so the routes must capture it.
        app.route('/api/conversation/<conversation_id>')(self.get_conversation)
        app.route('/api/conversation/<conversation_id>', methods=['DELETE'])(self.del_conversation)
        app.route('/api/conversation/<conversation_id>', methods=['PATCH'])(self.set_conversation_title)
        app.route('/api/conversation/gen_title/<conversation_id>', methods=['POST'])(self.gen_conversation_title)
        app.route('/api/conversation/talk', methods=['POST'])(self.talk)
        app.route('/api/conversation/regenerate', methods=['POST'])(self.regenerate)
        app.route('/api/conversation/goon', methods=['POST'])(self.goon)

        app.route('/api/auth/session')(self.session)
        app.route('/api/accounts/check')(self.check)
        # Build-id specific data route the bundled Next.js front-end requests.
        app.route('/_next/data/olf4sv64FWIcQ_zCGl90t/chat.json')(self.chat_info)

        app.route('/')(self.chat)
        app.route('/chat')(self.chat)
        app.route('/chat/<conversation_id>')(self.chat)

        if not self.debug:
            self.logger.warning('Serving on http://{}:{}'.format(host, port))

        WSGIRequestHandler.protocol_version = 'HTTP/1.1'
        serve(app, host=host, port=port, ident=None, threads=threads)

    @staticmethod
    def __after_request(resp):
        """Tag every response with the running pandora version."""
        resp.headers['X-Server'] = 'pandora/{}'.format(__version__)

        return resp

    def __parse_bind(self, bind_str):
        """Parse ``bind_str`` into ``(host, port)``.

        Accepts ``host:port``, a bare numeric port (default host is used) or a
        bare host (default port is used).
        """
        sections = bind_str.split(':', 2)
        if len(sections) < 2:
            try:
                port = int(sections[0])
                return self.__default_ip, port
            except ValueError:
                return sections[0], self.__default_port

        return sections[0], int(sections[1])

    def __handle_error(self, e):
        """Convert a werkzeug HTTP error into a JSON response.

        The HTTP status mirrors the exception's own code (a 404 stays a 404);
        previously every error was sent with a hard-coded 500 status even
        though the JSON body carried the real code.
        """
        self.logger.error(e)

        return make_response(jsonify({
            'code': e.code,
            'message': str(e.original_exception if self.debug and hasattr(e, 'original_exception') else e.name)
        }), getattr(e, 'code', None) or 500)

    @staticmethod
    def __set_cookie(resp, token_key, max_age):
        """Persist the selected token key in an HTTP-only cookie."""
        resp.set_cookie('token-key', token_key, max_age=max_age, path='/', domain=None, httponly=True, samesite='Lax')

    @staticmethod
    def __get_token_key():
        """Token key for the request: header ``X-Use-Token`` wins over the cookie."""
        return request.headers.get('X-Use-Token', request.cookies.get('token-key'))

    def chat(self, conversation_id=None):
        """Serve the chat UI, optionally deep-linked to one conversation."""
        query = {'chatId': [conversation_id]} if conversation_id else {}

        token_key = request.args.get('token')
        rendered = render_template('chat.html', pandora_base=request.url_root.strip('/'), query=query)
        resp = make_response(rendered)

        if token_key:
            self.__set_cookie(resp, token_key, timedelta(days=30))

        return resp

    @staticmethod
    def session():
        """Static stand-in for the upstream auth/session endpoint."""
        ret = {
            'user': {
                'id': 'user-000000000000000000000000',
                'name': 'admin@openai.com',
                'email': 'admin@openai.com',
                'image': None,
                'picture': None,
                'groups': []
            },
            'expires': '2089-08-08T23:59:59.999Z',
            'accessToken': 'secret',
        }

        return jsonify(ret)

    @staticmethod
    def chat_info():
        """Static Next.js page-props payload expected by the bundled UI."""
        ret = {
            'pageProps': {
                'user': {
                    'id': 'user-000000000000000000000000',
                    'name': 'admin@openai.com',
                    'email': 'admin@openai.com',
                    'image': None,
                    'picture': None,
                    'groups': []
                },
                'serviceStatus': {},
                'userCountry': 'US',
                'geoOk': True,
                'serviceAnnouncement': {
                    'paid': {},
                    'public': {}
                },
                'isUserInCanPayGroup': True
            },
            '__N_SSP': True
        }

        return jsonify(ret)

    @staticmethod
    def check():
        """Static account-status payload advertising a paid plan and features."""
        ret = {
            'account_plan': {
                'is_paid_subscription_active': True,
                'subscription_plan': 'chatgptplusplan',
                'account_user_role': 'account-owner',
                'was_paid_customer': True,
                'has_customer_object': True,
                'subscription_expires_at_timestamp': 3774355199
            },
            'user_country': 'US',
            'features': [
                'model_switcher',
                'dfw_message_feedback',
                'dfw_inline_message_regen_comparison',
                'model_preview',
                'system_message',
                'can_continue',
            ],
        }

        return jsonify(ret)

    def list_models(self):
        return self.__proxy_result(self.chatgpt.list_models(True, self.__get_token_key()))

    def list_conversations(self):
        offset = request.args.get('offset', '0')
        limit = request.args.get('limit', '20')

        return self.__proxy_result(self.chatgpt.list_conversations(offset, limit, True, self.__get_token_key()))

    def get_conversation(self, conversation_id):
        return self.__proxy_result(self.chatgpt.get_conversation(conversation_id, True, self.__get_token_key()))

    def del_conversation(self, conversation_id):
        return self.__proxy_result(self.chatgpt.del_conversation(conversation_id, True, self.__get_token_key()))

    def clear_conversations(self):
        return self.__proxy_result(self.chatgpt.clear_conversations(True, self.__get_token_key()))

    def set_conversation_title(self, conversation_id):
        title = request.json['title']

        return self.__proxy_result(
            self.chatgpt.set_conversation_title(conversation_id, title, True, self.__get_token_key()))

    def gen_conversation_title(self, conversation_id):
        payload = request.json
        model = payload['model']
        message_id = payload['message_id']

        return self.__proxy_result(
            self.chatgpt.gen_conversation_title(conversation_id, model, message_id, True, self.__get_token_key()))

    def talk(self):
        """Start or continue a conversation; streams when the client asks for it."""
        payload = request.json
        prompt = payload['prompt']
        model = payload['model']
        message_id = payload['message_id']
        parent_message_id = payload['parent_message_id']
        conversation_id = payload.get('conversation_id')
        stream = payload.get('stream', True)

        return self.__process_stream(
            *self.chatgpt.talk(prompt, model, message_id, parent_message_id, conversation_id, stream,
                               self.__get_token_key()), stream)

    def goon(self):
        """Ask the model to continue its previous (truncated) answer."""
        payload = request.json
        model = payload['model']
        parent_message_id = payload['parent_message_id']
        conversation_id = payload.get('conversation_id')
        stream = payload.get('stream', True)

        return self.__process_stream(
            *self.chatgpt.goon(model, parent_message_id, conversation_id, stream, self.__get_token_key()), stream)

    def regenerate(self):
        """Regenerate the last reply; falls back to talk() for brand-new threads."""
        payload = request.json

        conversation_id = payload.get('conversation_id')
        if not conversation_id:
            return self.talk()

        prompt = payload['prompt']
        model = payload['model']
        message_id = payload['message_id']
        parent_message_id = payload['parent_message_id']
        stream = payload.get('stream', True)

        return self.__process_stream(
            *self.chatgpt.regenerate_reply(prompt, model, conversation_id, message_id, parent_message_id, stream,
                                           self.__get_token_key()), stream)

    @staticmethod
    def __process_stream(status, headers, generator, stream):
        """Return an SSE Response when streaming, else drain to the final chunk."""
        if stream:
            return Response(API.wrap_stream_out(generator, status), mimetype=headers['Content-Type'], status=status)

        # Non-streaming: the last generated chunk is the complete answer.
        last_chunk = None
        for chunk in generator:
            last_chunk = chunk

        return make_response(last_chunk, status)

    @staticmethod
    def __proxy_result(remote_resp):
        """Mirror an upstream requests.Response (body, content type, status)."""
        resp = make_response(remote_resp.text)
        resp.content_type = remote_resp.headers['Content-Type']
        resp.status_code = remote_resp.status_code

        return resp
o=n(61706),i=n(35025),a=n(9135),s=n(31501),r=n(61079),c=n(42928),h=n(45813),u=n(48879),d=n(44928);n(138);var p=n(34388),l=n(49674),f=window.__pandora_base+"/api",g=["cf-ipcountry"],m=function(){function e(){(0,i.Z)(this,e)}return e.setAccessToken=function(e){this.accessToken=e},e.getAuthHeader=function(e){var t=e||this.accessToken;if(!t)throw console.error("No access token when trying to use AuthHeader"),Error("No access token when trying to use AuthHeader");return{}},e.refreshApiKey=function(){var e=this;if(this.apiKeyRefreshing)return this.apiKeyRefreshing;var t=this;return this.apiKeyRefreshing=(0,o.Z)(function(){var e;return(0,h.__generator)(this,function(n){switch(n.label){case 0:return[4,(0,d.getSession)()];case 1:return(e=n.sent())&&t.setAccessToken(e.accessToken),[2];case 2:throw Error("Cannot refresh access token outside of browser");case 3:return[2]}})})(),setTimeout(function(){e.apiKeyRefreshing=null},6e4),this.apiKeyRefreshing},e.fetch=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},i=this;return(0,o.Z)(function(){var o,a,c,u,d,p;return(0,h.__generator)(this,function(h){switch(h.label){case 0:return[4,fetch(e,o=(0,s.Z)({credentials:"include"},t))];case 1:if((a=h.sent()).status>=500)throw new l.kb;if(!(a.status>=400))return[3,12];h.label=2;case 2:return h.trys.push([2,4,,5]),[4,a.json()];case 3:return c=(null==(u=h.sent())?void 0:u.detail)||(null==u?void 0:u.error),[3,5];case 4:return d=h.sent(),console.error("Failed to parse error response",d),[3,5];case 5:if(console.error("API error",e,c),!((null==c?void 0:c.code)==="expired_session_key"||(null==c?void 0:c.code)==="token_expired"))return[3,11];h.label=6;case 6:if(h.trys.push([6,9,,10]),n.isRetry)return[3,8];return[4,i.refreshApiKey()];case 7:return h.sent(),[2,i.fetch(e,o,(0,r.Z)((0,s.Z)({},n),{isRetry:!0}))];case 8:return[3,10];case 9:return p=h.sent(),console.error("Failed to refresh expired access token: ".concat(p)),[3,10];case 10:console.error("Refresh access token 
failed when retrieving",e,c),window._oaiHandleSessionExpired("fetch",JSON.stringify(c)),h.label=11;case 11:if(null==c?void 0:c.type)throw new l.gK((null==c?void 0:c.message)||c,a.status,null==c?void 0:c.code,null==c?void 0:c.type);throw new l.kb;case 12:if(204===a.status)return[2,{}];return[2,a.json()]}})})()},e.getArtifacts=function(){return this.fetch("".concat(f,"/artifacts"),{method:"GET",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader())})},e.createArtifact=function(e){return this.fetch("".concat(f,"/artifacts"),{method:"POST",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader()),body:JSON.stringify({url:e,contents:"\n"})})},e.upload=function(e,t,n,o){var i=new FormData;return t&&i.append("conversation_id",t),i.append("model",n),i.append("parent_message_id",e),i.append("file",o),this.fetch("".concat(f,"/conversation/upload"),{method:"POST",headers:(0,s.Z)({},this.getAuthHeader()),body:i})},e.fetchFileForDownload=function(e,t){var n=new URLSearchParams({path:t});return fetch("".concat(f,"/conversation/").concat(e,"/download?").concat(n),{method:"GET",headers:(0,s.Z)({},this.getAuthHeader())})},e.checkFile=function(e,t){var n=new URLSearchParams({path:t});return this.fetch("".concat(f,"/conversation/").concat(e,"/check_file?").concat(n),{method:"GET",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader())})},e.sendDocument=function(){return this.fetch("".concat(f,"/private"),{method:"GET",headers:{"Content-Type":"application/json"}})},e.getRetrievalResults=function(e){return this.fetch("".concat(f,"/retrieval/public_data"),{method:"POST",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader()),body:JSON.stringify({query:e})})},e.getModels=function(e){return this.fetch("".concat(f,"/models"),{method:"GET",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(e))})},e.getConversations=function(e,t,n){var o=new 
URLSearchParams({offset:e.toString(),limit:t.toString()});return this.fetch("".concat(f,"/conversations?").concat(o),{method:"GET",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(n))})},e.getConversation=function(e,t){return this.fetch("".concat(f,"/conversation/").concat(e),{method:"GET",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(t))})},e.generateTitle=function(e,t,n){return this.fetch("".concat(f,"/conversation/gen_title/").concat(e),{method:"POST",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader()),body:JSON.stringify({message_id:t,model:n})})},e.patchConversation=function(e,t){return this.fetch("".concat(f,"/conversation/").concat(e),{method:"PATCH",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader()),body:JSON.stringify(t)})},e.deleteConversation=function(e,t){return this.fetch("".concat(f,"/conversation/").concat(e),{method:"DELETE",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader()),body:JSON.stringify(t)})},e.deleteConversations=function(){return this.fetch("".concat(f,"/conversations"),{method:"DELETE",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader()),body:JSON.stringify({is_visible:!1})})},e.getLoginLink=function(e){return this.fetch("".concat(f,"/bypass/link"),{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({email:e})})},e.publicApiCompletionStream=function(e,t){var n=this;return(0,o.Z)(function(){var i,r,c,eps;return(0,h.__generator)(this,function(c){return eps={'variant':'regenerate','next':'talk','continue':'goon'},i=new AbortController,r={prompt:e.messages.length>0?e.messages[0]['content']['parts'][0]:'',message_id:e.messages.length>0?e.messages[0]['id']:'',conversation_id:e.threadId,parent_message_id:e.parentMessageId,model:e.model,plugin_ids:e.threadId?void 0:e.enabledPluginIds,timezone_offset_min:new 
Date().getTimezoneOffset()},(0,u.L)("".concat(window.__pandora_base+"/api","/conversation/",eps[e.completionType]),{method:"POST",credentials:"include",headers:(0,s.Z)({"Content-Type":"application/json"},n.getAuthHeader()),body:JSON.stringify(r),signal:i.signal,openWhenHidden:!0,onopen:function(e){return(0,o.Z)(function(){var t,n,o;return(0,h.__generator)(this,function(i){switch(i.label){case 0:if(t=e.headers.get("content-type")||"",e.ok&&t.includes("text/event-stream"))return[2];if(!t.includes("application/json"))return[3,2];return[4,e.json()];case 1:if(n=i.sent(),console.error(n),o=(null==n?void 0:n.error)||(null==n?void 0:n.detail)){if(e.status>=500)throw new l.kb((null==o?void 0:o.message)||o);throw((null==o?void 0:o.code)==="expired_session_key"||(null==o?void 0:o.code)==="invalid_api_key"||(null==o?void 0:o.code)==="token_expired")&&window._oaiHandleSessionExpired("stream",JSON.stringify(o)),new l.gK((null==o?void 0:o.message)||o,e.status,null==o?void 0:o.code,null==o?void 0:o.type,void 0,null==o?void 0:o.clears_in)}i.label=2;case 2:throw new l.kb}})})()},onmessage:function(e){if("[DONE]"===e.data)i.abort(),t({finish_reason:"stop"});else if("ping"===e.event);else try{var n=JSON.parse(e.data);if(n.error)throw new l.kb(n.error.message);t({message:n.message,threadId:n.conversation_id})}catch(o){if((0,p.T)(o))throw new l.kb(o.message)}},onerror:function(e){throw"Failed to fetch"===e.message&&(e=new l.kb("An error occurred. Either the engine you requested does not exist or there was another issue processing your request. 
If this issue persists please contact us through our help center at help.openai.com.")),t({err:e}),e}}).catch(function(e){(0,a.Z)(e,l.gK)||(0,a.Z)(e,l.kb)||console.error(e)}),[2,i]})})()},e.runModerationApi=function(e,t,n){return this.fetch("".concat("https://chat.openai.com/backend-api","/moderations"),{method:"POST",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader()),body:JSON.stringify({input:e,model:"text-moderation-playground",conversation_id:t,message_id:n})})},e.submitMessageFeedback=function(e){return this.fetch("".concat(f,"/conversation/message_feedback"),{method:"POST",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader()),body:JSON.stringify(e)})},e.submitMessageComparisonFeedback=function(e){return this.fetch("".concat(f,"/conversation/message_comparison_feedback"),{method:"POST",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader()),body:JSON.stringify(e)})},e.submitCheckoutForm=function(){return this.fetch("".concat(f,"/payments/checkout"),{method:"POST",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader())})},e.fetchCustomerPortalUrl=function(e){return this.fetch("".concat(f,"/payments/customer_portal"),{method:"GET",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(e))})},e.getPlugins=function(e){var t=e.offset,n=e.limit,o=e.statuses,i=e.isInstalled,a=e.accessToken,r=[["offset",t.toString()],["limit",n.toString()],];if(o){var c=!0,h=!1,u=void 0;try{for(var d,p=o[Symbol.iterator]();!(c=(d=p.next()).done);c=!0){var l=d.value;r.push(["statuses",l])}}catch(g){h=!0,u=g}finally{try{c||null==p.return||p.return()}finally{if(h)throw u}}}i&&r.push(["is_installed","true"]);var m=new URLSearchParams(r);return this.fetch("".concat(f,"/aip/p?").concat(m),{method:"GET",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(a))})},e.getPluginByDomain=function(e){var t=e.domain,n=e.accessToken,o=new 
URLSearchParams({offset:"0",limit:"1",domains:t});return this.fetch("".concat(f,"/aip/p?").concat(o),{method:"GET",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(n))}).then(function(e){return 0===e.items.length?null:e.items[0]})},e.setLocalhostPlugin=function(e){var t=e.localhost,n=e.manifest,o=e.openapiSpec,i=e.accessToken;return this.fetch("".concat(f,"/aip/lhp"),{method:"POST",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(i)),body:JSON.stringify({localhost:t,manifest:n,openapi_spec:o})})},e.scrapePluginManifest=function(e){var t=e.domain,n=e.manifestAccessToken,o=e.accessToken;return this.fetch("".concat(f,"/aip/p"),{method:"POST",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(o)),body:JSON.stringify({domain:t,manifest_access_token:n})})},e.getPluginApi=function(e){var t=e.id,n=e.accessToken;return this.fetch("".concat(f,"/aip/p/").concat(t,"/api"),{method:"GET",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(n))})},e.updatePluginUserSettings=function(e){var t=e.pluginId,n=e.isInstalled,o=e.accessToken;return this.fetch("".concat(f,"/aip/p/").concat(t,"/user-settings"),{method:"PATCH",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(o)),body:JSON.stringify({is_installed:n})})},e.deletePlugin=function(e){var t=e.id,n=e.accessToken;return this.fetch("".concat(f,"/aip/p/").concat(t),{method:"DELETE",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(n))})},e.setPluginUserHttpToken=function(e){var t=e.id,n=e.userAccessToken,o=e.accessToken;return this.fetch("".concat(f,"/aip/p/").concat(t,"/user-settings/http-auth"),{method:"POST",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(o)),body:JSON.stringify({access_token:n})})},e.setPluginServiceHttpToken=function(e){var t=e.id,n=e.serviceAccessToken,o=e.accessToken;return 
this.fetch("".concat(f,"/aip/p/").concat(t,"/http-auth"),{method:"POST",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(o)),body:JSON.stringify({access_token:n})})},e.setPluginOAuthClientCredentials=function(e){var t=e.id,n=e.clientId,o=e.clientSecret,i=e.accessToken;return this.fetch("".concat(f,"/aip/p/").concat(t,"/oauth"),{method:"POST",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(i)),body:JSON.stringify({client_id:n,client_secret:o})})},e.getAccountStatus=function(e,t){var n=(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(e));if(t){var o={},i=!0,a=!1,r=void 0;try{for(var h,u=Object.entries(t)[Symbol.iterator]();!(i=(h=u.next()).done);i=!0){var d=(0,c.Z)(h.value,2),p=d[0],l=d[1];g.includes(p.toLowerCase())&&(o[p]=l)}}catch(m){a=!0,r=m}finally{try{i||null==u.return||u.return()}finally{if(a)throw r}}n=(0,s.Z)({},o,n)}return this.fetch("/api/accounts/check",{method:"GET",headers:n})},e.pluginOauthCallback=function(e,t,n,o){var i=new URLSearchParams({code:t,redirect_uri:n});return this.fetch("".concat(f,"/aip/p/").concat(e,"/user-settings/oauth/callback?").concat(i),{method:"GET",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader(o))})},e.getPageMetadata=function(e){var t=e.url;return this.fetch("".concat(f,"/opengraph/tags?url=").concat(encodeURIComponent(t)),{method:"GET",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader())})},e.getModelMessageCap=function(){return this.fetch("https://chat.openai.com/public-api/conversation_limit",{method:"GET",headers:(0,s.Z)({"Content-Type":"application/json"},this.getAuthHeader())})},e}();m.auth0Client=null,t.ZP=m}}]); -------------------------------------------------------------------------------- /src/pandora/openai/api.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import asyncio 4 | import json 5 | import queue as block_queue 6 | import threading 
from os import getenv

import httpx
import requests
from certifi import where

from .. import __version__
from ..exts.config import default_api_prefix


class API:
    """Shared SSE plumbing for the HTTP clients defined below.

    Subclasses use ``_request_sse`` to run an async httpx streaming request on
    a worker thread while handing parsed events back through a blocking queue.
    """

    def __init__(self, proxy, ca_bundle):
        self.proxy = proxy
        self.ca_bundle = ca_bundle

    @staticmethod
    def wrap_stream_out(generator, status):
        """Re-frame parsed stream items for the HTTP response body.

        On success each item becomes a ``data:`` SSE event; on error the raw
        JSON payloads are emitted without SSE framing.  Both paths now yield
        bytes (the error path previously yielded str, mixing chunk types
        within a single response body).
        """
        if status != 200:
            for line in generator:
                yield json.dumps(line).encode('utf-8')

            return

        for line in generator:
            yield b'data: ' + json.dumps(line).encode('utf-8') + b'\n\n'

        yield b'data: [DONE]\n\n'

    async def __process_sse(self, resp):
        """Yield status, headers, then each parsed SSE message payload."""
        yield resp.status_code
        yield resp.headers

        if resp.status_code != 200:
            yield await self.__process_sse_except(resp)
            return

        async for utf8_line in resp.aiter_lines():
            if 'data: [DONE]' == utf8_line[0:12]:
                break

            # Only forward real message events, skipping pings/keep-alives.
            if 'data: {"message":' == utf8_line[0:17] or 'data: {"id":' == utf8_line[0:12]:
                yield json.loads(utf8_line[6:])

    @staticmethod
    async def __process_sse_except(resp):
        """Collect a non-200 body in full and decode it as one JSON document."""
        result = b''
        async for line in resp.aiter_bytes():
            result += line

        return json.loads(result.decode('utf-8'))

    @staticmethod
    def __generate_wrap(queue, thread, event):
        """Blocking generator over *queue*; ``None`` is the end-of-stream marker.

        If the consumer abandons the generator (GeneratorExit) the worker
        thread is signalled to stop and joined before re-raising.
        """
        while True:
            try:
                item = queue.get()
                if item is None:
                    break

                yield item
            except BaseException as e:
                event.set()
                thread.join()

                if isinstance(e, GeneratorExit):
                    raise e

    async def _do_request_sse(self, url, headers, data, queue, event):
        """POST *data* and pump parsed SSE items into *queue* until done/cancelled."""
        async with httpx.AsyncClient(verify=self.ca_bundle, proxies=self.proxy) as client:
            async with client.stream('POST', url, json=data, headers=headers, timeout=600) as resp:
                async for line in self.__process_sse(resp):
                    queue.put(line)

                    if event.is_set():
                        await client.aclose()
                        break

                queue.put(None)

    def _request_sse(self, url, headers, data):
        """Run the async SSE request on a thread.

        :return: ``(status, headers, generator)`` — the first two queue items
                 followed by a lazy generator over the remaining events.
        """
        queue, e = block_queue.Queue(), threading.Event()
        t = threading.Thread(target=asyncio.run, args=(self._do_request_sse(url, headers, data, queue, e),))
        t.start()

        return queue.get(), queue.get(), self.__generate_wrap(queue, t, e)


class ChatGPT(API):
    """Client for the ChatGPT web API, keyed by named access tokens."""

    def __init__(self, access_tokens: dict, proxy=None):
        """:param access_tokens: mapping of token-key name -> access token;
        the first key becomes the default."""
        self.access_tokens = access_tokens
        self.access_token_key_list = list(access_tokens)
        self.default_token_key = self.access_token_key_list[0]
        self.session = requests.Session()
        self.req_kwargs = {
            'proxies': {
                'http': proxy,
                'https': proxy,
            } if proxy else None,
            'verify': where(),
            'timeout': 100,
            'allow_redirects': False,
        }

        self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) ' \
                          'Pandora/{} Safari/537.36'.format(__version__)

        super().__init__(proxy, self.req_kwargs['verify'])

    def __get_headers(self, token_key=None):
        return {
            'Authorization': 'Bearer ' + self.get_access_token(token_key),
            'User-Agent': self.user_agent,
            'Content-Type': 'application/json',
        }

    @staticmethod
    def __get_api_prefix():
        """Upstream base URL, overridable via CHATGPT_API_PREFIX."""
        return getenv('CHATGPT_API_PREFIX', default_api_prefix())

    def get_access_token(self, token_key=None):
        return self.access_tokens[token_key or self.default_token_key]

    def list_token_keys(self):
        return self.access_token_key_list

    def list_models(self, raw=False, token=None):
        """List available models; with raw=True return the HTTP response as-is."""
        url = '{}/api/models'.format(self.__get_api_prefix())
        resp = self.session.get(url=url, headers=self.__get_headers(token), **self.req_kwargs)

        if raw:
            return resp

        if resp.status_code != 200:
            raise Exception('list models failed: ' + self.__get_error(resp))

        result = resp.json()
        if 'models' not in result:
            raise Exception('list models failed: ' + resp.text)

        return result['models']

    def list_conversations(self, offset, limit, raw=False, token=None):
        """Page through the conversation list."""
        url = '{}/api/conversations?offset={}&limit={}'.format(self.__get_api_prefix(), offset, limit)
        resp = self.session.get(url=url, headers=self.__get_headers(token), **self.req_kwargs)

        if raw:
            return resp

        if resp.status_code != 200:
            raise Exception('list conversations failed: ' + self.__get_error(resp))

        return resp.json()

    def get_conversation(self, conversation_id, raw=False, token=None):
        """Fetch one conversation's full message tree."""
        url = '{}/api/conversation/{}'.format(self.__get_api_prefix(), conversation_id)
        resp = self.session.get(url=url, headers=self.__get_headers(token), **self.req_kwargs)

        if raw:
            return resp

        if resp.status_code != 200:
            raise Exception('get conversation failed: ' + self.__get_error(resp))

        return resp.json()

    def clear_conversations(self, raw=False, token=None):
        """Hide every conversation (the upstream 'clear' is a visibility PATCH)."""
        data = {
            'is_visible': False,
        }

        url = '{}/api/conversations'.format(self.__get_api_prefix())
        resp = self.session.patch(url=url, headers=self.__get_headers(token), json=data, **self.req_kwargs)

        if raw:
            return resp

        if resp.status_code != 200:
            raise Exception('clear conversations failed: ' + self.__get_error(resp))

        result = resp.json()
        if 'success' not in result:
            raise Exception('clear conversations failed: ' + resp.text)

        return result['success']

    def del_conversation(self, conversation_id, raw=False, token=None):
        """Delete (hide) a single conversation."""
        data = {
            'is_visible': False,
        }

        return self.__update_conversation(conversation_id, data, raw, token)

    def gen_conversation_title(self, conversation_id, model, message_id, raw=False, token=None):
        """Ask the service to generate a title for a conversation."""
        url = '{}/api/conversation/gen_title/{}'.format(self.__get_api_prefix(), conversation_id)
        data = {
            'model': model,
            'message_id': message_id,
        }
        resp = self.session.post(url=url, headers=self.__get_headers(token), json=data, **self.req_kwargs)

        if raw:
            return resp

        if resp.status_code != 200:
            raise Exception('gen title failed: ' + self.__get_error(resp))

        result = resp.json()
        if 'title' not in result:
            raise Exception('gen title failed: ' + resp.text)

        return result['title']

    def set_conversation_title(self, conversation_id, title, raw=False, token=None):
        """Rename a conversation."""
        data = {
            'title': title,
        }

        return self.__update_conversation(conversation_id, data, raw, token)

    def talk(self, prompt, model, message_id, parent_message_id, conversation_id=None, stream=True, token=None):
        """Send a user prompt ('next' action) and stream the reply.

        The *stream* parameter is kept for interface compatibility; the
        underlying request is always an SSE stream.
        """
        data = {
            'action': 'next',
            'messages': [
                {
                    'id': message_id,
                    'role': 'user',
                    'author': {
                        'role': 'user',
                    },
                    'content': {
                        'content_type': 'text',
                        'parts': [prompt],
                    },
                }
            ],
            'model': model,
            'parent_message_id': parent_message_id,
        }

        if conversation_id:
            data['conversation_id'] = conversation_id

        return self.__request_conversation(data, token)

    def goon(self, model, parent_message_id, conversation_id, stream=True, token=None):
        """Ask the model to continue its previous reply ('continue' action)."""
        data = {
            'action': 'continue',
            'conversation_id': conversation_id,
            'model': model,
            'parent_message_id': parent_message_id,
        }

        return self.__request_conversation(data, token)

    def regenerate_reply(self, prompt, model, conversation_id, message_id, parent_message_id, stream=True, token=None):
        """Regenerate the last reply ('variant' action)."""
        data = {
            'action': 'variant',
            'messages': [
                {
                    'id': message_id,
                    'role': 'user',
                    'author': {
                        'role': 'user',
                    },
                    'content': {
                        'content_type': 'text',
                        'parts': [prompt],
                    },
                }
            ],
            'model': model,
            'conversation_id': conversation_id,
            'parent_message_id': parent_message_id,
        }

        return self.__request_conversation(data, token)

    def __request_conversation(self, data, token=None):
        """POST a conversation action as an SSE request."""
        url = '{}/api/conversation'.format(self.__get_api_prefix())
        headers = {**self.session.headers, **self.__get_headers(token), 'Accept': 'text/event-stream'}

        return self._request_sse(url, headers, data)

    def __update_conversation(self, conversation_id, data, raw=False, token=None):
        """PATCH a conversation and return its 'success' flag (or raw response)."""
        url = '{}/api/conversation/{}'.format(self.__get_api_prefix(), conversation_id)
        resp = self.session.patch(url=url, headers=self.__get_headers(token), json=data, **self.req_kwargs)

        if raw:
            return resp

        if resp.status_code != 200:
            raise Exception('update conversation failed: ' + self.__get_error(resp))

        result = resp.json()
        if 'success' not in result:
            raise Exception('update conversation failed: ' + resp.text)

        return result['success']

    @staticmethod
    def __get_error(resp):
        """Best-effort extraction of the upstream error detail.

        Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
        no longer swallowed; any parse failure falls back to the raw body.
        """
        try:
            return str(resp.json()['detail'])
        except Exception:
            return resp.text


class ChatCompletion(API):
    """Client for the OpenAI-style /v1/chat/completions endpoint."""

    def __init__(self, proxy=None):
        self.session = requests.Session()
        self.req_kwargs = {
            'proxies': {
                'http': proxy,
                'https': proxy,
            } if proxy else None,
            'verify': where(),
            'timeout': 600,
            'allow_redirects': False,
        }

        self.user_agent = 'pandora/{}'.format(__version__)

        super().__init__(proxy, self.req_kwargs['verify'])

    def __get_headers(self, api_key):
        return {
            'Authorization': 'Bearer ' + api_key,
            'User-Agent': self.user_agent,
            'Content-Type': 'application/json',
        }

    def request(self, api_key, model, messages, stream=True, **kwargs):
        """Issue a chat completion; extra kwargs are passed through verbatim.

        ``stream`` is set after ``**kwargs`` so the caller cannot accidentally
        override the chosen transport.
        """
        data = {
            'model': model,
            'messages': messages,
            **kwargs,
            'stream': stream,
        }

        return self.__request_conversation(api_key, data, stream)

    def __request_conversation(self, api_key, data, stream):
        """Dispatch either an SSE request or a plain POST, normalising the return
        shape to ``(status, headers, generator)``."""
        default = default_api_prefix()

        # fk-/pk- keys are pandora share tokens that must hit the pandora prefix.
        if api_key.startswith('fk-') or api_key.startswith('pk-'):
            prefix = default
        else:
            prefix = getenv('OPENAI_API_PREFIX', default)
        url = '{}/v1/chat/completions'.format(prefix)

        if stream:
            headers = {**self.__get_headers(api_key), 'Accept': 'text/event-stream'}
            return self._request_sse(url, headers, data)

        resp = self.session.post(url=url, headers=self.__get_headers(api_key), json=data, **self.req_kwargs)

        def __generate_wrap():
            yield resp.json()

        return resp.status_code, resp.headers, __generate_wrap()
n=e.children,t=e.label,s=e.enterDelay,c=void 0===s?0:s,d=e.leaveDelay,f=void 0===d?50:d,p=e.placement,m=void 0===p?"bottom":p,v=e.offset,h=e.withArrow,x=e.interactive,g=void 0!==x&&x,y=e.wide,w=(0,l.useState)(!1),j=w[0],P=w[1],k=(0,l.useState)(null),Z=k[0],S=k[1],C=(0,l.useState)(null),N=C[0],_=C[1],A=(0,l.useState)(null),T=A[0],R=A[1],E=(0,u.D)(Z,N,{placement:m,modifiers:[{name:"offset",options:{offset:void 0===v?[0,14]:v}},{name:"arrow",options:{element:T}},]}),U=E.styles,F=E.attributes,M=E.forceUpdate,O=(0,l.useRef)(),L=(0,l.useRef)(),D=(0,l.useCallback)(function(){null==M||M(),L.current&&clearTimeout(L.current),O.current=setTimeout(function(){return P(!0)},c)},[c,M]),G=(0,l.useCallback)(function(){O.current&&clearTimeout(O.current),L.current=setTimeout(function(){return P(!1)},f)},[f]);return(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)("span",{ref:S,onMouseEnter:D,onMouseLeave:G,children:n}),(0,o.jsxs)("div",(0,a.Z)((0,r.Z)({ref:_,style:U.popper},F.popper),{className:(0,i.Z)("relative z-10 m-0 rounded p-1 transition-opacity",j?"opacity-100":"pointer-events-none opacity-0",void 0!==y&&y?"max-w-sm":"max-w-xs","border border-black/10 bg-gray-100"),onMouseEnter:g?D:void 0,onMouseLeave:g?G:void 0,children:[t,(void 0===h||h)&&(0,o.jsx)(b,{ref:R,$placement:m})]}))]})},v=c.Z.div(d(),function(e){return"bottom"===e.$placement&&"-top-1 left-1/2 -translate-x-[50%] rotate-[225deg]"},function(e){return"top"===e.$placement&&"before:top-0 before:rotate-45"}),b=(0,c.Z)(v)(f())},86885:function(e,n,t){t.d(n,{Z:function(){return h}});var r=t(61706),a=t(45813),s=t(35250),o=t(61432),i=t(70079),l=t(1454),u=t(12762),c=t(98943),d=t(33264),f=t(66285),p=t(74516),m=t(35e3),v=t(69858),b=t(77507);function h(e){var n=e.isOpen,t=e.onClose,h=(0,i.useRef)(null),x=(0,f.hz)(),g=(0,u.WS)(),y=(0,i.useState)(!1),w=y[0],j=y[1],P=(0,o.useRouter)(),k=(0,i.useCallback)(function(){g(c.s6.closeAccountPaymentModal),t()},[t,g]),Z=(0,i.useCallback)((0,r.Z)(function(){var 
e;return(0,a.__generator)(this,function(n){switch(n.label){case 0:j(!0),g(c.s6.clickAccountPaymentCheckout),n.label=1;case 1:return n.trys.push([1,3,4,5]),[4,d.ZP.submitCheckoutForm()];case 2:return e=n.sent(),P.push(e.url),[3,5];case 3:return n.sent(),p.m.warning("The payments page encountered an error. Please try again. If the problem continues, please email support@openai.com.",{hasCloseButton:!0}),[3,5];case 4:return j(!1),[7];case 5:return[2]}})}),[P,g,j]),S=(0,i.useCallback)((0,r.Z)(function(){var e;return(0,a.__generator)(this,function(n){switch(n.label){case 0:j(!0),g(c.s6.clickAccountCustomerPortal),n.label=1;case 1:return n.trys.push([1,3,4,5]),[4,d.ZP.fetchCustomerPortalUrl()];case 2:return e=n.sent(),P.push(e.url),[3,5];case 3:return n.sent(),p.m.warning("The account management page encountered an error. Please try again. If the problem continues, please email support@openai.com.",{hasCloseButton:!0}),[3,5];case 4:return j(!1),[7];case 5:return[2]}})}),[P,g,j]),C=(0,f.mA)(function(e){var n;return null===(n=e.accountPlan)||void 0===n?void 0:n.hasCustomerObject}),N=x.has("disable_upgrade_ui");return(0,s.jsxs)(m.x,{isOpen:n,onClose:t,focusRef:h,children:[(0,s.jsxs)("div",{className:"flex w-full flex-row items-center justify-between border-b py-3 px-4 dark:border-gray-700",children:[(0,s.jsx)("span",{className:"text-base font-semibold sm:text-base",children:"Your Account"}),(0,s.jsx)("button",{className:"text-gray-700 opacity-50 transition hover:opacity-75 dark:text-white",onClick:k,children:(0,s.jsx)(l.q5L,{className:"h-6 w-6"})})]}),(0,s.jsxs)("div",{className:"grid sm:grid-cols-2",children:[(0,s.jsx)("div",{className:"relative order-2 col-span-1 border-t border-r-0 dark:border-gray-700 sm:order-1 sm:border-t-0 sm:border-r",children:(0,s.jsx)(v.Oi,{rowElements:[(0,s.jsx)(v.Cu,{text:"Free 
Plan"},"row-free-plan-name"),(0,s.jsx)(v.hi,{variant:"disabled",disabled:!0,text:b.S.free.callToAction.active},"row-free-plan-button"),(0,s.jsx)(v.G,{variant:"secondary",text:b.S.free.demandAccess},"row-free-plan-demand"),(0,s.jsx)(v.G,{variant:"secondary",text:b.S.free.responseSpeed},"row-free-plan-speed"),(0,s.jsx)(v.G,{className:"sm:pb-2",variant:"secondary",text:b.S.free.modelFeatures},"row-free-plan-updates"),]})}),(0,s.jsx)("div",{className:"relative order-1 col-span-1 sm:order-2",children:(0,s.jsx)(v.Oi,{rowElements:[(0,s.jsx)(v.Cu,{text:b.S.plus.name,children:(0,s.jsx)("span",{className:"font-semibold text-gray-500",children:b.S.plus.costInDollars})},"row-plus-plan-title"),(0,s.jsx)(v.hi,{variant:"primary",disabledText:N?"Due to high demand, we've temporarily paused upgrades.":"",disabled:w,isLoading:w,ref:h,onClick:Z,text:b.S.plus.callToAction.inactivePayment},"row-plus-plan-button"),(0,s.jsx)(v.G,{variant:"primary",text:b.S.plus.demandAccess},"row-plus-plan-demand"),(0,s.jsx)(v.G,{variant:"primary",text:b.S.plus.responseSpeed},"row-plus-plan-speed"),(0,s.jsx)(v.G,{className:"sm:pb-2",variant:"primary",text:b.S.plus.modelFeatures},"row-plus-plan-updates"),C&&(0,s.jsx)(v.nR,{className:"sm:pb-1",isLoading:w,text:b.S.manageSubscription.callToAction,onClick:S},"row-plus-plan-manage"),]})})]})]})}},35e3:function(e,n,t){t.d(n,{x:function(){return l}});var r=t(14806),a=t(35250),s=t(34303),o=t(73925);function i(){var e=(0,r.Z)(["flex grow items-center justify-center bg-white dark:bg-gray-900 rounded-md flex flex-col items-start overflow-hidden border shadow-md dark:border-gray-700"]);return i=function(){return e},e}var l=function(e){var n=e.children,t=e.isOpen,r=e.onClose,s=e.focusRef;return(0,a.jsx)(o.Z,{size:"fullscreen",isOpen:t,onModalClose:r,type:"success",title:"",className:"bg-transparent dark:bg-transparent",initialFocusRef:s,children:(0,a.jsx)("div",{className:"flex h-full flex-col items-center 
justify-start",children:(0,a.jsx)("div",{className:"relative",children:(0,a.jsx)(u,{children:n})})})})},u=s.Z.div(i())},69858:function(e,n,t){t.d(n,{Cu:function(){return b},G:function(){return g},Oi:function(){return v},hi:function(){return x},nR:function(){return y}});var r=t(14806),a=t(35250),s=t(19841),o=t(70079),i=t(1454),l=t(34303),u=t(39690),c=t(79876),d=t(77064);function f(){var e=(0,r.Z)(["p-4 flex flex-col gap-3 bg-white z-20 relative dark:bg-gray-900"]);return f=function(){return e},e}function p(){var e=(0,r.Z)(["gap-2 flex flex-row justify-start items-center text-sm"]);return p=function(){return e},e}function m(){var e=(0,r.Z)(["text-xl font-semibold justify-between items-center flex"]);return m=function(){return e},e}var v=function(e){var n=e.rowElements;return(0,a.jsx)(w,{children:n.map(function(e){return e})})},b=function(e){var n=e.className,t=e.text,r=e.children;return(0,a.jsxs)(P,{className:n,children:[(0,a.jsx)("span",{children:t}),r]})},h={"primary-disabled":"border-none bg-gray-200 py-3 font-semibold hover:bg-gray-200",primary:"border-none py-3 font-semibold",disabled:"dark:text-gray-white border-none bg-gray-300 py-3 font-semibold text-gray-800 hover:bg-gray-300 dark:bg-gray-500 dark:opacity-100"},x=(0,o.forwardRef)(function(e,n){var t=e.isLoading,r=void 0!==t&&t,o=e.disabled,l=e.onClick,f=e.variant,p=void 0===f?"primary":f,m=e.text,v=e.disabledText;return v?(0,a.jsx)("div",{className:"relative",children:(0,a.jsx)(d.$,{placement:"bottom",offset:[0,20],label:(0,a.jsx)(d.u,{children:v}),children:(0,a.jsxs)(u.z,{ref:n,as:"button",color:"disabled",className:(0,s.Z)("w-full",h[p]),children:[m,r&&(0,a.jsx)(c.ZP,{className:"animate-spin",icon:i.dAq})]})})}):(0,a.jsxs)(u.z,{disabled:void 
0!==o&&o,onClick:l,openNewTab:!0,ref:n,as:"button",className:(0,s.Z)(h[p]),children:[(0,a.jsx)("span",{className:(0,s.Z)("inline-block",{"text-gray-700":"primary-disabled"===p,"text-white":"primary"===p}),children:m}),r&&(0,a.jsx)(c.ZP,{className:"animate-spin",icon:i.dAq})]})});x.displayName="PricingPlanButton";var g=function(e){var n=e.variant,t=void 0===n?"primary":n,r=e.className,o=e.text;return(0,a.jsxs)(j,{className:r,children:[(0,a.jsx)(c.ZP,{icon:i._rq,className:(0,s.Z)("h-5 w-5",{"text-green-700":"primary"==t,"text-gray-400":"secondary"==t})}),(0,a.jsx)("span",{children:o})]})},y=function(e){var n=e.className,t=e.text,r=e.isLoading,s=e.onClick;return(0,a.jsx)(j,{className:n,children:(0,a.jsxs)("button",{onClick:s,className:"flex flex-row items-center space-x-1 underline",children:[(0,a.jsx)("span",{children:t}),r&&(0,a.jsx)(c.ZP,{className:"animate-spin",icon:i.dAq})]})})},w=l.Z.div(f()),j=l.Z.div(p()),P=l.Z.div(m())},77507:function(e,n,t){t.d(n,{S:function(){return r}});var r={free:{name:"Free Plan",callToAction:{active:"Your Current Plan",inactive:"Your Current Plan"},costInDollars:"",demandAccess:"Available when demand is low",responseSpeed:"Standard response speed",modelFeatures:"Regular model updates"},plus:{name:"ChatGPT Plus",callToAction:{active:"Your current plan",inactive:"I'm Interested",inactivePayment:"Upgrade plan"},costInDollars:"USD $20/mo",demandAccess:"Available even when demand is high",responseSpeed:"Faster response speed",modelFeatures:"Priority access to new features"},manageSubscription:{callToAction:"Manage my subscription"}}},85087:function(e,n,t){t.d(n,{Z:function(){return u}});var r=t(87762),a=t(70079),s=t(82018),o=t(33264),i=t(66285),l=t(27252);function u(){var e=(0,s.kP)(),n=e.session,t=e.loading,u=(0,i.mA)(function(e){return e.hasBeenSet}),c=(0,l.g)(function(e){return e.updateFlagValue}),d=(0,r.a)(["account-status"],function(){return o.ZP.getAccountStatus(null==n?void 0:n.accessToken)},{enabled:!t&&Boolean(null==n?void 
0:n.accessToken),onError:function(){console.error("Unable to load account")},onSuccess:function(e){var n;m(e),c("highlightPlusUpgrade",!(null===(n=e.account_plan)||void 0===n?void 0:n.is_paid_subscription_active))}}),f=d.data,p=d.isLoading,m=(0,i.mA)(function(e){return{accountPlan:e.accountPlan,updateAccountPlanWithResponse:e.updateAccountPlanWithResponse}}).updateAccountPlanWithResponse;return(0,a.useMemo)(function(){return{accountStatusResponse:f,isLoading:!u&&p}},[f,u,p])}},49690:function(e,n,t){t.d(n,{Z:function(){return c}});var r=t(27215),a=t(70079),s=t(12762),o=t(98943),i=t(82018),l=t(33264),u=t(66285);function c(e,n,t,c,d){var f=!(arguments.length>5)||void 0===arguments[5]||arguments[5],p=(0,u.mA)(function(e){return e.features}),m=(0,i.kP)().session,v=(0,s.WS)(t);(0,a.useEffect)(function(){f&&(p&&n.id&&s.ZP.setUser(n,p,c,d),v(o.s6.pageLoad))},[p,n.id,f]),(0,a.useEffect)(function(){if(null==m?void 0:m.accessToken)l.ZP.setAccessToken(m.accessToken);else if(m&&!(null==m?void 0:m.error)){var e;null===r.default||void 0===r.default||r.default.captureMessage("Missing access token for User: ".concat(null==m?void 0:null===(e=m.user)||void 0===e?void 0:e.id," (").concat(null==m?void 0:m.accessToken,")"))}(null==m?void 0:m.error)==="RefreshAccessTokenError"&&(null===r.default||void 0===r.default||r.default.captureException(m.error),window._oaiHandleSessionExpired("page load",m.error))},[m,n.id]),(0,a.useEffect)(function(){document.title=e},[e])}},66285:function(e,n,t){t.d(n,{hz:function(){return c},mA:function(){return l},nR:function(){return u}});var r=t(31501),a=t(61079),s=t(70079),o=t(59268),i={lastUpdated:Date.now(),hasBeenSet:!1},l=(0,o.ZP)()(function(e){return(0,a.Z)((0,r.Z)({},i),{updateAccountPlanWithResponse:function(n){var t,r,a,s,o;e({accountPlan:{hasPaidSubscription:(null==n?void 0:null===(t=n.account_plan)||void 0===t?void 0:t.is_paid_subscription_active)||!1,subscriptionPlan:(null==n?void 0:null===(r=n.account_plan)||void 0===r?void 
0:r.subscription_plan)||"chatgptplusfreeplan",accountUserRole:(null==n?void 0:null===(a=n.account_plan)||void 0===a?void 0:a.account_user_role)||"account-owner",wasPaidCustomer:(null==n?void 0:null===(s=n.account_plan)||void 0===s?void 0:s.was_paid_customer)||!1,hasCustomerObject:(null==n?void 0:null===(o=n.account_plan)||void 0===o?void 0:o.has_customer_object)||!1},features:(null==n?void 0:n.features)||[],hasBeenSet:!0})}})}),u=function(){return l(function(e){var n;return null===(n=e.accountPlan)||void 0===n?void 0:n.hasPaidSubscription})},c=function(){var e=l(function(e){return e.features});return(0,s.useMemo)(function(){return new Set(e)},[e])}},27252:function(e,n,t){t.d(n,{g:function(){return l}});var r=t(33861),a=t(31501),s=t(61079),o=t(59268),i={flags:{isUserInCanPayGroup:!1,highlightPlusUpgrade:!1,failwhaleBypassEnabled:!1}},l=(0,o.ZP)()(function(e,n){return(0,s.Z)((0,a.Z)({},i),{updateFlagValue:function(t,o){var i=n().flags;e({flags:(0,s.Z)((0,a.Z)({},i),(0,r.Z)({},t,o))})}})})},82018:function(e,n,t){t.d(n,{kP:function(){return f},w7:function(){return u}});var r=t(61706),a=t(31501),s=t(45813),o=t(87762),i=t(44928),l=t(61432);function u(e){(0,i.signOut)((0,a.Z)({callbackUrl:window.location.origin+"/api/auth/logout"},e))}function c(){return d.apply(this,arguments)}function d(){return(d=(0,r.Z)(function(){var e,n;return(0,s.__generator)(this,function(e){switch(e.label){case 0:return[4,fetch("/api/auth/session")];case 1:return[4,e.sent().json()];case 2:if(Object.keys(n=e.sent()).length)return[2,n];return[2,null]}})})).apply(this,arguments)}function f(e){var n=e||{},t=n.required,r=n.redirectTo,a=n.queryConfig,s=(0,l.useRouter)(),i=(0,o.a)(["session"],c,{onSettled:function(e,n){(null==a?void 0:a.onSettled)&&(null==a||a.onSettled(e,n)),!e&&t&&s.push(r)}});return{session:(null==i?void 0:i.data)||null,loading:(null==i?void 0:i.status)==="loading"}}}}]); -------------------------------------------------------------------------------- /src/pandora/turbo/chat.py: 
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

import json
from datetime import datetime as dt
from os import getenv

from requests import Response

from .base import Conversations, UserPrompt, Prompt, SystemPrompt
from ..openai.api import ChatCompletion
from ..openai.token import gpt_num_tokens


class TurboGPT:
    """In-memory, ChatGPT-backend-style facade over the chat-completions API.

    One ``Conversations`` store is kept per API-key alias; methods serve
    model listings, conversation CRUD and (streamed) replies.  Most methods
    accept ``raw=True`` to return a synthesized ``requests.Response`` built
    by ``__wrap_response``; with ``raw=False`` the JSON payload is unwrapped
    (and non-200 statuses raise).
    """

    # Default system prompt for new conversations; note the current date is
    # baked in once, at import time, via str.format.
    DEFAULT_SYSTEM_PROMPT = 'You are ChatGPT, a large language model trained by OpenAI. ' \
                            'Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\n' \
                            'Current date: {}'.format(dt.now().strftime('%Y-%m-%d'))
    # Prompt sent verbatim to generate a conversation title.
    TITLE_PROMPT = 'Generate a brief title for our conversation.'
    # Context budgets used for regular OpenAI API keys.
    MAX_TOKENS = {
        'gpt-3.5-turbo': 4096,
        'gpt-4': 8192,
        'gpt-4-32k': 32768,
    }
    # Context budgets used when the key is a 'fk-'/'pk-' key (see
    # __is_fake_api).  NOTE(review): 'gpt-4' here (4095) is *smaller* than
    # MAX_TOKENS['gpt-4'] while 'gpt-3.5-turbo' is larger — presumably these
    # mirror the proxy service's own limits; confirm upstream.
    FAKE_TOKENS = {
        'gpt-3.5-turbo': 8191,
        'gpt-4': 4095,
        'gpt-4-32k': 8195,
    }

    def __init__(self, api_keys: dict, proxy=None):
        """Store the key map (first key becomes the default) and set up state.

        api_keys: mapping of key alias -> API key string; must be non-empty.
        proxy: optional proxy URL forwarded to ChatCompletion.
        """
        self.api_keys = api_keys
        self.api_keys_key_list = list(api_keys)
        self.default_api_keys_key = self.api_keys_key_list[0]

        self.api = ChatCompletion(proxy)
        # One Conversations store per key alias, created lazily.
        self.conversations_map = {}
        self.system_prompt = getenv('API_SYSTEM_PROMPT', self.DEFAULT_SYSTEM_PROMPT)

    def __get_conversations(self, api_keys_key=None):
        """Return (creating on first use) the Conversations store for a key alias."""
        if api_keys_key is None:
            api_keys_key = self.default_api_keys_key

        if api_keys_key not in self.conversations_map:
            self.conversations_map[api_keys_key] = Conversations()

        return self.conversations_map[api_keys_key]

    def __is_fake_api(self, token=None):
        """True when the resolved API key uses the 'fk-'/'pk-' prefix scheme."""
        api_key = self.get_access_token(token)
        return api_key.startswith('fk-') or api_key.startswith('pk-')

    def get_access_token(self, token_key=None):
        """Resolve a key alias to its API key; falls back to the default alias.

        Raises KeyError for an unknown alias.
        """
        return self.api_keys[token_key or self.default_api_keys_key]

    def list_token_keys(self):
        """Return the list of configured key aliases, in insertion order."""
        return self.api_keys_key_list

    def list_models(self, raw=False, token=None):
        """Return the static model catalogue.

        max_tokens per model depends on whether the key is a fake ('fk-'/'pk-')
        key.  raw=True wraps the payload in a synthesized Response.
        """
        fake_api = self.__is_fake_api(token)

        models = {
            'models': [
                {
                    'slug': 'gpt-3.5-turbo',
                    'max_tokens': self.FAKE_TOKENS['gpt-3.5-turbo'] if fake_api else self.MAX_TOKENS['gpt-3.5-turbo'],
                    'title': 'GPT-3.5',
                    'description': 'Turbo is the api model that powers ChatGPT',
                    'tags': []
                },
                {
                    'slug': 'gpt-4',
                    'max_tokens': self.FAKE_TOKENS['gpt-4'] if fake_api else self.MAX_TOKENS['gpt-4'],
                    'title': 'GPT-4',
                    'description': 'More capable than any GPT-3.5, able to do complex tasks, and optimized for chat',
                    'tags': []
                },
                {
                    'slug': 'gpt-4-32k',
                    'max_tokens': self.FAKE_TOKENS['gpt-4-32k'] if fake_api else self.MAX_TOKENS['gpt-4-32k'],
                    'title': 'GPT-4 32K',
                    'description': 'Same capabilities as the base gpt-4 mode but with 4x the context length',
                    'tags': []
                }
            ]
        }

        if raw:
            return self.__wrap_response(models)

        return models['models']

    def list_conversations(self, offset, limit, raw=False, token=None):
        """Return a page of conversations for the key alias as ChatGPT-style items."""
        offset = int(offset)
        limit = int(limit)
        total, items = self.__get_conversations(token).list(offset, limit)

        stripped = []
        for item in items:
            stripped.append({
                'id': item.conversation_id,
                'title': item.title,
                # NOTE: utcfromtimestamp is deprecated since Python 3.12 and
                # yields a *naive* UTC datetime (isoformat has no offset).
                'create_time': dt.utcfromtimestamp(item.create_time).isoformat(),
            })

        result = {'items': stripped, 'total': total, 'limit': limit, 'offset': offset}

        if raw:
            return self.__wrap_response(result)

        return result

    def get_conversation(self, conversation_id, raw=False, token=None):
        """Fetch one conversation's info; 404-style error payload when missing."""
        def __shadow():
            try:
                conversation = self.__get_conversations(token).guard_get(conversation_id)
            except Exception as e:
                return self.__out_error(str(e), 404)

            return self.__wrap_response(conversation.get_info())

        resp = __shadow()

        if raw:
            return resp

        if resp.status_code != 200:
            raise Exception('get conversation failed: ' + resp.json()['detail'])

        return resp.json()

    def clear_conversations(self, raw=False, token=None):
        """Delete every conversation for the key alias; always reports success."""
        def __shadow():
            self.__get_conversations(token).clear()

            result = {
                'success': True
            }

            return self.__wrap_response(result)

        resp = __shadow()

        if raw:
            return resp

        return resp.json()['success']

    def del_conversation(self, conversation_id, raw=False, token=None):
        """Delete a single conversation; 404-style error payload when missing."""
        def __shadow():
            conversations = self.__get_conversations(token)

            try:
                conversation = conversations.guard_get(conversation_id)
            except Exception as e:
                return self.__out_error(str(e), 404)

            conversations.delete(conversation)

            result = {
                'success': True
            }

            return self.__wrap_response(result)

        resp = __shadow()

        if raw:
            return resp

        if resp.status_code != 200:
            raise Exception('delete conversation failed: ' + resp.json()['detail'])

        return resp.json()['success']

    def gen_conversation_title(self, conversation_id, model, message_id, raw=False, token=None):
        """Ask the model for a short title via TITLE_PROMPT and store it.

        No-op (with an explanatory message) when the conversation already has
        a title other than 'New chat'.  Uses a non-streaming completion and
        strips surrounding double quotes from the model's answer.
        """
        def __shadow():
            conversation = self.__get_conversations(token).get(conversation_id)
            if not conversation:
                return self.__out_error('Conversation not found', 404)

            if 'New chat' != conversation.title:
                message = {
                    'message': 'Conversation {} already has title \'{}\''.format(conversation_id, conversation.title)
                }
                return self.__wrap_response(message)

            messages = conversation.get_messages_directly(message_id)
            messages.append({'role': 'user', 'content': self.TITLE_PROMPT})

            # stream=False: the whole completion arrives as one generator item.
            status, header, generator = self.api.request(self.get_access_token(token), model, messages, False)
            last_ok, last = self.__get_completion(status, next(generator))

            if not last_ok:
                return self.__out_error(last['detail'], status)

            conversation.set_title(last.strip('"'))

            result = {
                'title': conversation.title
            }

            return self.__wrap_response(result)

        resp = __shadow()

        if raw:
            return resp

        if resp.status_code != 200:
            raise Exception('generate title failed: ' + resp.text)

        return resp.json()['title']

    def set_conversation_title(self, conversation_id, title, raw=False, token=None):
        """Set a conversation's title explicitly; 404-style error when missing."""
        def __shadow():
            try:
                conversation = self.__get_conversations(token).guard_get(conversation_id)
            except Exception as e:
                return self.__out_error(str(e), 404)

            conversation.set_title(title)

            result = {
                'success': True
            }

            return self.__wrap_response(result)

        resp = __shadow()

        if raw:
            return resp

        if resp.status_code != 200:
            raise Exception('update conversation failed: ' + resp.json()['detail'])

        return resp.json()['success']

    def talk(self, content, model, message_id, parent_message_id, conversation_id=None, stream=True, token=None):
        """Send a user message and return (status, headers, chunk generator).

        Without conversation_id a new conversation is created and seeded with
        the system prompt; on success with stream=True the system and user
        prompts are replayed as the first two chunks so the client sees the
        full exchange.  Errors surface as an error-shaped stream, never raise.
        """
        system_prompt = None
        if conversation_id:
            conversation = self.__get_conversations(token).get(conversation_id)
            if not conversation:
                return self.__out_error_stream('Conversation not found', 404)

            parent = conversation.get_prompt(parent_message_id)
        else:
            conversation = self.__get_conversations(token).new()
            parent = conversation.add_prompt(Prompt(parent_message_id))
            parent = system_prompt = conversation.add_prompt(SystemPrompt(self.system_prompt, parent))

        conversation.add_prompt(UserPrompt(message_id, content, parent))

        user_prompt, gpt_prompt, messages = conversation.get_messages(message_id, model)
        try:
            status, headers, generator = self.api.request(self.get_access_token(token), model,
                                                          self.__reduce_messages(messages, model, token), stream)
        except Exception as e:
            return self.__out_error_stream(str(e))

        def __out_generator():
            if 200 == status and system_prompt and stream:
                yield self.__out_stream(conversation, system_prompt)
                yield self.__out_stream(conversation, user_prompt)

            for line in generator:
                yield self.__map_conversation(status, conversation, gpt_prompt, line)

        return status, headers, __out_generator()

    def goon(self, model, parent_message_id, conversation_id, stream=True, token=None):
        """Continue generating from parent_message_id (thin regenerate_reply wrapper)."""
        return self.regenerate_reply(None, model, conversation_id, parent_message_id, None, stream, token)

    def regenerate_reply(self, prompt, model, conversation_id, message_id, parent_message_id, stream=True, token=None):
        """Re-request the assistant reply for an existing message.

        NOTE(review): the prompt and parent_message_id parameters are accepted
        but unused here — presumably kept for interface parity; confirm.
        """
        if not conversation_id:
            return self.__out_error_stream('Miss conversation_id', 400)

        conversation = self.__get_conversations(token).get(conversation_id)
        if not conversation:
            return self.__out_error_stream('Conversation not found', 404)

        user_prompt, gpt_prompt, messages = conversation.get_messages(message_id, model)
        try:
            status, headers, generator = self.api.request(self.get_access_token(token), model,
                                                          self.__reduce_messages(messages, model, token), stream)
        except Exception as e:
            return self.__out_error_stream(str(e))

        def __out_generator():
            for line in generator:
                yield self.__map_conversation(status, conversation, gpt_prompt, line)

        return status, headers, __out_generator()

    def __reduce_messages(self, messages, model, token=None):
        """Drop oldest messages until the prompt fits the model's token budget.

        Always removes index 1 (index 0 — the system prompt — is preserved),
        keeping a 200-token headroom for the reply.  Raises when even two
        messages exceed the budget.  Mutates and returns the same list.
        """
        max_tokens = self.FAKE_TOKENS[model] if self.__is_fake_api(token) else self.MAX_TOKENS[model]

        while gpt_num_tokens(messages) > max_tokens - 200:
            if len(messages) < 2:
                raise Exception('prompt too long')

            messages.pop(1)

        return messages

    def __out_error(self, error, status=500):
        """Build an error Response whose JSON body is {'detail': error}."""
        result = {
            'detail': error
        }

        return self.__wrap_response(result, status)

    def __out_error_stream(self, error, status=500):
        """Error shaped like a streamed reply: (status, headers, one-item generator)."""
        resp = self.__out_error(error, status)

        def __generator():
            yield resp.json()

        return resp.status_code, resp.headers, __generator()

    @staticmethod
    def __out_stream(conversation, prompt, end=True):
        """Build one ChatGPT-style stream chunk for a prompt in a conversation."""
        return {
            'message': prompt.get_message(end),
            'conversation_id': conversation.conversation_id,
            'error': None,
        }

    @staticmethod
    def __wrap_response(data, status=200):
        """Synthesize a requests.Response carrying `data` as its JSON body.

        Sets the private _content attribute directly so resp.json()/resp.text
        work without any HTTP round-trip.
        """
        resp = Response()
        resp.status_code = status
        resp._content = json.dumps(data).encode('utf-8')
        resp.headers['Content-Type'] = 'application/json'

        return resp

    @staticmethod
    def __get_completion(status, data):
        """Extract completion text from an API payload.

        Returns (True, text) on status 200 — handling both non-streaming
        ('message') and streaming ('delta') choice shapes — otherwise
        (False, {'detail': error_message}).
        """
        if status != 200:
            error = data['error']['message'] if 'error' in data else 'Unknown error'
            result = {
                'detail': error
            }
            return False, result

        choice = data['choices'][0]
        if 'message' in choice:
            text = choice['message'].get('content', '')
        else:
            text = choice['delta'].get('content', '')

        return True, text

    def __map_conversation(self, status, conversation, gpt_prompt, data):
        """Map one raw API chunk to a stream chunk, accumulating onto gpt_prompt.

        On error the {'detail': ...} dict is yielded through unchanged; the
        chunk is marked final when finish_reason == 'stop'.
        """
        success, result = self.__get_completion(status, data)
        if not success:
            return result

        choice = data['choices'][0]
        is_stop = 'stop' == choice['finish_reason']

        return self.__out_stream(conversation, gpt_prompt.append_content(result), is_stop)

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------

                    GNU GENERAL PUBLIC LICENSE
                       Version 2, June 1991

 Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
 51
Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 
38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. 
The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 
102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. 
You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 
165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. 
If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. 
If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 
292 | 293 | <one line to give the program's name and a brief idea of what it does.> 294 | Copyright (C) <year>  <name of author> 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker.
331 | 332 | <signature of Ty Coon>, 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | --------------------------------------------------------------------------------