├── g4f ├── Provider │ ├── bing │ │ ├── __init__.py │ │ └── __pycache__ │ │ │ ├── __init__.cpython-310.pyc │ │ │ ├── conversation.cpython-310.pyc │ │ │ ├── create_images.cpython-310.pyc │ │ │ └── upload_image.cpython-310.pyc │ ├── selenium │ │ ├── __init__.py │ │ └── __pycache__ │ │ │ ├── Phind.cpython-310.pyc │ │ │ └── __init__.cpython-310.pyc │ ├── deprecated │ │ ├── __pycache__ │ │ │ ├── Aibn.cpython-310.pyc │ │ │ ├── Ails.cpython-310.pyc │ │ │ ├── H2o.cpython-310.pyc │ │ │ ├── V50.cpython-310.pyc │ │ │ ├── Acytoo.cpython-310.pyc │ │ │ ├── Aivvm.cpython-310.pyc │ │ │ ├── DfeHub.cpython-310.pyc │ │ │ ├── Equing.cpython-310.pyc │ │ │ ├── FastGpt.cpython-310.pyc │ │ │ ├── GetGpt.cpython-310.pyc │ │ │ ├── Myshell.cpython-310.pyc │ │ │ ├── AiService.cpython-310.pyc │ │ │ ├── Cromicle.cpython-310.pyc │ │ │ ├── EasyChat.cpython-310.pyc │ │ │ ├── Forefront.cpython-310.pyc │ │ │ ├── Lockchat.cpython-310.pyc │ │ │ ├── Wewordle.cpython-310.pyc │ │ │ ├── Wuguokai.cpython-310.pyc │ │ │ ├── __init__.cpython-310.pyc │ │ │ ├── ChatgptDuo.cpython-310.pyc │ │ │ ├── CodeLinkAva.cpython-310.pyc │ │ │ └── Vitalentum.cpython-310.pyc │ │ ├── __init__.py │ │ ├── AiService.py │ │ ├── Forefront.py │ │ ├── ChatgptDuo.py │ │ ├── Cromicle.py │ │ ├── Aibn.py │ │ ├── Acytoo.py │ │ ├── Lockchat.py │ │ ├── CodeLinkAva.py │ │ ├── Vitalentum.py │ │ ├── Wuguokai.py │ │ ├── V50.py │ │ ├── Wewordle.py │ │ ├── DfeHub.py │ │ ├── NoowAi.py │ │ └── GetGpt.py │ ├── needs_auth │ │ ├── __pycache__ │ │ │ ├── Bard.cpython-310.pyc │ │ │ ├── Poe.cpython-310.pyc │ │ │ ├── Theb.cpython-310.pyc │ │ │ ├── Raycast.cpython-310.pyc │ │ │ ├── ThebApi.cpython-310.pyc │ │ │ ├── __init__.cpython-310.pyc │ │ │ ├── HuggingChat.cpython-310.pyc │ │ │ ├── OpenaiChat.cpython-310.pyc │ │ │ └── OpenAssistant.cpython-310.pyc │ │ ├── __init__.py │ │ ├── Raycast.py │ │ └── HuggingChat.py │ ├── unfinished │ │ ├── __pycache__ │ │ │ ├── Komo.cpython-310.pyc │ │ │ ├── ChatAiGpt.cpython-310.pyc │ │ │ ├── MikuChat.cpython-310.pyc │ │ │ 
├── __init__.cpython-310.pyc │ │ │ └── AiChatting.cpython-310.pyc │ │ ├── __init__.py │ │ ├── Komo.py │ │ ├── AiChatting.py │ │ └── ChatAiGpt.py │ ├── GptChatly.py │ ├── You.py │ ├── AiAsk.py │ ├── FreeGpt.py │ ├── GptTalkRu.py │ ├── Yqcloud.py │ ├── ChatAnywhere.py │ ├── Ylokh.py │ ├── ChatgptDemoAi.py │ ├── GptGod.py │ ├── OnlineGpt.py │ ├── GeminiProChat.py │ ├── Aura.py │ ├── Bestim.py │ ├── AiChatOnline.py │ ├── Gpt6.py │ ├── Chatxyz.py │ ├── ChatgptNext.py │ ├── GptGo.py │ ├── Opchatgpts.py │ ├── Koala.py │ ├── ChatForAi.py │ ├── MyShell.py │ ├── Aichat.py │ └── Pi.py ├── debug.py ├── api │ ├── run.py │ ├── _tokenizer.py │ └── _logging.py ├── errors.py ├── typing.py ├── cli.py └── utils.py ├── .vscode └── settings.json ├── babel.cfg ├── mobile-ui.png ├── webui-gpt.png ├── client ├── img │ ├── gpt.png │ ├── user.png │ ├── favicon.ico │ ├── favicon-16x16.png │ ├── favicon-32x32.png │ ├── apple-touch-icon.png │ ├── android-chrome-192x192.png │ ├── android-chrome-512x512.png │ └── site.webmanifest ├── css │ ├── buttons.css │ ├── options.css │ ├── dropdown.css │ ├── typing.css │ ├── field.css │ ├── main.css │ ├── label.css │ ├── message-input.css │ ├── style.css │ ├── button.css │ ├── stop-generating.css │ ├── select.css │ ├── settings.css │ ├── checkbox.css │ ├── message.css │ ├── global.css │ └── hljs.css ├── .prettierrc └── js │ ├── theme-toggler.js │ ├── highlightjs-copy.min.js │ ├── sidebar-toggler.js │ ├── change-language.js │ └── fullscreen-toggle.js ├── server ├── app.py ├── bp.py ├── babel.py └── website.py ├── config.json ├── translations ├── ar_SA │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── cs_CZ │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── da_DK │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── de_DE │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── el_GR │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── en_US │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── es_ES │ └── 
LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── es_MX │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── fi_FI │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── fr_FR │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── he_IL │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── hi_IN │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── hu_HU │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── id_ID │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── it_IT │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── ja_JP │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── ko_KR │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── nb_NO │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── nl_NL │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── pl_PL │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── pt_BR │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── ro_RO │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── ru_RU │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── sk_SK │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── sv_SE │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── th_TH │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── tr_TR │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── uk_UA │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── vi_VN │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── zh_Hans_CN │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po ├── zh_Hant_TW │ └── LC_MESSAGES │ │ ├── messages.mo │ │ └── messages.po └── README.md ├── get_working_providers.py ├── Dockerfile ├── requirements.txt └── run.py /g4f/Provider/bing/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "git.autofetch": true 3 | } -------------------------------------------------------------------------------- /g4f/Provider/selenium/__init__.py: -------------------------------------------------------------------------------- 1 | from .Phind import Phind -------------------------------------------------------------------------------- /babel.cfg: -------------------------------------------------------------------------------- 1 | [python: **/server/.py] 2 | [jinja2: **/client/html/**.html] 3 | -------------------------------------------------------------------------------- /mobile-ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/mobile-ui.png -------------------------------------------------------------------------------- /webui-gpt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/webui-gpt.png -------------------------------------------------------------------------------- /client/img/gpt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/client/img/gpt.png -------------------------------------------------------------------------------- /client/img/user.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/client/img/user.png -------------------------------------------------------------------------------- /client/css/buttons.css: -------------------------------------------------------------------------------- 1 | .buttons { 2 | display: flex; 3 | justify-content: left; 4 | } 5 | 6 | -------------------------------------------------------------------------------- 
/client/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/client/img/favicon.ico -------------------------------------------------------------------------------- /server/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | 3 | app = Flask(__name__, template_folder='./../client/html') -------------------------------------------------------------------------------- /client/img/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/client/img/favicon-16x16.png -------------------------------------------------------------------------------- /client/img/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/client/img/favicon-32x32.png -------------------------------------------------------------------------------- /client/img/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/client/img/apple-touch-icon.png -------------------------------------------------------------------------------- /client/img/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/client/img/android-chrome-192x192.png -------------------------------------------------------------------------------- /client/img/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/client/img/android-chrome-512x512.png -------------------------------------------------------------------------------- 
/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "site_config": { 3 | "host": "0.0.0.0", 4 | "port": 1338, 5 | "debug": false 6 | }, 7 | "url_prefix": "" 8 | } 9 | -------------------------------------------------------------------------------- /translations/ar_SA/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/ar_SA/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/cs_CZ/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/cs_CZ/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/da_DK/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/da_DK/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/de_DE/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/de_DE/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/el_GR/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/el_GR/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/en_US/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/en_US/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/es_ES/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/es_ES/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/es_MX/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/es_MX/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/fi_FI/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/fi_FI/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/fr_FR/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/fr_FR/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/he_IL/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/he_IL/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/hi_IN/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/hi_IN/LC_MESSAGES/messages.mo 
-------------------------------------------------------------------------------- /translations/hu_HU/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/hu_HU/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/id_ID/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/id_ID/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/it_IT/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/it_IT/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/ja_JP/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/ja_JP/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/ko_KR/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/ko_KR/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/nb_NO/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/nb_NO/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/nl_NL/LC_MESSAGES/messages.mo: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/nl_NL/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/pl_PL/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/pl_PL/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/pt_BR/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/pt_BR/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/ro_RO/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/ro_RO/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/ru_RU/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/ru_RU/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/sk_SK/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/sk_SK/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/sv_SE/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/sv_SE/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/th_TH/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/th_TH/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/tr_TR/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/tr_TR/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/uk_UA/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/uk_UA/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /translations/vi_VN/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/vi_VN/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /g4f/debug.py: -------------------------------------------------------------------------------- 1 | from .base_provider import ProviderType 2 | 3 | logging: bool = False 4 | version_check: bool = True 5 | last_provider: ProviderType = None -------------------------------------------------------------------------------- /translations/zh_Hans_CN/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/zh_Hans_CN/LC_MESSAGES/messages.mo 
-------------------------------------------------------------------------------- /translations/zh_Hant_TW/LC_MESSAGES/messages.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/translations/zh_Hant_TW/LC_MESSAGES/messages.mo -------------------------------------------------------------------------------- /g4f/Provider/bing/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/bing/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/Aibn.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/Aibn.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/Ails.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/Ails.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/H2o.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/H2o.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/V50.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/V50.cpython-310.pyc 
-------------------------------------------------------------------------------- /g4f/Provider/needs_auth/__pycache__/Bard.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/needs_auth/__pycache__/Bard.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/needs_auth/__pycache__/Poe.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/needs_auth/__pycache__/Poe.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/needs_auth/__pycache__/Theb.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/needs_auth/__pycache__/Theb.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/selenium/__pycache__/Phind.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/selenium/__pycache__/Phind.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/unfinished/__pycache__/Komo.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/unfinished/__pycache__/Komo.cpython-310.pyc -------------------------------------------------------------------------------- /client/.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "printWidth": 120, 3 | "tabWidth": 4, 4 | "useTabs": true, 5 | "semi": true, 6 | "singleQuote": false, 7 | "trailingComma": 
"es5" 8 | } 9 | -------------------------------------------------------------------------------- /g4f/Provider/bing/__pycache__/conversation.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/bing/__pycache__/conversation.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/bing/__pycache__/create_images.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/bing/__pycache__/create_images.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/bing/__pycache__/upload_image.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/bing/__pycache__/upload_image.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/Acytoo.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/Acytoo.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/Aivvm.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/Aivvm.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/DfeHub.cpython-310.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/DfeHub.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/Equing.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/Equing.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/FastGpt.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/FastGpt.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/GetGpt.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/GetGpt.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/Myshell.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/Myshell.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/needs_auth/__pycache__/Raycast.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/needs_auth/__pycache__/Raycast.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/needs_auth/__pycache__/ThebApi.cpython-310.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/needs_auth/__pycache__/ThebApi.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/selenium/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/selenium/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/AiService.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/AiService.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/Cromicle.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/Cromicle.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/EasyChat.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/EasyChat.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/Forefront.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/Forefront.cpython-310.pyc 
-------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/Lockchat.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/Lockchat.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/Wewordle.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/Wewordle.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/Wuguokai.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/Wuguokai.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/needs_auth/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/needs_auth/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/unfinished/__pycache__/ChatAiGpt.cpython-310.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/unfinished/__pycache__/ChatAiGpt.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/unfinished/__pycache__/MikuChat.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/unfinished/__pycache__/MikuChat.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/unfinished/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/unfinished/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/ChatgptDuo.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/ChatgptDuo.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/CodeLinkAva.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/CodeLinkAva.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__pycache__/Vitalentum.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/deprecated/__pycache__/Vitalentum.cpython-310.pyc -------------------------------------------------------------------------------- 
/g4f/Provider/needs_auth/__pycache__/HuggingChat.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/needs_auth/__pycache__/HuggingChat.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/needs_auth/__pycache__/OpenaiChat.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/needs_auth/__pycache__/OpenaiChat.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/unfinished/__pycache__/AiChatting.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/unfinished/__pycache__/AiChatting.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/needs_auth/__pycache__/OpenAssistant.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChatTeach/FreeGPT-4/HEAD/g4f/Provider/needs_auth/__pycache__/OpenAssistant.cpython-310.pyc -------------------------------------------------------------------------------- /g4f/Provider/unfinished/__init__.py: -------------------------------------------------------------------------------- 1 | from .MikuChat import MikuChat 2 | from .Komo import Komo 3 | from .ChatAiGpt import ChatAiGpt 4 | from .AiChatting import AiChatting -------------------------------------------------------------------------------- /client/css/options.css: -------------------------------------------------------------------------------- 1 | .options-container { 2 | display: flex; 3 | flex-wrap: wrap; 4 | } 5 | 6 | @media screen and (max-width: 990px) { 7 | .options-container { 8 | 
justify-content: space-between; 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /server/bp.py: -------------------------------------------------------------------------------- 1 | from flask import Blueprint 2 | 3 | bp = Blueprint('bp', __name__, 4 | template_folder='./../client/html', 5 | static_folder='./../client', 6 | static_url_path='assets') 7 | -------------------------------------------------------------------------------- /g4f/api/run.py: -------------------------------------------------------------------------------- 1 | import g4f 2 | import g4f.api 3 | 4 | if __name__ == "__main__": 5 | print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]') 6 | g4f.api.Api(engine = g4f, debug = True).run(ip = "0.0.0.0:10000") 7 | -------------------------------------------------------------------------------- /client/css/dropdown.css: -------------------------------------------------------------------------------- 1 | .dropdown { 2 | border: var(--blur-border); 3 | box-shadow: var(--blur-box-shadow); 4 | } 5 | 6 | @media screen and (max-width: 990px) { 7 | .dropdown { 8 | padding: 8px 8px; 9 | font-size: 0.75rem; 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /client/css/typing.css: -------------------------------------------------------------------------------- 1 | .typing { 2 | position: absolute; 3 | top: -25px; 4 | left: 0; 5 | font-size: 14px; 6 | animation: show_popup 0.4s; 7 | } 8 | 9 | .typing-hiding { 10 | animation: hide_popup 0.4s; 11 | } 12 | 13 | .typing-hidden { 14 | display: none; 15 | } 16 | -------------------------------------------------------------------------------- /get_working_providers.py: -------------------------------------------------------------------------------- 1 | from g4f.active_providers import get_active_model_providers 2 | 3 | working_providers = get_active_model_providers() 4 | 5 | print("\nWorking providers by 
model:") 6 | for model, providers in working_providers.items(): 7 | print(f"{model}: {', '.join(providers)}") 8 | -------------------------------------------------------------------------------- /client/css/field.css: -------------------------------------------------------------------------------- 1 | .field { 2 | display: flex; 3 | align-items: center; 4 | padding: 10px 0; 5 | } 6 | 7 | .fullscreen { 8 | display: none; 9 | } 10 | 11 | @media screen and (max-width: 990px) { 12 | .field { 13 | flex-wrap: nowrap; 14 | } 15 | .fullscreen { 16 | display: flex !important; 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /g4f/api/_tokenizer.py: -------------------------------------------------------------------------------- 1 | # import tiktoken 2 | # from typing import Union 3 | 4 | # def tokenize(text: str, model: str = 'gpt-3.5-turbo') -> Union[int, str]: 5 | # encoding = tiktoken.encoding_for_model(model) 6 | # encoded = encoding.encode(text) 7 | # num_tokens = len(encoded) 8 | 9 | # return num_tokens, encoded -------------------------------------------------------------------------------- /g4f/Provider/needs_auth/__init__.py: -------------------------------------------------------------------------------- 1 | from .Bard import Bard 2 | from .Raycast import Raycast 3 | from .Theb import Theb 4 | from .ThebApi import ThebApi 5 | from .HuggingChat import HuggingChat 6 | from .OpenaiChat import OpenaiChat 7 | from .OpenAssistant import OpenAssistant 8 | from .Poe import Poe -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-slim-buster 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt requirements.txt 6 | 7 | RUN python -m venv venv 8 | ENV PATH="/app/venv/bin:$PATH" 9 | 10 | RUN pip install --upgrade pip && \ 11 | pip install --no-cache-dir -r requirements.txt 12 | 13 | COPY 
. . 14 | 15 | RUN chmod -R 777 translations 16 | 17 | CMD ["python3", "./run.py"] 18 | -------------------------------------------------------------------------------- /client/css/main.css: -------------------------------------------------------------------------------- 1 | .main-container { 2 | display: flex; 3 | width: max(100%); 4 | height: max(100%); 5 | padding: 20px; 6 | align-content: space-around; 7 | justify-content: space-around; 8 | align-items: center; 9 | box-sizing: border-box; 10 | flex-direction: row; 11 | } 12 | 13 | @media screen and (max-width: 990px) { 14 | .main-container { 15 | padding: 40px 10px 10px 10px; 16 | } 17 | } 18 | 19 | 20 | -------------------------------------------------------------------------------- /client/css/label.css: -------------------------------------------------------------------------------- 1 | label { 2 | cursor: pointer; 3 | text-indent: -9999px; 4 | width: 50px; 5 | height: 30px; 6 | backdrop-filter: blur(20px); 7 | -webkit-backdrop-filter: blur(20px); 8 | background-color: var(--blur-bg); 9 | border-radius: var(--border-radius-1); 10 | border: 1px solid var(--blur-border); 11 | display: block; 12 | border-radius: 100px; 13 | position: relative; 14 | overflow: hidden; 15 | transition: 0.33s; 16 | } 17 | -------------------------------------------------------------------------------- /client/css/message-input.css: -------------------------------------------------------------------------------- 1 | #message-input::-webkit-scrollbar { 2 | width: 5px; 3 | } 4 | 5 | #message-input::-webkit-scrollbar-track { 6 | background: #f1f1f1; 7 | } 8 | 9 | #message-input::-webkit-scrollbar-thumb { 10 | background: #c7a2ff; 11 | } 12 | 13 | #message-input::-webkit-scrollbar-thumb:hover { 14 | background: #8b3dff; 15 | } 16 | 17 | @media screen and (max-width: 360px) { 18 | #message-input { 19 | margin: 0; 20 | } 21 | } 22 | 23 | -------------------------------------------------------------------------------- /client/css/style.css: 
-------------------------------------------------------------------------------- 1 | @import "./global.css"; 2 | @import "./hljs.css"; 3 | @import "./main.css"; 4 | @import "./sidebar.css"; 5 | @import "./conversation.css"; 6 | @import "./message.css"; 7 | @import "./stop-generating.css"; 8 | @import "./typing.css"; 9 | @import "./checkbox.css"; 10 | @import "./label.css"; 11 | @import "./button.css"; 12 | @import "./buttons.css"; 13 | @import "./dropdown.css"; 14 | @import "./field.css"; 15 | @import "./select.css"; 16 | @import "./options.css"; 17 | @import "./settings.css"; 18 | @import "./message-input.css"; 19 | -------------------------------------------------------------------------------- /client/css/button.css: -------------------------------------------------------------------------------- 1 | .button { 2 | display: flex; 3 | padding: 8px 12px; 4 | align-items: center; 5 | justify-content: center; 6 | border: var(--blur-border); 7 | border-radius: var(--border-radius-1); 8 | box-shadow: var(--blur-box-shadow); 9 | width: 100%; 10 | background: transparent; 11 | cursor: pointer; 12 | } 13 | 14 | .button span { 15 | color: var(--colour-3); 16 | font-size: 0.875rem; 17 | padding: 8px; 18 | } 19 | 20 | .button i::before { 21 | margin-right: 8px; 22 | } 23 | 24 | @media screen and (max-width: 990px) { 25 | .button span { 26 | font-size: 0.75rem; 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /g4f/errors.py: -------------------------------------------------------------------------------- 1 | class ProviderNotFoundError(Exception): 2 | pass 3 | 4 | class ProviderNotWorkingError(Exception): 5 | pass 6 | 7 | class StreamNotSupportedError(Exception): 8 | pass 9 | 10 | class AuthenticationRequiredError(Exception): 11 | pass 12 | 13 | class ModelNotFoundError(Exception): 14 | pass 15 | 16 | class ModelNotAllowedError(Exception): 17 | pass 18 | 19 | class RetryProviderError(Exception): 20 | pass 21 | 22 | class 
"display": "standalone", 19 | "display_override": ["fullscreen", "minimal-ui"], 20 | "start_url": "/index.html" 21 | }
setTheme(themeName) { 6 | localStorage.setItem("theme", themeName); 7 | document.documentElement.className = themeName; 8 | } 9 | 10 | function toggleTheme() { 11 | var currentTheme = localStorage.getItem("theme"); 12 | var newTheme = currentTheme === "theme-dark" ? "theme-light" : "theme-dark"; 13 | 14 | setTheme(newTheme); 15 | switch_theme_toggler.checked = newTheme === "theme-dark"; 16 | } 17 | 18 | (function () { 19 | var currentTheme = localStorage.getItem("theme") || "theme-dark"; 20 | setTheme(currentTheme); 21 | switch_theme_toggler.checked = currentTheme === "theme-dark"; 22 | })(); 23 | -------------------------------------------------------------------------------- /client/css/stop-generating.css: -------------------------------------------------------------------------------- 1 | .stop-generating { 2 | position: absolute; 3 | bottom: 128px; 4 | left: 50%; 5 | transform: translateX(-50%); 6 | z-index: 1000000; 7 | 8 | } 9 | 10 | .stop-generating button { 11 | backdrop-filter: blur(20px); 12 | -webkit-backdrop-filter: blur(20px); 13 | background-color: var(--blur-bg); 14 | color: var(--colour-3); 15 | cursor: pointer; 16 | animation: show_popup 0.4s; 17 | } 18 | 19 | @keyframes show_popup { 20 | from { 21 | opacity: 0; 22 | transform: translateY(10px); 23 | } 24 | } 25 | 26 | @keyframes hide_popup { 27 | to { 28 | opacity: 0; 29 | transform: translateY(10px); 30 | } 31 | } 32 | 33 | .stop-generating-hiding button { 34 | animation: hide_popup 0.4s; 35 | } 36 | 37 | .stop-generating-hidden button { 38 | display: none; 39 | } 40 | -------------------------------------------------------------------------------- /client/css/select.css: -------------------------------------------------------------------------------- 1 | select { 2 | -webkit-border-radius: 8px; 3 | -moz-border-radius: 8px; 4 | border-radius: 8px; 5 | 6 | -webkit-backdrop-filter: blur(20px); 7 | backdrop-filter: blur(20px); 8 | 9 | cursor: pointer; 10 | background-color: var(--blur-bg); 11 | 
border: 1px solid var(--blur-border); 12 | color: var(--colour-3); 13 | display: block; 14 | position: relative; 15 | overflow: hidden; 16 | outline: none; 17 | padding: 8px 16px; 18 | 19 | appearance: none; 20 | } 21 | 22 | /* scrollbar */ 23 | select.dropdown::-webkit-scrollbar { 24 | width: 4px; 25 | padding: 8px 0px; 26 | } 27 | 28 | select.dropdown::-webkit-scrollbar-track { 29 | background-color: #ffffff00; 30 | } 31 | 32 | select.dropdown::-webkit-scrollbar-thumb { 33 | background-color: #555555; 34 | border-radius: 10px; 35 | } 36 | -------------------------------------------------------------------------------- /g4f/typing.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict, Type, IO 3 | from PIL.Image import Image 4 | 5 | if sys.version_info >= (3, 8): 6 | from typing import TypedDict 7 | else: 8 | from typing_extensions import TypedDict 9 | 10 | SHA256 = NewType('sha_256_hash', str) 11 | CreateResult = Generator[str, None, None] 12 | AsyncResult = AsyncGenerator[str, None] 13 | Messages = List[Dict[str, str]] 14 | ImageType = Union[str, bytes, IO, Image, None] 15 | 16 | __all__ = [ 17 | 'Any', 18 | 'AsyncGenerator', 19 | 'Generator', 20 | 'Tuple', 21 | 'Union', 22 | 'List', 23 | 'Dict', 24 | 'Type', 25 | 'TypedDict', 26 | 'SHA256', 27 | 'CreateResult', 28 | 'AsyncResult', 29 | 'Messages', 30 | 'ImageType' 31 | ] 32 | -------------------------------------------------------------------------------- /g4f/Provider/deprecated/__init__.py: -------------------------------------------------------------------------------- 1 | from .AiService import AiService 2 | from .CodeLinkAva import CodeLinkAva 3 | from .DfeHub import DfeHub 4 | from .EasyChat import EasyChat 5 | from .Forefront import Forefront 6 | from .GetGpt import GetGpt 7 | from .Lockchat import Lockchat 8 | from .Wewordle import Wewordle 9 | from .Equing import Equing 
/**
 * highlight.js plugin that adds a "Copy" button to highlighted code blocks.
 *
 * Options:
 *   hook(text, el)     - optional transform applied to the text before copying.
 *   callback(text, el) - optional function invoked after a successful copy.
 *
 * BUG FIX: the original (minified) code did `self.hook = options.hook` in the
 * constructor. Inside a class method `self` is NOT the instance — in browsers
 * it is `window` — so the options leaked into globals and every plugin
 * instance shared a single hook/callback. State is now kept on `this`.
 */
class CopyButtonPlugin {
    constructor(options = {}) {
        // Per-instance configuration instead of accidental globals.
        this.hook = options.hook;
        this.callback = options.callback;
    }

    "after:highlightElement"({ el, text }) {
        // Capture instance options for use inside the click handler below.
        const hook = this.hook;
        const callback = this.callback;

        const button = Object.assign(document.createElement("button"), {
            innerHTML: "Copy",
            className: "hljs-copy-button",
        });
        button.dataset.copied = false;
        el.parentElement.classList.add("hljs-copy-wrapper");
        el.parentElement.appendChild(button);
        // Expose the code block's background color so the button can match it.
        el.parentElement.style.setProperty(
            "--hljs-theme-background",
            window.getComputedStyle(el).backgroundColor
        );

        button.onclick = function () {
            // Clipboard API is unavailable on insecure origins; silently bail.
            if (!navigator.clipboard) return;
            let newText = text;
            if (hook && typeof hook === "function") {
                newText = hook(text, el) || text;
            }
            navigator.clipboard
                .writeText(newText)
                .then(function () {
                    button.innerHTML = "Copied!";
                    button.dataset.copied = true;
                    // Visually-hidden status element for screen readers.
                    let alert = Object.assign(document.createElement("div"), {
                        role: "status",
                        className: "hljs-copy-alert",
                        innerHTML: "Copied to clipboard",
                    });
                    el.parentElement.appendChild(alert);
                    // Reset the button and remove the alert after 2 seconds.
                    setTimeout(() => {
                        button.innerHTML = "Copy";
                        button.dataset.copied = false;
                        el.parentElement.removeChild(alert);
                        alert = null;
                    }, 2e3);
                })
                .then(function () {
                    if (typeof callback === "function") return callback(newText, el);
                });
        };
    }
}
from __future__ import annotations

from ..requests import Session, get_session_from_browser
from ..typing import Messages
from .base_provider import AsyncProvider


class GptChatly(AsyncProvider):
    """Provider for gptchatly.com.

    Routes to the GPT-4 endpoint when the model name starts with "gpt-4",
    otherwise to the GPT-3.5 endpoint, and returns the full reply as a string.
    """
    url = "https://gptchatly.com"
    working = True
    supports_message_history = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        session: Session = None,
        **kwargs
    ) -> str:
        # Lazily create a browser-backed session when the caller didn't pass one.
        session = session or get_session_from_browser(cls.url, proxy=proxy, timeout=timeout)
        # NOTE: "felch-response" is the site's actual endpoint spelling — do not "fix" it.
        endpoint = "fetch-gpt4-response" if model.startswith("gpt-4") else "felch-response"
        payload = {"past_conversations": messages}
        response = session.post(f"{cls.url}/{endpoint}", json=payload)
        response.raise_for_status()
        return response.json()["chatGPTResponse"]
window.getComputedStyle(menuButton); 33 | if (menuButtonStyle.display !== 'none') { 34 | hideSidebar(menuButton); 35 | } 36 | } 37 | }); 38 | -------------------------------------------------------------------------------- /g4f/Provider/deprecated/AiService.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import requests 4 | 5 | from ...typing import Any, CreateResult, Messages 6 | from ..base_provider import AbstractProvider 7 | 8 | 9 | class AiService(AbstractProvider): 10 | url = "https://aiservice.vercel.app/" 11 | working = False 12 | supports_gpt_35_turbo = True 13 | 14 | @staticmethod 15 | def create_completion( 16 | model: str, 17 | messages: Messages, 18 | stream: bool, 19 | **kwargs: Any, 20 | ) -> CreateResult: 21 | base = ( 22 | "\n".join( 23 | f"{message['role']}: {message['content']}" for message in messages 24 | ) 25 | + "\nassistant: " 26 | ) 27 | headers = { 28 | "accept": "*/*", 29 | "content-type": "text/plain;charset=UTF-8", 30 | "sec-fetch-dest": "empty", 31 | "sec-fetch-mode": "cors", 32 | "sec-fetch-site": "same-origin", 33 | "Referer": "https://aiservice.vercel.app/chat", 34 | } 35 | data = {"input": base} 36 | url = "https://aiservice.vercel.app/api/chat/answer" 37 | response = requests.post(url, headers=headers, json=data) 38 | response.raise_for_status() 39 | yield response.json()["data"] 40 | -------------------------------------------------------------------------------- /client/css/message.css: -------------------------------------------------------------------------------- 1 | .message { 2 | width: max(100%); 3 | overflow-wrap: anywhere; 4 | display: flex; 5 | gap: var(--section-gap); 6 | padding: var(--section-gap); 7 | padding-bottom: 0; 8 | } 9 | 10 | .message:last-child { 11 | animation: 0.6s show_message; 12 | } 13 | 14 | @keyframes show_message { 15 | from { 16 | transform: translateY(10px); 17 | opacity: 0; 18 | } 19 | } 20 | 21 | .message 
.avatar-container img { 22 | max-width: 48px; 23 | max-height: 48px; 24 | box-shadow: 0.4px 0.5px 0.7px -2px rgba(0, 0, 0, 0.08), 1.1px 1.3px 2px -2px rgba(0, 0, 0, 0.041), 25 | 2.7px 3px 4.8px -2px rgba(0, 0, 0, 0.029), 9px 10px 16px -2px rgba(0, 0, 0, 0.022); 26 | } 27 | 28 | .message .content { 29 | display: flex; 30 | flex-direction: column; 31 | gap: 18px; 32 | } 33 | 34 | .message .content p, 35 | .message .content li, 36 | .message .content code { 37 | font-size: 1rem; 38 | line-height: 1.3; 39 | } 40 | 41 | @media screen and (max-height: 100%) { 42 | .message { 43 | padding: 12px; 44 | gap: 0; 45 | } 46 | 47 | .message .content { 48 | margin-left: 8px; 49 | width: max(80%); 50 | } 51 | 52 | .message .avatar-container img { 53 | max-width: 32px; 54 | max-height: 32px; 55 | } 56 | 57 | .message .content, 58 | .message .content p, 59 | .message .content li, 60 | .message .content code { 61 | font-size: 0.875rem; 62 | line-height: 1.3; 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /g4f/cli.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from enum import Enum 3 | 4 | import g4f 5 | from g4f import Provider 6 | 7 | from g4f.api import Api 8 | from g4f.gui.run import gui_parser, run_gui_args 9 | 10 | def run_gui(args): 11 | print("Running GUI...") 12 | 13 | def main(): 14 | IgnoredProviders = Enum("ignore_providers", {key: key for key in Provider.__all__}) 15 | parser = argparse.ArgumentParser(description="Run gpt4free") 16 | subparsers = parser.add_subparsers(dest="mode", help="Mode to run the g4f in.") 17 | api_parser=subparsers.add_parser("api") 18 | api_parser.add_argument("--bind", default="0.0.0.0:1337", help="The bind string.") 19 | api_parser.add_argument("--debug", type=bool, default=False, help="Enable verbose logging") 20 | api_parser.add_argument("--ignored-providers", nargs="+", choices=[provider.name for provider in IgnoredProviders], 21 
| default=[], help="List of providers to ignore when processing request.") 22 | subparsers.add_parser("gui", parents=[gui_parser()], add_help=False) 23 | 24 | args = parser.parse_args() 25 | if args.mode == "api": 26 | controller=Api(engine=g4f, debug=args.debug, list_ignored_providers=args.ignored_providers) 27 | controller.run(args.bind) 28 | elif args.mode == "gui": 29 | run_gui_args(args) 30 | else: 31 | parser.print_help() 32 | exit(1) 33 | 34 | if __name__ == "__main__": 35 | main() 36 | -------------------------------------------------------------------------------- /g4f/Provider/deprecated/Forefront.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import json 4 | 5 | import requests 6 | 7 | from ...typing import Any, CreateResult 8 | from ..base_provider import AbstractProvider 9 | 10 | 11 | class Forefront(AbstractProvider): 12 | url = "https://forefront.com" 13 | supports_stream = True 14 | supports_gpt_35_turbo = True 15 | 16 | @staticmethod 17 | def create_completion( 18 | model: str, 19 | messages: list[dict[str, str]], 20 | stream: bool, **kwargs: Any) -> CreateResult: 21 | 22 | json_data = { 23 | "text" : messages[-1]["content"], 24 | "action" : "noauth", 25 | "id" : "", 26 | "parentId" : "", 27 | "workspaceId" : "", 28 | "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0", 29 | "model" : "gpt-4", 30 | "messages" : messages[:-1] if len(messages) > 1 else [], 31 | "internetMode" : "auto", 32 | } 33 | 34 | response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat", 35 | json=json_data, stream=True) 36 | 37 | response.raise_for_status() 38 | for token in response.iter_lines(): 39 | if b"delta" in token: 40 | yield json.loads(token.decode().split("data: ")[1])["delta"] 41 | -------------------------------------------------------------------------------- /g4f/Provider/You.py: 
-------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import json 4 | 5 | from ..requests import StreamSession 6 | from ..typing import AsyncGenerator, Messages 7 | from .base_provider import AsyncGeneratorProvider, format_prompt 8 | 9 | 10 | class You(AsyncGeneratorProvider): 11 | url = "https://you.com" 12 | working = True 13 | supports_gpt_35_turbo = True 14 | 15 | 16 | @classmethod 17 | async def create_async_generator( 18 | cls, 19 | model: str, 20 | messages: Messages, 21 | proxy: str = None, 22 | timeout: int = 120, 23 | **kwargs, 24 | ) -> AsyncGenerator: 25 | async with StreamSession(proxies={"https": proxy}, impersonate="chrome107", timeout=timeout) as session: 26 | headers = { 27 | "Accept": "text/event-stream", 28 | "Referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat", 29 | } 30 | data = {"q": format_prompt(messages), "domain": "youchat", "chat": ""} 31 | async with session.get( 32 | f"{cls.url}/api/streamingSearch", 33 | params=data, 34 | headers=headers 35 | ) as response: 36 | response.raise_for_status() 37 | start = b'data: {"youChatToken": ' 38 | async for line in response.iter_lines(): 39 | if line.startswith(start): 40 | yield json.loads(line[len(start):-1]) 41 | -------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | import secrets 2 | 3 | from server.bp import bp 4 | from server.website import Website 5 | from server.backend import Backend_Api 6 | from server.babel import create_babel 7 | from json import load 8 | from flask import Flask 9 | 10 | if __name__ == '__main__': 11 | 12 | # Load configuration from config.json 13 | config = load(open('config.json', 'r')) 14 | site_config = config['site_config'] 15 | url_prefix = config.pop('url_prefix') 16 | 17 | # Create the app 18 | app = Flask(__name__) 19 | app.secret_key = secrets.token_hex(16) 20 
from __future__ import annotations

from ...typing import Messages
from curl_cffi.requests import AsyncSession
from ..base_provider import AsyncProvider, format_prompt


class ChatgptDuo(AsyncProvider):
    """Deprecated provider for chatgptduo.com.

    Sends the formatted prompt as both the chat prompt and the search query,
    stores the search sources from the latest response on the class, and
    returns the answer text.
    """
    url = "https://chatgptduo.com"
    supports_gpt_35_turbo = True
    working = False
    # Sources captured by the most recent request; empty until create_async
    # succeeds, so get_sources() is always safe to call.
    _sources: list = []

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        **kwargs
    ) -> str:
        async with AsyncSession(
            impersonate="chrome107",
            proxies={"https": proxy},
            timeout=timeout
        ) as session:
            # BUG FIX: the original line ended with a stray comma
            # (`prompt = format_prompt(messages),`), turning `prompt` into a
            # one-element tuple and corrupting both form fields below.
            prompt = format_prompt(messages)
            data = {
                "prompt": prompt,
                "search": prompt,
                "purpose": "ask",
            }
            response = await session.post(f"{cls.url}/", data=data)
            response.raise_for_status()
            data = response.json()

            cls._sources = [{
                "title": source["title"],
                "url": source["link"],
                "snippet": source["snippet"]
            } for source in data["results"]]

            return data["answer"]

    @classmethod
    def get_sources(cls):
        """Return the sources captured by the most recent create_async call."""
        return cls._sources
from ...requests import StreamSession 8 | from ..base_provider import AsyncGeneratorProvider 9 | 10 | 11 | class Aibn(AsyncGeneratorProvider): 12 | url = "https://aibn.cc" 13 | working = False 14 | supports_message_history = True 15 | supports_gpt_35_turbo = True 16 | 17 | @classmethod 18 | async def create_async_generator( 19 | cls, 20 | model: str, 21 | messages: Messages, 22 | proxy: str = None, 23 | timeout: int = 120, 24 | **kwargs 25 | ) -> AsyncResult: 26 | async with StreamSession( 27 | impersonate="chrome107", 28 | proxies={"https": proxy}, 29 | timeout=timeout 30 | ) as session: 31 | timestamp = int(time.time()) 32 | data = { 33 | "messages": messages, 34 | "pass": None, 35 | "sign": generate_signature(timestamp, messages[-1]["content"]), 36 | "time": timestamp 37 | } 38 | async with session.post(f"{cls.url}/api/generate", json=data) as response: 39 | response.raise_for_status() 40 | async for chunk in response.iter_content(): 41 | yield chunk.decode() 42 | 43 | 44 | def generate_signature(timestamp: int, message: str, secret: str = "undefined"): 45 | data = f"{timestamp}:{message}:{secret}" 46 | return hashlib.sha256(data.encode()).hexdigest() -------------------------------------------------------------------------------- /translations/ko_KR/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Korean (South Korea) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: ko_KR\n" 14 | "Language-Team: ko_KR \n" 15 | "Plural-Forms: nplurals=1; plural=0;\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "새 대화" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "대화 지우기" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "API 키" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "확인" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "다크 모드" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "언어" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "생성 중지" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "질문하기" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "이미지" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "기본값" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "악" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "웹 접근" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "버전" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "모델" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "공급자" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "탈옥" -------------------------------------------------------------------------------- /translations/ja_JP/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Japanese (Japan) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: ja_JP\n" 14 | "Language-Team: ja_JP \n" 15 | "Plural-Forms: nplurals=1; plural=0;\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "新しい会話" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "会話をクリア" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "APIキー" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "OK" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "ダークモード" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "言語" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "生成を停止" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "質問をする" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "画像" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "デフォルト" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "悪" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "ウェブアクセス" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "バージョン" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "モデル" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "プロバイダー" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "脱獄" -------------------------------------------------------------------------------- /translations/zh_Hans_CN/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Chinese (Simplified, China) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
import os
import subprocess
from flask import request, session, jsonify
from flask_babel import Babel


def get_languages_from_dir(directory):
    """Return the names of all subdirectories of *directory* (one per locale)."""
    return [name for name in os.listdir(directory)
            if os.path.isdir(os.path.join(directory, name))]


BABEL_DEFAULT_LOCALE = 'en_US'
# Available locales are discovered from the on-disk translations tree.
BABEL_LANGUAGES = get_languages_from_dir('translations')


def create_babel(app):
    """Create and initialize a Babel instance with the given Flask app."""
    babel = Babel(app)
    app.config['BABEL_DEFAULT_LOCALE'] = BABEL_DEFAULT_LOCALE
    app.config['BABEL_LANGUAGES'] = BABEL_LANGUAGES

    babel.init_app(app, locale_selector=get_locale)
    compile_translations()


def get_locale():
    """Get the user's locale from the session or the request's accepted languages."""
    return session.get('language') or request.accept_languages.best_match(BABEL_LANGUAGES)


def get_languages():
    """Return a list of available languages in JSON format."""
    return jsonify(BABEL_LANGUAGES)


def compile_translations():
    """Compile the .po translation files.

    Raises:
        Exception: if pybabel exits non-zero, with pybabel's output attached.
    """
    # BUG FIX: pybabel writes its error messages to stderr; the original only
    # piped stdout, so the exception below carried an empty message. Merging
    # stderr into stdout preserves the diagnostic output.
    result = subprocess.run(
        ['pybabel', 'compile', '-d', 'translations'],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )

    if result.returncode != 0:
        raise Exception(
            f'Compiling translations failed:\n{result.stdout.decode()}')

    print('Translations compiled successfully')
class Acytoo(AsyncGeneratorProvider):
    """Streaming provider for chat.acytoo.com (deprecated; no longer working)."""

    url = 'https://chat.acytoo.com'
    working = False
    supports_message_history = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield decoded chunks from the completions endpoint as they arrive."""
        async with ClientSession(headers=_create_header()) as session:
            async with session.post(
                f'{cls.url}/api/completions',
                proxy=proxy,
                json=_create_payload(messages, **kwargs)
            ) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    if chunk:
                        yield chunk.decode()


def _create_header():
    """Minimal headers for a JSON request."""
    return {'accept': '*/*', 'content-type': 'application/json'}


def _create_payload(messages: Messages, temperature: float = 0.5, **kwargs):
    """Build the completions request body; key/password are sent empty."""
    payload = {
        'key': '',
        'model': 'gpt-3.5-turbo',
        'messages': messages,
        'temperature': temperature,
        'password': '',
    }
    return payload
class Komo(AsyncGeneratorProvider):
    """Provider for komo.ai's ask endpoint (unfinished)."""

    url = "https://komo.ai/api/ask"
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Stream answer fragments from komo.ai's server-sent-event endpoint.

        The server emits an "event: line" record followed by a "data: <json>"
        record; only the data record that follows a line event is yielded.
        """
        async with StreamSession(impersonate="chrome107") as session:
            params = {
                "query": format_prompt(messages),
                "FLAG_URLEXTRACT": "false",
                "token": "",
                "FLAG_MODELA": "1",
            }
            headers = {
                'authority': 'komo.ai',
                'accept': 'text/event-stream',
                'cache-control': 'no-cache',
                'referer': 'https://komo.ai/',
            }

            async with session.get(cls.url, params=params, headers=headers) as response:
                response.raise_for_status()
                # Renamed from `next` in the original to avoid shadowing the builtin.
                expect_data = False
                async for line in response.iter_lines():
                    if line == b"event: line":
                        expect_data = True
                    elif expect_data and line.startswith(b"data: "):
                        yield json.loads(line[6:])
                        expect_data = False
79 | #: 80 | msgid "Provider" 81 | msgstr "Fornecedor" 82 | 83 | #: 84 | msgid "Jailbreak" 85 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/he_IL/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Hebrew (Israel) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: he_IL\n" 14 | "Language-Team: he_IL \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "שיחה חדשה" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "נקה שיחות" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "מפתח API" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "אוקיי" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "מצב כהה" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "שפה" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "הפסק ליצור" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "שאל שאלה" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "תמונה" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "ברירת מחדל" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "רשע" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "גישה לאינטרנט" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "גרסה" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "דֶגֶם" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "ספק" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" 
-------------------------------------------------------------------------------- /translations/en_US/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # English (United States) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 5 | # Contributors: 6 | # Ramon - github.com/ramonvc/ 7 | # ... 8 | msgid "" 9 | msgstr "" 10 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 11 | "MIME-Version: 1.0\n" 12 | "Language: en_US\n" 13 | "Language-Team: en_US \n" 14 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 15 | "Content-Type: text/plain; charset=utf-8\n" 16 | "Content-Transfer-Encoding: 8bit\n" 17 | "Generated-By: Babel 2.12.1\n" 18 | 19 | #: 20 | msgid "New Conversation" 21 | msgstr "New Conversation" 22 | 23 | #: 24 | msgid "Clear Conversations" 25 | msgstr "Clear Conversations" 26 | 27 | #: 28 | msgid "API Key" 29 | msgstr "API Key" 30 | 31 | #: 32 | msgid "Ok" 33 | msgstr "Ok" 34 | 35 | #: 36 | msgid "Dark Mode" 37 | msgstr "Dark Mod" 38 | 39 | #: 40 | msgid "Language" 41 | msgstr "Language" 42 | 43 | #: 44 | msgid "Stop Generating" 45 | msgstr "Stop Generating" 46 | 47 | #: 48 | msgid "Ask a question" 49 | msgstr "Ask a question" 50 | 51 | #: 52 | msgid "IMAGE" 53 | msgstr "IMAGE" 54 | 55 | #: 56 | msgid "Default" 57 | msgstr "Default" 58 | 59 | #: 60 | msgid "DAN" 61 | msgstr "DAN" 62 | 63 | #: 64 | msgid "Evil" 65 | msgstr "Evil" 66 | 67 | #: 68 | msgid "Web Access" 69 | msgstr "Web Access" 70 | 71 | #: 72 | msgid "Version" 73 | msgstr "Version" 74 | #: 75 | msgid "Model" 76 | msgstr "Μodel" 77 | 78 | #: 79 | msgid "Provider" 80 | msgstr "Provider" 81 | 82 | #: 83 | msgid "Jailbreak" 84 | msgstr "Jailbreak" 85 | 86 | 87 | -------------------------------------------------------------------------------- /translations/hi_IN/LC_MESSAGES/messages.po: 
-------------------------------------------------------------------------------- 1 | # Hindi (India) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: hi_IN\n" 14 | "Language-Team: hi_IN \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "नई बातचीत" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "बातचीत साफ करें" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "API कुंजी" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "ठीक है" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "डार्क मोड" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "भाषा" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "उत्पादन रोकें" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "एक प्रश्न पूछें" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "छवि" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "डिफ़ॉल्ट" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "बुराई" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "वेब पहुंच" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "संस्करण" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "नमूना" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "प्रदाता" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/th_TH/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Thai (Thailand) translations for FreeGPT WebUI. 
2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: th_TH\n" 14 | "Language-Team: th_TH \n" 15 | "Plural-Forms: nplurals=1; plural=0;\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "การสนทนาใหม่" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "ล้างการสนทนา" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "คีย์ API" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "ตกลง" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "โหมดมืด" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "ภาษา" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "หยุดการสร้าง" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "ถามคำถาม" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "ภาพ" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "ค่าเริ่มต้น" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "ชั่วร้าย" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "การเข้าถึงเว็บ" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "เวอร์ชัน" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "แบบอย่าง" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "ผู้ให้บริการ" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/da_DK/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Danish (Denmark) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: da_DK\n" 14 | "Language-Team: da_DK \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Ny samtale" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Ryd samtaler" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "API-nøgle" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Ok" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Mørk tilstand" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Sprog" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Stop generering" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Stil et spørgsmål" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "BILLEDE" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Standard" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Ond" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Webadgang" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Version" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Model" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Udbyder" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" 87 | -------------------------------------------------------------------------------- /translations/sv_SE/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Swedish (Sweden) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: sv_SE\n" 14 | "Language-Team: sv_SE \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Ny konversation" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Rensa konversationer" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "API-nyckel" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Ok" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Mörkt läge" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Språk" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Sluta generera" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Ställ en fråga" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "BILD" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Standard" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Ond" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Webbåtkomst" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Version" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Modell" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Leverantör" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/tr_TR/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Turkish (Turkey) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: tr_TR\n" 14 | "Language-Team: tr_TR \n" 15 | "Plural-Forms: nplurals=1; plural=0;\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Yeni Konuşma" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Konuşmaları Temizle" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "API Anahtarı" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Tamam" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Karanlık Mod" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Dil" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Üretmeyi Durdur" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Bir soru sor" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "GÖRÜNTÜ" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Varsayılan" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Kötü" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Web Erişimi" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Versiyon" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Modeli" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Sağlayıcı" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/nb_NO/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Norwegian Bokmål (Norway) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: nb_NO\n" 14 | "Language-Team: nb_NO \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Ny samtale" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Fjern samtaler" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "API-nøkkel" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Ok" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Mørk modus" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Språk" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Stopp generering" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Still et spørsmål" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "BILDE" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Standard" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Ond" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Webtilgang" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Versjon" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Modell" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Forsørger" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /client/css/global.css: -------------------------------------------------------------------------------- 1 | @import url("https://fonts.googleapis.com/css2?family=Inter:wght@100;200;300;400;500;600;700;800;900&display=swap"); 2 | * { 3 | --font-1: "Inter", sans-serif; 4 | --section-gap: 24px; 5 | --border-radius-1: 8px; 6 | margin: 0; 7 | padding: 0; 8 | box-sizing: border-box; 9 | position: relative; 10 | font-family: 
var(--font-1); 11 | } 12 | 13 | .theme-light { 14 | --colour-1: #f5f5f5; 15 | --colour-2: #000000; 16 | --colour-3: #474747; 17 | --colour-4: #949494; 18 | --colour-5: #ebebeb; 19 | --colour-6: #dadada; 20 | --accent: #b97272; 21 | --blur-bg: #ffffff; 22 | --blur-border: 1px solid #dbdbdb57; 23 | --blur-box-shadow: 2px 2px 2px #dbdbdb57; 24 | --user-input: #282828; 25 | --conversations: #666666; 26 | } 27 | 28 | .theme-dark { 29 | --colour-1: #181818; 30 | --colour-2: #ccc; 31 | --colour-3: #dadada; 32 | --colour-4: #f0f0f0; 33 | --colour-5: #181818; 34 | --colour-6: #242424; 35 | 36 | --accent: #151718; 37 | --blur-bg: #242627; 38 | --blur-border: 1px solid #47474757; 39 | --blur-box-shadow: 2px 2px 2px #47474757; 40 | --user-input: #f5f5f5; 41 | --conversations: #555555; 42 | } 43 | 44 | html, 45 | body { 46 | background: var(--colour-1); 47 | color: var(--colour-3); 48 | height: max(100%); 49 | } 50 | 51 | ol, 52 | ul { 53 | padding-left: 20px; 54 | } 55 | 56 | .shown { 57 | display: flex !important; 58 | } 59 | 60 | a:-webkit-any-link { 61 | color: var(--accent); 62 | } 63 | 64 | pre { 65 | white-space: pre-wrap; 66 | } 67 | 68 | @media screen and (max-height: 720px) { 69 | :root { 70 | --section-gap: 16px; 71 | } 72 | } 73 | 74 | @media screen and (max-width: 990px) { 75 | * { 76 | --section-gap: 16px; 77 | } 78 | } 79 | 80 | -------------------------------------------------------------------------------- /translations/id_ID/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Indonesian (Indonesia) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: id_ID\n" 14 | "Language-Team: id_ID \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Percakapan Baru" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Hapus Percakapan" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "Kunci API" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Oke" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Mode Gelap" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Bahasa" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Berhenti Menghasilkan" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Ajukan pertanyaan" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "GAMBAR" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Default" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Jahat" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Akses Web" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Versi" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Model" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Pemberi" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/nl_NL/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Dutch (Netherlands) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: nl_NL\n" 14 | "Language-Team: nl_NL \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Nieuw gesprek" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Gesprekken wissen" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "API-sleutel" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Ok" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Donkere modus" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Taal" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Stop met genereren" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Stel een vraag" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "AFBEELDING" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Standaard" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Kwaad" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Webtoegang" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Versie" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Model" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Aanbieder" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/fi_FI/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Finnish (Finland) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: fi_FI\n" 14 | "Language-Team: fi_FI \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Uusi keskustelu" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Tyhjennä keskustelut" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "API-avain" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Ok" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Tumma tila" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Kieli" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Lopeta tuottaminen" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Esitä kysymys" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "KUVA" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Oletus" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Paha" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Verkkopääsy" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Versio" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Malli" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Palveluntarjoaja" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" 87 | -------------------------------------------------------------------------------- /translations/hu_HU/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Hungarian (Hungary) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: hu_HU\n" 14 | "Language-Team: hu_HU \n" 15 | "Plural-Forms: nplurals=1; plural=0;\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Új beszélgetés" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Beszélgetések törlése" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "API kulcs" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Rendben" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Sötét mód" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Nyelv" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Generálás leállítása" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Kérdés feltevése" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "KÉP" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Alapértelmezett" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Gonosz" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Webhozzáférés" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Verzió" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Modell" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Szolgáltató" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/vi_VN/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Vietnamese (Vietnam) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues\n" 12 | "MIME-Version: 1.0\n" 13 | "Language: vi_VN\n" 14 | "Language-Team: vi_VN \n" 15 | "Plural-Forms: nplurals=1; plural=0;\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Cuộc trò chuyện mới" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Xóa cuộc trò chuyện" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "Khóa API" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Đồng ý" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Chế độ tối" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Ngôn ngữ" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Dừng sinh" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Đặt một câu hỏi" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "HÌNH ẢNH" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Mặc định" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Ác" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Truy cập web" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Phiên bản" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Mô hình" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Nhà cung cấp" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/de_DE/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # German (Germany) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: de_DE\n" 14 | "Language-Team: de_DE \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Neues Gespräch" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Unterhaltungen löschen" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "API-Schlüssel" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Okay" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Dunkler Modus" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Sprache" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Generierung stoppen" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Stelle eine Frage" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "BILD" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Standard" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Böse" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Webzugang" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Version" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Modell" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Anbieter" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" 87 | -------------------------------------------------------------------------------- /translations/es_ES/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Spanish (Spain) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues\n" 12 | "MIME-Version: 1.0\n" 13 | "Language: es_ES\n" 14 | "Language-Team: es_ES \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Nueva conversación" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Borrar conversaciones" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "Clave API" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Ok" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Modo oscuro" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Idioma" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Dejar de generar" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Haz una pregunta" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "IMAGEN" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Predeterminado" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Mal" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Acceso web" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Versión" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Modelo" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Proveedor" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" 87 | -------------------------------------------------------------------------------- /translations/es_MX/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Spanish (Mexico) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues\n" 12 | "MIME-Version: 1.0\n" 13 | "Language: es_MX\n" 14 | "Language-Team: es_MX \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Nueva conversación" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Borrar conversaciones" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "Clave API" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Ok" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Modo oscuro" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Idioma" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Detener generación" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Haz una pregunta" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "IMAGEN" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Por defecto" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Malvado" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Acceso web" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Versión" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Modelo" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Proveedor" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" 87 | -------------------------------------------------------------------------------- /translations/it_IT/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Italian (Italy) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: it_IT\n" 14 | "Language-Team: it_IT \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Nuova conversazione" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Cancella conversazioni" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "Chiave API" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Ok" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Modalità scura" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Lingua" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Ferma la generazione" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Fai una domanda" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "IMMAGINE" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Default" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Male" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Accesso web" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Versione" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Modello" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Fornitore" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/fr_FR/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # French (France) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: fr_FR\n" 14 | "Language-Team: fr_FR \n" 15 | "Plural-Forms: nplurals=2; plural=(n > 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Nouvelle conversation" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Effacer les conversations" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "Clé API" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Ok" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Mode sombre" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Langue" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Arrêter la génération" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Poser une question" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "IMAGE" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Par défaut" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Méchant" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Accès Web" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Version" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Modèle" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Fournisseur" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" 87 | -------------------------------------------------------------------------------- /translations/el_GR/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Greek (Greece) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: el_GR\n" 14 | "Language-Team: el_GR \n" 15 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Νέα Συνομιλία" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Καθαρισμός Συνομιλιών" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "Κλειδί API" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Εντάξει" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Σκοτεινή Λειτουργία" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Γλώσσα" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Διακοπή Δημιουργίας" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Κάντε μια ερώτηση" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "ΕΙΚΟΝΑ" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Προεπιλογή" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Κακό" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Πρόσβαση στον ιστό" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Έκδοση" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Μοντέλο" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Προμηθευτής" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" 87 | -------------------------------------------------------------------------------- /translations/sk_SK/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Slovak (Slovakia) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: sk_SK\n" 14 | "Language-Team: sk_SK \n" 15 | "Plural-Forms: nplurals=3; plural=((n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Nová konverzácia" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Vymazať konverzácie" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "API Kľúč" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Ok" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Tmavý režim" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Jazyk" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Zastaviť generovanie" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Opýtať sa otázku" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "OBRÁZOK" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Predvolené" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Zlo" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Prístup na web" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Verzia" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Model" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Poskytovateľ" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/cs_CZ/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Czech (Czechia) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues\n" 12 | "MIME-Version: 1.0\n" 13 | "Language: cs_CZ\n" 14 | "Language-Team: cs_CZ \n" 15 | "Plural-Forms: nplurals=3; plural=((n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2);\n" 16 | "Content-Type: text/plain; charset=utf-8\n" 17 | "Content-Transfer-Encoding: 8bit\n" 18 | "Generated-By: Babel 2.12.1\n" 19 | 20 | #: 21 | msgid "New Conversation" 22 | msgstr "Nová konverzace" 23 | 24 | #: 25 | msgid "Clear Conversations" 26 | msgstr "Vymazat konverzace" 27 | 28 | #: 29 | msgid "API Key" 30 | msgstr "API klíč" 31 | 32 | #: 33 | msgid "Ok" 34 | msgstr "Dobře" 35 | 36 | #: 37 | msgid "Dark Mode" 38 | msgstr "Tmavý režim" 39 | 40 | #: 41 | msgid "Language" 42 | msgstr "Jazyk" 43 | 44 | #: 45 | msgid "Stop Generating" 46 | msgstr "Zastavit generování" 47 | 48 | #: 49 | msgid "Ask a question" 50 | msgstr "Zeptejte se na otázku" 51 | 52 | #: 53 | msgid "IMAGE" 54 | msgstr "OBRAZ" 55 | 56 | #: 57 | msgid "Default" 58 | msgstr "Výchozí" 59 | 60 | #: 61 | msgid "DAN" 62 | msgstr "DAN" 63 | 64 | #: 65 | msgid "Evil" 66 | msgstr "Zlo" 67 | 68 | #: 69 | msgid "Web Access" 70 | msgstr "Webový přístup" 71 | 72 | #: 73 | msgid "Version" 74 | msgstr "Verze" 75 | 76 | #: 77 | msgid "Model" 78 | msgstr "Model" 79 | 80 | #: 81 | msgid "Provider" 82 | msgstr "Poskytovatel" 83 | 84 | #: 85 | msgid "Jailbreak" 86 | msgstr "Jailbreak" 87 | -------------------------------------------------------------------------------- /translations/pl_PL/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Polish (Poland) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: pl_PL\n" 14 | "Language-Team: pl_PL \n" 15 | "Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && " 16 | "(n%100<10 || n%100>=20) ? 1 : 2);\n" 17 | "Content-Type: text/plain; charset=utf-8\n" 18 | "Content-Transfer-Encoding: 8bit\n" 19 | "Generated-By: Babel 2.12.1\n" 20 | 21 | #: 22 | msgid "New Conversation" 23 | msgstr "Nowa rozmowa" 24 | 25 | #: 26 | msgid "Clear Conversations" 27 | msgstr "Wyczyść rozmowy" 28 | 29 | #: 30 | msgid "API Key" 31 | msgstr "Klucz API" 32 | 33 | #: 34 | msgid "Ok" 35 | msgstr "Ok" 36 | 37 | #: 38 | msgid "Dark Mode" 39 | msgstr "Tryb ciemny" 40 | 41 | #: 42 | msgid "Language" 43 | msgstr "Język" 44 | 45 | #: 46 | msgid "Stop Generating" 47 | msgstr "Zatrzymaj generowanie" 48 | 49 | #: 50 | msgid "Ask a question" 51 | msgstr "Zadaj pytanie" 52 | 53 | #: 54 | msgid "IMAGE" 55 | msgstr "OBRAZ" 56 | 57 | #: 58 | msgid "Default" 59 | msgstr "Domyślny" 60 | 61 | #: 62 | msgid "DAN" 63 | msgstr "DAN" 64 | 65 | #: 66 | msgid "Evil" 67 | msgstr "Zło" 68 | 69 | #: 70 | msgid "Web Access" 71 | msgstr "Dostęp do sieci" 72 | 73 | #: 74 | msgid "Version" 75 | msgstr "Wersja" 76 | 77 | #: 78 | msgid "Model" 79 | msgstr "Model" 80 | 81 | #: 82 | msgid "Provider" 83 | msgstr "Dostawca" 84 | 85 | #: 86 | msgid "Jailbreak" 87 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/ro_RO/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Romanian (Romania) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: ro_RO\n" 14 | "Language-Team: ro_RO \n" 15 | "Plural-Forms: nplurals=3; plural=(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100" 16 | " < 20)) ? 1 : 2);\n" 17 | "Content-Type: text/plain; charset=utf-8\n" 18 | "Content-Transfer-Encoding: 8bit\n" 19 | "Generated-By: Babel 2.12.1\n" 20 | 21 | #: 22 | msgid "New Conversation" 23 | msgstr "Conversație nouă" 24 | 25 | #: 26 | msgid "Clear Conversations" 27 | msgstr "Șterge conversațiile" 28 | 29 | #: 30 | msgid "API Key" 31 | msgstr "Cheie API" 32 | 33 | #: 34 | msgid "Ok" 35 | msgstr "Ok" 36 | 37 | #: 38 | msgid "Dark Mode" 39 | msgstr "Mod întunecat" 40 | 41 | #: 42 | msgid "Language" 43 | msgstr "Limbă" 44 | 45 | #: 46 | msgid "Stop Generating" 47 | msgstr "Opriți generarea" 48 | 49 | #: 50 | msgid "Ask a question" 51 | msgstr "Pune o întrebare" 52 | 53 | #: 54 | msgid "IMAGE" 55 | msgstr "IMAGINE" 56 | 57 | #: 58 | msgid "Default" 59 | msgstr "Implicit" 60 | 61 | #: 62 | msgid "DAN" 63 | msgstr "DAN" 64 | 65 | #: 66 | msgid "Evil" 67 | msgstr "Rău" 68 | 69 | #: 70 | msgid "Web Access" 71 | msgstr "Acces la web" 72 | 73 | #: 74 | msgid "Version" 75 | msgstr "Versiune" 76 | 77 | #: 78 | msgid "Model" 79 | msgstr "Model" 80 | 81 | #: 82 | msgid "Provider" 83 | msgstr "Furnizor" 84 | 85 | #: 86 | msgid "Jailbreak" 87 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/ar_SA/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Arabic (Saudi Arabia) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: ar_SA\n" 14 | "Language-Team: ar_SA \n" 15 | "Plural-Forms: nplurals=6; plural=(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : " 16 | "n%100>=3 && n%100<=10 ? 3 : n%100>=0 && n%100<=2 ? 4 : 5);\n" 17 | "Content-Type: text/plain; charset=utf-8\n" 18 | "Content-Transfer-Encoding: 8bit\n" 19 | "Generated-By: Babel 2.12.1\n" 20 | 21 | #: 22 | msgid "New Conversation" 23 | msgstr "محادثة جديدة" 24 | 25 | #: 26 | msgid "Clear Conversations" 27 | msgstr "مسح المحادثات" 28 | 29 | #: 30 | msgid "API Key" 31 | msgstr "مفتاح API" 32 | 33 | #: 34 | msgid "Ok" 35 | msgstr "حسنا" 36 | 37 | #: 38 | msgid "Dark Mode" 39 | msgstr "الوضع الداكن" 40 | 41 | #: 42 | msgid "Language" 43 | msgstr "اللغة" 44 | 45 | #: 46 | msgid "Stop Generating" 47 | msgstr "توقف عن التوليد" 48 | 49 | #: 50 | msgid "Ask a question" 51 | msgstr "اسأل سؤال" 52 | 53 | #: 54 | msgid "IMAGE" 55 | msgstr "صورة" 56 | 57 | #: 58 | msgid "Default" 59 | msgstr "افتراضي" 60 | 61 | #: 62 | msgid "DAN" 63 | msgstr "دان" 64 | 65 | #: 66 | msgid "Evil" 67 | msgstr "شرير" 68 | 69 | #: 70 | msgid "Web Access" 71 | msgstr "الوصول إلى الويب" 72 | 73 | #: 74 | msgid "Version" 75 | msgstr "الإصدار" 76 | 77 | #: 78 | msgid "Model" 79 | msgstr "نموذج" 80 | 81 | #: 82 | msgid "Provider" 83 | msgstr "مزود" 84 | 85 | #: 86 | msgid "Jailbreak" 87 | msgstr "الهروب من السجن" 88 | -------------------------------------------------------------------------------- /translations/ru_RU/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Russian (Russia) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
5 | # Contributors: 6 | # example name - github.com/example/ 7 | # example name2 - github.com/example2/ 8 | # 9 | msgid "" 10 | msgstr "" 11 | "Report-Msgid-Bugs-To: https://github.com/ramonvc/freegpt-webui/issues" 12 | "MIME-Version: 1.0\n" 13 | "Language: ru_RU\n" 14 | "Language-Team: ru_RU \n" 15 | "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " 16 | "n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" 17 | "Content-Type: text/plain; charset=utf-8\n" 18 | "Content-Transfer-Encoding: 8bit\n" 19 | "Generated-By: Babel 2.12.1\n" 20 | 21 | #: 22 | msgid "New Conversation" 23 | msgstr "Новый разговор" 24 | 25 | #: 26 | msgid "Clear Conversations" 27 | msgstr "Очистить разговоры" 28 | 29 | #: 30 | msgid "API Key" 31 | msgstr "API ключ" 32 | 33 | #: 34 | msgid "Ok" 35 | msgstr "Ок" 36 | 37 | #: 38 | msgid "Dark Mode" 39 | msgstr "Темный режим" 40 | 41 | #: 42 | msgid "Language" 43 | msgstr "Язык" 44 | 45 | #: 46 | msgid "Stop Generating" 47 | msgstr "Остановить генерацию" 48 | 49 | #: 50 | msgid "Ask a question" 51 | msgstr "Задать вопрос" 52 | 53 | #: 54 | msgid "IMAGE" 55 | msgstr "ИЗОБРАЖЕНИЕ" 56 | 57 | #: 58 | msgid "Default" 59 | msgstr "По умолчанию" 60 | 61 | #: 62 | msgid "DAN" 63 | msgstr "DAN" 64 | 65 | #: 66 | msgid "Evil" 67 | msgstr "Зло" 68 | 69 | #: 70 | msgid "Web Access" 71 | msgstr "Веб-доступ" 72 | 73 | #: 74 | msgid "Version" 75 | msgstr "Версия" 76 | 77 | #: 78 | msgid "Model" 79 | msgstr "Модель" 80 | 81 | #: 82 | msgid "Provider" 83 | msgstr "Поставщик" 84 | 85 | #: 86 | msgid "Jailbreak" 87 | msgstr "Jailbreak" -------------------------------------------------------------------------------- /translations/uk_UA/LC_MESSAGES/messages.po: -------------------------------------------------------------------------------- 1 | # Ukrainian (Ukraine) translations for FreeGPT WebUI. 2 | # Copyright (C) 2023 FreeGPT WebUI. 3 | # This file is distributed under the same license as the FreeGPT WebUI 4 | # project. 
class AiAsk(AsyncGeneratorProvider):
    """Async provider for the e.aiask.me chat endpoint (streams plain-text chunks)."""
    url = "https://e.aiask.me"
    supports_message_history = True  # the full message list is forwarded as "list"
    supports_gpt_35_turbo = True
    working = False  # provider currently marked non-functional

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Stream the model reply from POST {url}/v1/chat/gpt/.

        Args:
            model: Ignored by this backend (it selects via "models": "0").
            messages: Chat history sent verbatim in the "list" field.
            proxy: Optional HTTPS proxy URL.
            **kwargs: Only "temperature" is used (default 0.5).

        Yields:
            Decoded text chunks.

        Raises:
            RuntimeError: When the backend streams its rate-limit banner.
        """
        headers = {
            "accept": "application/json, text/plain, */*",
            "origin": cls.url,
            "referer": f"{cls.url}/chat",
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "continuous": True,
                # Fixed conversation id, presumably copied from the web client — TODO confirm.
                "id": "fRMSQtuHl91A4De9cCvKD",
                "list": messages,
                "models": "0",
                "prompt": "",
                "temperature": kwargs.get("temperature", 0.5),
                "title": "",
            }
            buffer = ""
            # Banner the backend streams (in Chinese) when the free quota is used up.
            rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
            async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    buffer += chunk.decode()
                    # Hold back any text that is still a prefix of the rate-limit
                    # banner; flush the buffer as soon as it can no longer be
                    # the start of that banner.
                    if not rate_limit.startswith(buffer):
                        yield buffer
                        buffer = ""
                    elif buffer == rate_limit:
                        raise RuntimeError("Rate limit reached")
# Known mirror hosts for the aifree.site backend; one is chosen per request.
domains = [
    'https://s.aifree.site'
]

class FreeGpt(AsyncGeneratorProvider):
    """Provider for the aifree.site chat API (signed, streaming requests)."""
    url = "https://freegpts1.aifree.site/"
    working = False
    supports_message_history = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        **kwargs
    ) -> AsyncResult:
        """Stream decoded text chunks from a randomly selected mirror.

        Raises RuntimeError when the backend replies with its daily-quota banner.
        """
        async with StreamSession(
            impersonate="chrome107",
            timeout=timeout,
            proxies={"https": proxy}
        ) as session:
            now = int(time.time())
            last_message = messages[-1]["content"]
            payload = {
                "messages": messages,
                "time": now,
                "pass": None,
                # Requests are authenticated by signing the timestamp plus the
                # most recent user message.
                "sign": generate_signature(now, last_message)
            }
            base_url = random.choice(domains)
            async with session.post(f"{base_url}/api/generate", json=payload) as response:
                response.raise_for_status()
                async for raw in response.iter_content():
                    text = raw.decode()
                    if text == "当前地区当日额度已消耗完":
                        raise RuntimeError("Rate limit reached")
                    yield text


def generate_signature(timestamp: int, message: str, secret: str = ""):
    """Return the SHA-256 hex digest of "timestamp:message:secret"."""
    payload = ":".join((str(timestamp), message, secret))
    return hashlib.sha256(payload.encode()).hexdigest()
class Utils:
    """Helpers for pulling cookies for a domain out of locally installed browsers."""

    # Ordered by desktop market share so the most likely source is tried first.
    browsers = [
        browser_cookie3.chrome,    # 62.74% market share
        browser_cookie3.safari,    # 24.12% market share
        browser_cookie3.firefox,   #  4.56% market share
        browser_cookie3.edge,      #  2.85% market share
        browser_cookie3.opera,     #  1.69% market share
        browser_cookie3.brave,     #  0.96% market share
        browser_cookie3.opera_gx,  #  0.64% market share
        browser_cookie3.vivaldi,   #  0.32% market share
    ]

    @staticmethod
    def get_cookies(domain: str, setName: str = None, setBrowser: str = False) -> dict:
        """Collect cookies for *domain* from installed browsers.

        Args:
            domain: Domain name passed through to browser_cookie3.
            setName: If given, return only this single cookie (exits the
                process if it is not found in any browser).
            setBrowser: Name of one browser loader (e.g. "chrome") to restrict
                the scan to, or False to scan every supported browser.

        Returns:
            Mapping of cookie name to value; the first browser that provides
            a name wins on duplicates.
        """
        cookies = {}

        if setBrowser != False:
            for browser in Utils.browsers:
                if browser.__name__ == setBrowser:
                    try:
                        for c in browser(domain_name=domain):
                            if c.name not in cookies:
                                cookies[c.name] = c.value
                    except Exception:
                        # Browser not installed / profile locked: best-effort, keep going.
                        pass
        else:
            for browser in Utils.browsers:
                try:
                    for c in browser(domain_name=domain):
                        if c.name not in cookies:
                            cookies[c.name] = c.value
                except Exception:
                    pass

        if setName:
            try:
                return {setName: cookies[setName]}
            except KeyError:
                # BUG FIX: a missing dict key raises KeyError, not ValueError,
                # so the original "except ValueError" never matched and the
                # KeyError escaped to the caller instead of this message.
                print(f'Error: could not find {setName} cookie in any browser.')
                exit(1)
        else:
            return cookies
let shiftAmount = 70;
let mainContainer = document.querySelector('.main-container');

// Shift the chat up so the focused input stays visible above mobile keyboards.
function shiftContentUp() {
    mainContainer.style.marginTop = "-" + shiftAmount + "%";
}

function resetContent() {
    mainContainer.style.marginTop = "0";
}

// Toggle browser fullscreen.
// FIX: the original tried only the vendor-prefixed APIs (moz/webkit/ms), so it
// silently did nothing on modern browsers that expose the unprefixed standard
// API. Try the standard call first, then fall back to the prefixed variants.
function toggleFullScreen() {
    if (!document.fullscreenElement) {
        const el = document.documentElement;
        if (el.requestFullscreen) {              // standard
            el.requestFullscreen();
        } else if (el.mozRequestFullScreen) {    // old Firefox
            el.mozRequestFullScreen();
        } else if (el.webkitRequestFullscreen) { // Chrome, Safari, Opera
            el.webkitRequestFullscreen();
        } else if (el.msRequestFullscreen) {     // IE / old Edge
            el.msRequestFullscreen();
        }
    } else {
        if (document.exitFullscreen) {           // standard
            document.exitFullscreen();
        } else if (document.mozCancelFullScreen) {
            document.mozCancelFullScreen();
        } else if (document.webkitExitFullscreen) {
            document.webkitExitFullscreen();
        } else if (document.msExitFullscreen) {
            document.msExitFullscreen();
        }
    }
}

// Keep the input visible while fullscreen with the on-screen keyboard open.
document.getElementById('message-input').addEventListener('focus', function () {
    if (document.fullscreenElement) {
        shiftContentUp();
    }
});

document.getElementById('message-input').addEventListener('blur', function () {
    if (document.fullscreenElement) {
        resetContent();
    }
});

// Mirror the real fullscreen state back into the checkbox (covers the Esc key).
document.addEventListener('fullscreenchange', function () {
    document.getElementById('fullscreen-toggle').checked = !!document.fullscreenElement;
});

document.getElementById('fullscreen-toggle').addEventListener('change', function () {
    toggleFullScreen();
});
class Website:
    """Registers the chat UI routes on a Flask blueprint."""

    def __init__(self, bp, url_prefix) -> None:
        self.bp = bp
        self.url_prefix = url_prefix
        # BUG FIX: the original dict used the key '/chat/' twice; Python keeps
        # only the last entry, so the conversation handler shadowed the index
        # route and Flask was never given a <conversation_id> placeholder for
        # _chat's parameter. The conversation route now carries the variable.
        self.routes = {
            '/': {
                'function': lambda: redirect(url_for('._index')),
                'methods': ['GET', 'POST']
            },
            '/chat/': {
                'function': self._index,
                'methods': ['GET', 'POST']
            },
            '/chat/<conversation_id>': {
                'function': self._chat,
                'methods': ['GET', 'POST']
            },
            '/change-language': {
                'function': self.change_language,
                'methods': ['POST']
            },
            '/get-locale': {
                'function': self.get_locale,
                'methods': ['GET']
            },
            '/get-languages': {
                'function': self.get_languages,
                'methods': ['GET']
            }
        }

    def _chat(self, conversation_id):
        """Render an existing conversation; bounce malformed ids to the index."""
        if '-' not in conversation_id:
            return redirect(url_for('._index'))

        return render_template('index.html', chat_id=conversation_id, url_prefix=self.url_prefix)

    def _index(self):
        """Render the chat page with a freshly generated random conversation id."""
        return render_template('index.html', chat_id=f'{urandom(4).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{hex(int(time() * 1000))[2:]}', url_prefix=self.url_prefix)

    def change_language(self):
        """Persist the chosen UI language in the session and refresh Babel."""
        data = request.get_json()
        session['language'] = data.get('language')
        refresh()
        return '', 204

    def get_locale(self):
        """Return the active locale code."""
        return get_locale()

    def get_languages(self):
        """Return the list of available locale codes."""
        return get_languages()
class ChatAnywhere(AsyncGeneratorProvider):
    """Provider for chatanywhere.cn (streams raw text chunks, no SSE framing)."""
    url = "https://chatanywhere.cn"
    supports_gpt_35_turbo = True
    supports_message_history = True  # the whole history goes out in "list"
    working = False  # provider currently marked non-functional

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        temperature: float = 0.5,
        **kwargs
    ) -> AsyncResult:
        """Stream the reply from POST {url}/v1/chat/gpt/.

        Args:
            model: Ignored; the backend model is pinned via "models" below.
            messages: Chat history; the last entry also becomes the "title".
            proxy: Optional HTTPS proxy URL.
            timeout: Total request timeout in seconds.
            temperature: Sampling temperature forwarded to the backend.

        Yields:
            Decoded text chunks as they arrive.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/json",
            "Referer": f"{cls.url}/",
            "Origin": cls.url,
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Authorization": "",  # endpoint accepts an empty bearer for free tier
            "Connection": "keep-alive",
            "TE": "trailers"
        }
        async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
            data = {
                "list": messages,
                # Fixed session/model ids, presumably captured from the web
                # client — TODO confirm they are not account-specific.
                "id": "s1_qYuOLXjI3rEpc7WHfQ",
                "title": messages[-1]["content"],
                "prompt": "",
                "temperature": temperature,
                "models": "61490748",
                "continuous": True
            }
            async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    if chunk:
                        yield chunk.decode()
class Vitalentum(AsyncGeneratorProvider):
    """Provider for app.vitalentum.io (SSE stream with OpenAI-style deltas)."""
    url = "https://app.vitalentum.io"
    supports_gpt_35_turbo = True


    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield assistant text deltas from the converse-edge endpoint."""
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept": "text/event-stream",
            "Accept-language": "de,en-US;q=0.7,en;q=0.3",
            "Origin": cls.url,
            "Referer": f"{cls.url}/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
        }
        # The backend wants the whole history as a JSON *string* under
        # "conversation", with roles mapped onto its "human"/"bot" labels.
        history = []
        for message in messages:
            speaker = "human" if message["role"] == "user" else "bot"
            history.append({"speaker": speaker, "text": message["content"]})
        conversation = json.dumps({"history": history})
        data = {
            "conversation": conversation,
            "temperature": 0.7,
            **kwargs
        }
        async with ClientSession(
            headers=headers
        ) as session:
            async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for raw_line in response.content:
                    decoded = raw_line.decode()
                    if not decoded.startswith("data: "):
                        continue
                    if decoded.startswith("data: [DONE]"):
                        break
                    payload = json.loads(decoded[6:-1])
                    delta = payload["choices"][0]["delta"].get("content")
                    if delta:
                        yield delta
class Wuguokai(AbstractProvider):
    """Provider for chat.wuguokai.xyz (single-shot, non-streaming completion)."""
    url = 'https://chat.wuguokai.xyz'
    supports_gpt_35_turbo = True
    working = False

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        """Yield the full reply text once the backend has answered.

        Raises:
            Exception: On any non-200 HTTP status.
        """
        headers = {
            'authority': 'ai-api.wuguokai.xyz',
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'content-type': 'application/json',
            'origin': 'https://chat.wuguokai.xyz',
            'referer': 'https://chat.wuguokai.xyz/',
            'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-site',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
        }
        payload = {
            "prompt": format_prompt(messages),
            "options": {},
            # Random pseudo-user id per request, mirroring the web client.
            "userId": f"#/chat/{random.randint(1,99999999)}",
            "usingContext": True
        }
        response = requests.post(
            "https://ai-api20.wuguokai.xyz/api/chat-process",
            headers=headers,
            timeout=3,
            json=payload,
            proxies=kwargs.get('proxy', {}),
        )
        # The backend prepends a Chinese retry hint separated by this marker;
        # the actual answer (when present) follows it.
        parts = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
        if response.status_code != 200:
            raise Exception(f"Error: {response.status_code} {response.reason}")
        answer = parts[1] if len(parts) > 1 else parts[0]
        yield answer.strip()
class GptGod(AsyncGeneratorProvider):
    """Provider for gptgod.site's free gpt-3.5 endpoint (SSE over GET)."""
    url = "https://gptgod.site"
    supports_gpt_35_turbo = True
    working = False  # provider currently marked non-functional

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Stream decoded JSON payloads from the free gpt3p5 session endpoint.

        The history is flattened to a single prompt string; the request is a
        GET with the prompt passed as a query parameter.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "text/event-stream",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Alt-Used": "gptgod.site",
            "Connection": "keep-alive",
            "Referer": f"{cls.url}/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
        }

        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "content": prompt,
                # Random 32-hex-char request id (token_hex(16) already yields
                # 32 chars, so zfill is a no-op safeguard).
                "id": secrets.token_hex(16).zfill(32)
            }
            async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data, proxy=proxy) as response:
                response.raise_for_status()
                # Minimal SSE state machine: remember the last "event:" line
                # and interpret the following "data:" lines under it.
                event = None
                async for line in response.content:
                    if line.startswith(b'event: '):
                        # Strip the "event: " prefix and trailing newline.
                        event = line[7:-1]

                    elif event == b"data" and line.startswith(b"data: "):
                        # Strip the "data: " prefix and trailing newline.
                        data = json.loads(line[6:-1])
                        if data:
                            yield data

                    elif event == b"done":
                        break
"messages": messages, 48 | "newMessage": messages[-1]["content"], 49 | "newImageId": None, 50 | "stream": True 51 | } 52 | async with session.post(f"{cls.url}/chatgpt/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response: 53 | response.raise_for_status() 54 | async for chunk in response.content: 55 | if chunk.startswith(b"data: "): 56 | data = json.loads(chunk[6:]) 57 | if data["type"] == "live": 58 | yield data["data"] -------------------------------------------------------------------------------- /g4f/Provider/GeminiProChat.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import time 4 | from hashlib import sha256 5 | from aiohttp import ClientSession 6 | 7 | from ..typing import AsyncResult, Messages 8 | from .base_provider import AsyncGeneratorProvider 9 | 10 | 11 | class GeminiProChat(AsyncGeneratorProvider): 12 | url = "https://geminiprochat.com" 13 | working = True 14 | supports_gpt_35_turbo = True 15 | 16 | @classmethod 17 | async def create_async_generator( 18 | cls, 19 | model: str, 20 | messages: Messages, 21 | proxy: str = None, 22 | **kwargs 23 | ) -> AsyncResult: 24 | headers = { 25 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0", 26 | "Accept": "*/*", 27 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3", 28 | "Accept-Encoding": "gzip, deflate, br", 29 | "Content-Type": "text/plain;charset=UTF-8", 30 | "Referer": "https://geminiprochat.com/", 31 | "Origin": "https://geminiprochat.com", 32 | "Sec-Fetch-Dest": "empty", 33 | "Sec-Fetch-Mode": "cors", 34 | "Sec-Fetch-Site": "same-origin", 35 | "Connection": "keep-alive", 36 | "TE": "trailers", 37 | } 38 | async with ClientSession(headers=headers) as session: 39 | timestamp = int(time.time() * 1e3) 40 | data = { 41 | "messages":[{ 42 | "role": "model" if message["role"] == "assistant" else "user", 43 | "parts": [{"text": message["content"]}] 44 | } for message in 
messages], 45 | "time": timestamp, 46 | "pass": None, 47 | "sign": generate_signature(timestamp, messages[-1]["content"]), 48 | } 49 | async with session.post(f"{cls.url}/api/generate", json=data, proxy=proxy) as response: 50 | response.raise_for_status() 51 | async for chunk in response.content.iter_any(): 52 | yield chunk.decode() 53 | 54 | def generate_signature(time: int, text: str): 55 | message = f'{time}:{text}:9C4680FB-A4E1-6BC7-052A-7F68F9F5AD1F'; 56 | return sha256(message.encode()).hexdigest() 57 | -------------------------------------------------------------------------------- /g4f/Provider/deprecated/V50.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import uuid 4 | 5 | import requests 6 | 7 | from ...typing import Any, CreateResult 8 | from ..base_provider import AbstractProvider 9 | 10 | 11 | class V50(AbstractProvider): 12 | url = 'https://p5.v50.ltd' 13 | supports_gpt_35_turbo = True 14 | supports_stream = False 15 | needs_auth = False 16 | working = False 17 | 18 | @staticmethod 19 | def create_completion( 20 | model: str, 21 | messages: list[dict[str, str]], 22 | stream: bool, **kwargs: Any) -> CreateResult: 23 | 24 | conversation = ( 25 | "\n".join( 26 | f"{message['role']}: {message['content']}" for message in messages 27 | ) 28 | + "\nassistant: " 29 | ) 30 | payload = { 31 | "prompt" : conversation, 32 | "options" : {}, 33 | "systemMessage" : ".", 34 | "temperature" : kwargs.get("temperature", 0.4), 35 | "top_p" : kwargs.get("top_p", 0.4), 36 | "model" : model, 37 | "user" : str(uuid.uuid4()) 38 | } 39 | 40 | headers = { 41 | 'authority' : 'p5.v50.ltd', 42 | 'accept' : 'application/json, text/plain, */*', 43 | 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', 44 | 'content-type' : 'application/json', 45 | 'origin' : 'https://p5.v50.ltd', 46 | 'referer' : 'https://p5.v50.ltd/', 47 | 'sec-ch-ua-platform': '"Windows"', 48 | 'sec-fetch-dest' : 'empty', 
49 | 'sec-fetch-mode' : 'cors', 50 | 'sec-fetch-site' : 'same-origin', 51 | 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36' 52 | } 53 | response = requests.post( 54 | "https://p5.v50.ltd/api/chat-process", 55 | json=payload, 56 | headers=headers, 57 | proxies=kwargs.get('proxy', {}), 58 | ) 59 | 60 | if "https://fk1.v50.ltd" not in response.text: 61 | yield response.text -------------------------------------------------------------------------------- /g4f/Provider/Aura.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from aiohttp import ClientSession 3 | from ..typing import AsyncResult, Messages 4 | from .base_provider import AsyncGeneratorProvider 5 | 6 | class Aura(AsyncGeneratorProvider): 7 | url = "https://openchat.team" 8 | working = True 9 | supports_gpt_35_turbo = True 10 | 11 | @classmethod 12 | async def create_async_generator( 13 | cls, 14 | model: str, 15 | messages: Messages, 16 | proxy: str = None, 17 | **kwargs 18 | ) -> AsyncResult: 19 | headers = { 20 | "Accept": "*/*", 21 | "Accept-Encoding": "gzip, deflate, br", 22 | "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8", 23 | "Content-Type": "application/json", 24 | "Origin": f"{cls.url}", 25 | "Referer": f"{cls.url}/", 26 | "Sec-Ch-Ua": '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"', 27 | "Sec-Ch-Ua-Mobile": "?0", 28 | "Sec-Ch-Ua-Platform": '"Linux"', 29 | "Sec-Fetch-Dest": "empty", 30 | "Sec-Fetch-Mode": "cors", 31 | "Sec-Fetch-Site": "same-origin", 32 | "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", 33 | } 34 | async with ClientSession(headers=headers) as session: 35 | new_messages = [] 36 | system_message = [] 37 | for message in messages: 38 | if message["role"] == "system": 39 | system_message.append(message["content"]) 40 | else: 41 | 
new_messages.append(message) 42 | data = { 43 | "model": { 44 | "id": "openchat_v3.2_mistral", 45 | "name": "OpenChat Aura", 46 | "maxLength": 24576, 47 | "tokenLimit": 8192 48 | }, 49 | "messages": new_messages, 50 | "key": "", 51 | "prompt": "\n".join(system_message), 52 | "temperature": 0.5 53 | } 54 | async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response: 55 | async for chunk in response.content.iter_any(): 56 | yield chunk.decode() -------------------------------------------------------------------------------- /g4f/Provider/Bestim.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from ..typing import Messages 4 | from .base_provider import BaseProvider, CreateResult 5 | from ..requests import get_session_from_browser 6 | from uuid import uuid4 7 | import requests 8 | 9 | class Bestim(BaseProvider): 10 | url = "https://chatgpt.bestim.org" 11 | supports_gpt_35_turbo = True 12 | supports_message_history = True 13 | working = False 14 | supports_stream = True 15 | 16 | @classmethod 17 | def create_completion( 18 | cls, 19 | model: str, 20 | messages: Messages, 21 | stream: bool, 22 | proxy: str = None, 23 | **kwargs 24 | ) -> CreateResult: 25 | session = get_session_from_browser(cls.url, proxy=proxy) 26 | headers = { 27 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:121.0) Gecko/20100101 Firefox/121.0', 28 | 'Accept': 'application/json, text/event-stream', 29 | 'Accept-Language': 'en-US,en;q=0.5', 30 | 'Accept-Encoding': 'gzip, deflate, br', 31 | 'Referer': 'https://chatgpt.bestim.org/chat/', 32 | 'Origin': 'https://chatgpt.bestim.org', 33 | 'Alt-Used': 'chatgpt.bestim.org', 34 | 'Connection': 'keep-alive', 35 | 'Sec-Fetch-Dest': 'empty', 36 | 'Sec-Fetch-Mode': 'cors', 37 | 'Sec-Fetch-Site': 'same-origin', 38 | 'TE': 'trailers' 39 | } 40 | data = { 41 | "messagesHistory": [{ 42 | "id": str(uuid4()), 43 | "content": m["content"], 44 | 
"from": "you" if m["role"] == "user" else "bot" 45 | } for m in messages], 46 | "type": "chat", 47 | } 48 | response = session.post( 49 | url="https://chatgpt.bestim.org/chat/send2/", 50 | headers=headers, 51 | json=data, 52 | proxies={"https": proxy}, 53 | stream=True 54 | ) 55 | response.raise_for_status() 56 | for line in response.iter_lines(): 57 | if not line.startswith(b"event: trylimit"): 58 | yield line.decode().removeprefix("data: ") 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /g4f/Provider/AiChatOnline.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import json 4 | from aiohttp import ClientSession 5 | 6 | from ..typing import AsyncResult, Messages 7 | from .base_provider import AsyncGeneratorProvider 8 | from .helper import get_random_string 9 | 10 | class AiChatOnline(AsyncGeneratorProvider): 11 | url = "https://aichatonline.org" 12 | working = True 13 | supports_gpt_35_turbo = True 14 | supports_message_history = False 15 | 16 | @classmethod 17 | async def create_async_generator( 18 | cls, 19 | model: str, 20 | messages: Messages, 21 | proxy: str = None, 22 | **kwargs 23 | ) -> AsyncResult: 24 | headers = { 25 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0", 26 | "Accept": "text/event-stream", 27 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3", 28 | "Accept-Encoding": "gzip, deflate, br", 29 | "Referer": f"{cls.url}/chatgpt/chat/", 30 | "Content-Type": "application/json", 31 | "Origin": cls.url, 32 | "Alt-Used": "aichatonline.org", 33 | "Connection": "keep-alive", 34 | "Sec-Fetch-Dest": "empty", 35 | "Sec-Fetch-Mode": "cors", 36 | "Sec-Fetch-Site": "same-origin", 37 | "TE": "trailers" 38 | } 39 | async with ClientSession(headers=headers) as session: 40 | data = { 41 | "botId": "default", 42 | "customId": None, 43 | "session": 
get_random_string(16), 44 | "chatId": get_random_string(), 45 | "contextId": 7, 46 | "messages": messages, 47 | "newMessage": messages[-1]["content"], 48 | "newImageId": None, 49 | "stream": True 50 | } 51 | async with session.post(f"{cls.url}/chatgpt/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response: 52 | response.raise_for_status() 53 | async for chunk in response.content: 54 | if chunk.startswith(b"data: "): 55 | data = json.loads(chunk[6:]) 56 | if data["type"] == "live": 57 | yield data["data"] 58 | elif data["type"] == "end": 59 | break -------------------------------------------------------------------------------- /g4f/Provider/Gpt6.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import json 4 | from aiohttp import ClientSession 5 | 6 | from ..typing import AsyncResult, Messages 7 | from .base_provider import AsyncGeneratorProvider 8 | from .helper import format_prompt 9 | 10 | 11 | class Gpt6(AsyncGeneratorProvider): 12 | url = "https://gpt6.ai" 13 | working = True 14 | supports_gpt_35_turbo = True 15 | 16 | @classmethod 17 | async def create_async_generator( 18 | cls, 19 | model: str, 20 | messages: Messages, 21 | proxy: str = None, 22 | **kwargs 23 | ) -> AsyncResult: 24 | headers = { 25 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0", 26 | "Accept": "*/*", 27 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3", 28 | "Accept-Encoding": "gzip, deflate, br", 29 | "Content-Type": "application/json", 30 | "Origin": "https://gpt6.ai", 31 | "Connection": "keep-alive", 32 | "Referer": "https://gpt6.ai/", 33 | "Sec-Fetch-Dest": "empty", 34 | "Sec-Fetch-Mode": "cors", 35 | "Sec-Fetch-Site": "cross-site", 36 | "TE": "trailers", 37 | } 38 | async with ClientSession(headers=headers) as session: 39 | data = { 40 | "prompts":messages, 41 | 
"geoInfo":{"ip":"100.90.100.222","hostname":"ip-100-090-100-222.um36.pools.vodafone-ip.de","city":"Muenchen","region":"North Rhine-Westphalia","country":"DE","loc":"44.0910,5.5827","org":"AS3209 Vodafone GmbH","postal":"41507","timezone":"Europe/Berlin"}, 42 | "paid":False, 43 | "character":{"textContent":"","id":"52690ad6-22e4-4674-93d4-1784721e9944","name":"GPT6","htmlContent":""} 44 | } 45 | async with session.post(f"https://seahorse-app-d29hu.ondigitalocean.app/api/v1/query", json=data, proxy=proxy) as response: 46 | response.raise_for_status() 47 | async for line in response.content: 48 | if line.startswith(b"data: [DONE]"): 49 | break 50 | elif line.startswith(b"data: "): 51 | line = json.loads(line[6:-1]) 52 | 53 | chunk = line["choices"][0]["delta"].get("content") 54 | if chunk: 55 | yield chunk -------------------------------------------------------------------------------- /g4f/Provider/Chatxyz.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import json 4 | from aiohttp import ClientSession 5 | 6 | from ..typing import AsyncResult, Messages 7 | from .base_provider import AsyncGeneratorProvider 8 | 9 | class Chatxyz(AsyncGeneratorProvider): 10 | url = "https://chat.3211000.xyz" 11 | working = True 12 | supports_gpt_35_turbo = True 13 | supports_message_history = True 14 | 15 | @classmethod 16 | async def create_async_generator( 17 | cls, 18 | model: str, 19 | messages: Messages, 20 | proxy: str = None, 21 | **kwargs 22 | ) -> AsyncResult: 23 | headers = { 24 | 'Accept': 'text/event-stream', 25 | 'Accept-Encoding': 'gzip, deflate, br', 26 | 'Accept-Language': 'en-US,en;q=0.5', 27 | 'Alt-Used': 'chat.3211000.xyz', 28 | 'Content-Type': 'application/json', 29 | 'Host': 'chat.3211000.xyz', 30 | 'Origin': 'https://chat.3211000.xyz', 31 | 'Referer': 'https://chat.3211000.xyz/', 32 | 'Sec-Fetch-Dest': 'empty', 33 | 'Sec-Fetch-Mode': 'cors', 34 | 'Sec-Fetch-Site': 'same-origin', 35 
| 'TE': 'trailers', 36 | 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:121.0) Gecko/20100101 Firefox/121.0', 37 | 'x-requested-with': 'XMLHttpRequest' 38 | } 39 | async with ClientSession(headers=headers) as session: 40 | data = { 41 | "messages": messages, 42 | "stream": True, 43 | "model": "gpt-3.5-turbo", 44 | "temperature": 0.5, 45 | "presence_penalty": 0, 46 | "frequency_penalty": 0, 47 | "top_p": 1, 48 | **kwargs 49 | } 50 | async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response: 51 | response.raise_for_status() 52 | async for chunk in response.content: 53 | line = chunk.decode() 54 | if line.startswith("data: [DONE]"): 55 | break 56 | elif line.startswith("data: "): 57 | line = json.loads(line[6:]) 58 | chunk = line["choices"][0]["delta"].get("content") 59 | if(chunk): 60 | yield chunk -------------------------------------------------------------------------------- /g4f/Provider/ChatgptNext.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import json 4 | from aiohttp import ClientSession 5 | 6 | from ..typing import AsyncResult, Messages 7 | from .base_provider import AsyncGeneratorProvider 8 | from .helper import format_prompt 9 | 10 | 11 | class ChatgptNext(AsyncGeneratorProvider): 12 | url = "https://www.chatgpt-free.cc" 13 | working = True 14 | supports_gpt_35_turbo = True 15 | 16 | @classmethod 17 | async def create_async_generator( 18 | cls, 19 | model: str, 20 | messages: Messages, 21 | proxy: str = None, 22 | **kwargs 23 | ) -> AsyncResult: 24 | if not model: 25 | model = "gpt-3.5-turbo" 26 | headers = { 27 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0", 28 | "Accept": "text/event-stream", 29 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3", 30 | "Accept-Encoding": "gzip, deflate, br", 31 | "Content-Type": "application/json", 32 | "Referer": 
"https://chat.fstha.com/", 33 | "x-requested-with": "XMLHttpRequest", 34 | "Origin": "https://chat.fstha.com", 35 | "Sec-Fetch-Dest": "empty", 36 | "Sec-Fetch-Mode": "cors", 37 | "Sec-Fetch-Site": "same-origin", 38 | "Authorization": "Bearer ak-chatgpt-nice", 39 | "Connection": "keep-alive", 40 | "Alt-Used": "chat.fstha.com", 41 | } 42 | async with ClientSession(headers=headers) as session: 43 | data = { 44 | "messages": messages, 45 | "stream": True, 46 | "model": model, 47 | "temperature": 0.5, 48 | "presence_penalty": 0, 49 | "frequency_penalty": 0, 50 | "top_p": 1, 51 | **kwargs 52 | } 53 | async with session.post(f"https://chat.fstha.com/api/openai/v1/chat/completions", json=data, proxy=proxy) as response: 54 | response.raise_for_status() 55 | async for chunk in response.content: 56 | if chunk.startswith(b"data: [DONE]"): 57 | break 58 | if chunk.startswith(b"data: "): 59 | content = json.loads(chunk[6:])["choices"][0]["delta"].get("content") 60 | if content: 61 | yield content -------------------------------------------------------------------------------- /g4f/Provider/GptGo.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from aiohttp import ClientSession 4 | import json 5 | import base64 6 | 7 | from ..typing import AsyncResult, Messages 8 | from .base_provider import AsyncGeneratorProvider, format_prompt 9 | 10 | 11 | class GptGo(AsyncGeneratorProvider): 12 | url = "https://gptgo.ai" 13 | supports_gpt_35_turbo = True 14 | working = True 15 | 16 | @classmethod 17 | async def create_async_generator( 18 | cls, 19 | model: str, 20 | messages: Messages, 21 | proxy: str = None, 22 | **kwargs 23 | ) -> AsyncResult: 24 | headers = { 25 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36", 26 | "Accept": "*/*", 27 | "Accept-language": "en-US", 28 | "Origin": cls.url, 29 | "Referer": f"{cls.url}/", 30 | 
"sec-ch-ua": '"Google Chrome";v="116", "Chromium";v="116", "Not?A_Brand";v="24"', 31 | "sec-ch-ua-mobile": "?0", 32 | "sec-ch-ua-platform": '"Windows"', 33 | "Sec-Fetch-Dest": "empty", 34 | "Sec-Fetch-Mode": "cors", 35 | "Sec-Fetch-Site": "same-origin", 36 | } 37 | async with ClientSession( 38 | headers=headers 39 | ) as session: 40 | async with session.post( 41 | "https://gptgo.ai/get_token.php", 42 | data={"ask": format_prompt(messages)}, 43 | proxy=proxy 44 | ) as response: 45 | response.raise_for_status() 46 | token = await response.text(); 47 | token = base64.b64decode(token[10:-20]).decode() 48 | 49 | async with session.get( 50 | "https://api.gptgo.ai/web.php", 51 | params={"array_chat": token}, 52 | proxy=proxy 53 | ) as response: 54 | response.raise_for_status() 55 | async for line in response.content: 56 | if line.startswith(b"data: [DONE]"): 57 | break 58 | if line.startswith(b"data: "): 59 | line = json.loads(line[6:]) 60 | content = line["choices"][0]["delta"].get("content") 61 | if content and content != "\n#GPTGO ": 62 | yield content 63 | -------------------------------------------------------------------------------- /g4f/Provider/Opchatgpts.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import random, string, json 4 | from aiohttp import ClientSession 5 | 6 | from ..typing import Messages, AsyncResult 7 | from .base_provider import AsyncGeneratorProvider 8 | from .helper import get_random_string 9 | 10 | class Opchatgpts(AsyncGeneratorProvider): 11 | url = "https://opchatgpts.net" 12 | working = False 13 | supports_message_history = True 14 | supports_gpt_35_turbo = True 15 | 16 | @classmethod 17 | async def create_async_generator( 18 | cls, 19 | model: str, 20 | messages: Messages, 21 | proxy: str = None, **kwargs) -> AsyncResult: 22 | 23 | headers = { 24 | "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/116.0.0.0 Safari/537.36", 25 | "Accept" : "*/*", 26 | "Accept-Language" : "de,en-US;q=0.7,en;q=0.3", 27 | "Origin" : cls.url, 28 | "Alt-Used" : "opchatgpts.net", 29 | "Referer" : f"{cls.url}/chatgpt-free-use/", 30 | "Sec-Fetch-Dest" : "empty", 31 | "Sec-Fetch-Mode" : "cors", 32 | "Sec-Fetch-Site" : "same-origin", 33 | } 34 | async with ClientSession( 35 | headers=headers 36 | ) as session: 37 | data = { 38 | "botId": "default", 39 | "chatId": get_random_string(), 40 | "contextId": 28, 41 | "customId": None, 42 | "messages": messages, 43 | "newMessage": messages[-1]["content"], 44 | "session": "N/A", 45 | "stream": True 46 | } 47 | async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response: 48 | response.raise_for_status() 49 | async for line in response.content: 50 | if line.startswith(b"data: "): 51 | try: 52 | line = json.loads(line[6:]) 53 | assert "type" in line 54 | except: 55 | raise RuntimeError(f"Broken line: {line.decode()}") 56 | if line["type"] == "live": 57 | yield line["data"] 58 | elif line["type"] == "end": 59 | break -------------------------------------------------------------------------------- /g4f/Provider/Koala.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import json 4 | from aiohttp import ClientSession 5 | 6 | from ..typing import AsyncResult, Messages 7 | from .base_provider import AsyncGeneratorProvider 8 | from .helper import get_random_string 9 | 10 | class Koala(AsyncGeneratorProvider): 11 | url = "https://koala.sh" 12 | supports_gpt_35_turbo = True 13 | supports_message_history = True 14 | working = True 15 | 16 | @classmethod 17 | async def create_async_generator( 18 | cls, 19 | model: str, 20 | messages: Messages, 21 | proxy: str = None, 22 | **kwargs 23 | ) -> AsyncResult: 24 | if not model: 25 | model = "gpt-3.5-turbo" 26 | headers = { 27 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; 
rv:109.0) Gecko/20100101 Firefox/119.0", 28 | "Accept": "text/event-stream", 29 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3", 30 | "Accept-Encoding": "gzip, deflate, br", 31 | "Referer": f"{cls.url}/chat", 32 | "Content-Type": "application/json", 33 | "Flag-Real-Time-Data": "false", 34 | "Visitor-ID": get_random_string(20), 35 | "Origin": cls.url, 36 | "Alt-Used": "koala.sh", 37 | "Connection": "keep-alive", 38 | "Sec-Fetch-Dest": "empty", 39 | "Sec-Fetch-Mode": "cors", 40 | "Sec-Fetch-Site": "same-origin", 41 | "Pragma": "no-cache", 42 | "Cache-Control": "no-cache", 43 | "TE": "trailers", 44 | } 45 | async with ClientSession(headers=headers) as session: 46 | data = { 47 | "input": messages[-1]["content"], 48 | "inputHistory": [ 49 | message["content"] 50 | for message in messages 51 | if message["role"] == "user" 52 | ], 53 | "outputHistory": [ 54 | message["content"] 55 | for message in messages 56 | if message["role"] == "assistant" 57 | ], 58 | "model": model, 59 | } 60 | async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response: 61 | response.raise_for_status() 62 | async for chunk in response.content: 63 | if chunk.startswith(b"data: "): 64 | yield json.loads(chunk[6:]) -------------------------------------------------------------------------------- /g4f/Provider/unfinished/AiChatting.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from urllib.parse import unquote 4 | 5 | from ...typing import AsyncResult, Messages 6 | from ..base_provider import AbstractProvider 7 | from ...webdriver import WebDriver 8 | from ...requests import Session, get_session_from_browser 9 | 10 | class AiChatting(AbstractProvider): 11 | url = "https://www.aichatting.net" 12 | supports_gpt_35_turbo = True 13 | _session: Session = None 14 | 15 | @classmethod 16 | def create_completion( 17 | cls, 18 | model: str, 19 | messages: Messages, 20 | stream: bool, 21 | proxy: str = 
None, 22 | timeout: int = 120, 23 | webdriver: WebDriver = None, 24 | **kwargs 25 | ) -> AsyncResult: 26 | if not cls._session: 27 | cls._session = get_session_from_browser(cls.url, webdriver, proxy, timeout) 28 | visitorId = unquote(cls._session.cookies.get("aichatting.website.visitorId")) 29 | 30 | headers = { 31 | "accept": "application/json, text/plain, */*", 32 | "lang": "en", 33 | "source": "web" 34 | } 35 | data = { 36 | "roleId": 0, 37 | } 38 | try: 39 | response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/record/conversation/create", json=data, headers=headers) 40 | response.raise_for_status() 41 | conversation_id = response.json()["data"]["conversationId"] 42 | except Exception as e: 43 | cls.reset() 44 | raise e 45 | headers = { 46 | "authority": "aga-api.aichatting.net", 47 | "accept": "text/event-stream,application/json, text/event-stream", 48 | "lang": "en", 49 | "source": "web", 50 | "vtoken": visitorId, 51 | } 52 | data = { 53 | "spaceHandle": True, 54 | "roleId": 0, 55 | "messages": messages, 56 | "conversationId": conversation_id, 57 | } 58 | response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/v2/stream", json=data, headers=headers, stream=True) 59 | response.raise_for_status() 60 | for chunk in response.iter_lines(): 61 | if chunk.startswith(b"data:"): 62 | yield chunk[5:].decode().replace("-=- --", " ").replace("-=-n--", "\n").replace("--@DONE@--", "") 63 | 64 | @classmethod 65 | def reset(cls): 66 | cls._session = None -------------------------------------------------------------------------------- /g4f/Provider/ChatForAi.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import time 4 | import hashlib 5 | 6 | from ..typing import AsyncResult, Messages 7 | from ..requests import StreamSession 8 | from .base_provider import AsyncGeneratorProvider 9 | 10 | 11 | class ChatForAi(AsyncGeneratorProvider): 12 | url = 
"https://chatforai.store" 13 | working = True 14 | supports_message_history = True 15 | supports_gpt_35_turbo = True 16 | 17 | @classmethod 18 | async def create_async_generator( 19 | cls, 20 | model: str, 21 | messages: Messages, 22 | proxy: str = None, 23 | timeout: int = 120, 24 | **kwargs 25 | ) -> AsyncResult: 26 | headers = { 27 | "Content-Type": "text/plain;charset=UTF-8", 28 | "Origin": cls.url, 29 | "Referer": f"{cls.url}/?r=b", 30 | } 31 | async with StreamSession(impersonate="chrome107", headers=headers, proxies={"https": proxy}, timeout=timeout) as session: 32 | prompt = messages[-1]["content"] 33 | timestamp = int(time.time() * 1e3) 34 | conversation_id = f"id_{timestamp-123}" 35 | data = { 36 | "conversationId": conversation_id, 37 | "conversationType": "chat_continuous", 38 | "botId": "chat_continuous", 39 | "globalSettings":{ 40 | "baseUrl": "https://api.openai.com", 41 | "model": model if model else "gpt-3.5-turbo", 42 | "messageHistorySize": 5, 43 | "temperature": 0.7, 44 | "top_p": 1, 45 | **kwargs 46 | }, 47 | "botSettings": {}, 48 | "prompt": prompt, 49 | "messages": messages, 50 | "timestamp": timestamp, 51 | "sign": generate_signature(timestamp, prompt, conversation_id) 52 | } 53 | async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response: 54 | response.raise_for_status() 55 | async for chunk in response.iter_content(): 56 | if b"https://chatforai.store" in chunk: 57 | raise RuntimeError(f"Response: {chunk.decode()}") 58 | yield chunk.decode() 59 | 60 | 61 | def generate_signature(timestamp: int, message: str, id: str): 62 | buffer = f"{timestamp}:{id}:{message}:7YN8z6d6" 63 | return hashlib.sha256(buffer.encode()).hexdigest() 64 | -------------------------------------------------------------------------------- /g4f/Provider/MyShell.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import time, json 4 | 5 | from ..typing import 
CreateResult, Messages 6 | from .base_provider import AbstractProvider 7 | from .helper import format_prompt 8 | from ..webdriver import WebDriver, WebDriverSession, bypass_cloudflare 9 | 10 | class MyShell(AbstractProvider): 11 | url = "https://app.myshell.ai/chat" 12 | working = True 13 | supports_gpt_35_turbo = True 14 | supports_stream = True 15 | 16 | @classmethod 17 | def create_completion( 18 | cls, 19 | model: str, 20 | messages: Messages, 21 | stream: bool, 22 | proxy: str = None, 23 | timeout: int = 120, 24 | webdriver: WebDriver = None, 25 | **kwargs 26 | ) -> CreateResult: 27 | with WebDriverSession(webdriver, "", proxy=proxy) as driver: 28 | bypass_cloudflare(driver, cls.url, timeout) 29 | 30 | # Send request with message 31 | data = { 32 | "botId": "4738", 33 | "conversation_scenario": 3, 34 | "message": format_prompt(messages), 35 | "messageType": 1 36 | } 37 | script = """ 38 | response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", { 39 | "headers": { 40 | "accept": "application/json", 41 | "content-type": "application/json", 42 | "myshell-service-name": "organics-api", 43 | "visitor-id": localStorage.getItem("mix_visitorId") 44 | }, 45 | "body": '{body}', 46 | "method": "POST" 47 | }) 48 | window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader(); 49 | """ 50 | driver.execute_script(script.replace("{body}", json.dumps(data))) 51 | script = """ 52 | chunk = await window._reader.read(); 53 | if (chunk.done) { 54 | return null; 55 | } 56 | content = ''; 57 | chunk.value.split('\\n').forEach((line, index) => { 58 | if (line.startsWith('data: ')) { 59 | try { 60 | const data = JSON.parse(line.substring('data: '.length)); 61 | if ('content' in data) { 62 | content += data['content']; 63 | } 64 | } catch(e) {} 65 | } 66 | }); 67 | return content; 68 | """ 69 | while True: 70 | chunk = driver.execute_script(script) 71 | if chunk: 72 | yield chunk 73 | elif chunk != "": 74 | break 75 | else: 76 | time.sleep(0.1) 
/* Base highlight.js code block: keep long tokens wrapping instead of
   overflowing the message bubble. */
.hljs {
    color: #4b45be;
    word-wrap: break-word;
    white-space: pre-wrap;
}

.theme-dark .hljs {
    color: #949494;
}

/* style for hljs copy */
.hljs-copy-wrapper {
    position: relative;
    overflow-x: clip;
}

/* Slide the copy button into view on hover/focus. */
.hljs-copy-wrapper:hover .hljs-copy-button,
.hljs-copy-button:focus {
    transform: translateX(0);
}

.hljs-copy-button {
    position: absolute;
    /* Parked just outside the clipped wrapper until hovered. */
    transform: translateX(calc(100% + 1.125em));
    top: 0.2em;
    right: 1em;
    width: 2em;
    height: 2em;
    text-indent: -9999px;
    color: #fff;
    border-radius: 0.25rem;
    border: 1px solid #ffffff22;
    background-color: #2d2b57;
    /* NOTE(review): the SVG payload of this data URI appears truncated —
       restore the copy icon from the upstream highlightjs-copy stylesheet. */
    background-image: url('data:image/svg+xml;utf-8,');
    background-repeat: no-repeat;
    background-position: center;
    transition: background-color 200ms ease, transform 200ms ease-out;
}

.theme-dark .hljs-copy-button {
    background-color: #949494;
}

.hljs-copy-button:hover {
    border-color: #ffffff44;
    cursor: pointer;
}

.hljs-copy-button:active {
    border-color: #ffffff66;
}

/* After copying, the button shows its label text instead of the icon. */
.hljs-copy-button[data-copied="true"] {
    text-indent: 0;
    width: auto;
    background-image: none;
}

/* Visually hidden, screen-reader-only announcement. */
.hljs-copy-alert {
    clip: rect(0 0 0 0);
    clip-path: inset(50%);
    height: 1px;
    overflow: hidden;
    position: absolute;
    white-space: nowrap;
    width: 1px;
}

@media (prefers-reduced-motion) {
    .hljs-copy-button {
        transition: none;
    }
}
class ChatAiGpt(AsyncGeneratorProvider):
    # Provider for chataigpt.org, a WordPress site using the "wpaicg" chat
    # plugin: a nonce and post id are scraped from the landing page, then
    # messages are sent through admin-ajax.php.
    url = "https://chataigpt.org"
    supports_gpt_35_turbo = True
    _nonce = None     # cached WP nonce scraped from the landing page
    _post_id = None   # cached post id scraped alongside the nonce

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield decoded response chunks for ``messages``.

        Raises:
            RuntimeError: if the nonce cannot be scraped from the page.
        """
        # Browser-imitating headers; the site rejects non-browser requests.
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Origin": cls.url,
            "Alt-Used": cls.url,
            "Connection": "keep-alive",
            "Referer": cls.url,
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
        }
        async with ClientSession(headers=headers) as session:
            # Scrape nonce/post id once and cache them on the class.
            if not cls._nonce:
                async with session.get(f"{cls.url}/", proxy=proxy) as response:
                    response.raise_for_status()
                    # NOTE: rebinds `response` from the aiohttp response object
                    # to its text body.
                    response = await response.text()

                # NOTE(review): this pattern matches unquoted attribute values
                # (`data-nonce=...`); HTML normally quotes them, so the quote
                # characters (or intervening whitespace) may have been lost in
                # transit — verify against the live page before relying on it.
                result = re.search(
                    r'data-nonce=(.*?) data-post-id=([0-9]+)', response
                )

                if result:
                    cls._nonce, cls._post_id = result.group(1), result.group(2)
                else:
                    raise RuntimeError("No nonce found")
            prompt = format_prompt(messages)
            # Form payload expected by the wpaicg admin-ajax action.
            data = {
                "_wpnonce": cls._nonce,
                "post_id": cls._post_id,
                "url": cls.url,
                "action": "wpaicg_chat_shortcode_message",
                "message": prompt,
                "bot_id": 0
            }
            async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
                response.raise_for_status()
                # Stream raw chunks straight through, decoded as text.
                async for chunk in response.content:
                    if chunk:
                        yield chunk.decode()
class Wewordle(AsyncProvider):
    """Deprecated provider for wewordle.org's mobile GPT endpoint.

    Fabricates an anonymous RevenueCat-style subscriber record so the request
    looks like it comes from the Android app, then returns the reply content.
    """
    url = "https://wewordle.org"
    working = False
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> str:
        """POST ``messages`` and return the assistant reply content."""
        headers = {
            "accept": "*/*",
            "pragma": "no-cache",
            "Content-Type": "application/json",
            "Connection": "keep-alive"
        }

        # Random identifiers imitating an anonymous app install.
        alphabet = f"{string.ascii_lowercase}{string.digits}"
        user_id = "".join(random.choice(alphabet) for _ in range(16))
        app_id = "".join(random.choice(alphabet) for _ in range(31))
        request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())

        # Empty/anonymous subscriber block: no purchases, no entitlements.
        subscriber = {
            "originalPurchaseDate": None,
            "originalApplicationVersion": None,
            "allPurchaseDatesMillis": {},
            "entitlements": {"active": {}, "all": {}},
            "allPurchaseDates": {},
            "allExpirationDatesMillis": {},
            "allExpirationDates": {},
            "originalAppUserId": f"$RCAnonymousID:{app_id}",
            "latestExpirationDate": None,
            "requestDate": request_date,
            "latestExpirationDateMillis": None,
            "nonSubscriptionTransactions": [],
            "originalPurchaseDateMillis": None,
            "managementURL": None,
            "allPurchasedProductIdentifiers": [],
            "firstSeen": request_date,
            "activeSubscriptions": [],
        }
        payload = {
            "user": user_id,
            "messages": messages,
            "subscriber": subscriber,
        }

        async with ClientSession(headers=headers) as session:
            async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=payload) as response:
                response.raise_for_status()
                content = (await response.json())["message"]["content"]
                # Falls through (returns None) when the reply is empty,
                # matching the upstream contract.
                if content:
                    return content
class DfeHub(AbstractProvider):
    # Deprecated provider for chat.dfehub.com's OpenAI-compatible endpoint.
    url = "https://chat.dfehub.com/"
    supports_stream = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:
        """Stream completion deltas for ``messages``.

        Always requests gpt-3.5-turbo with streaming enabled; sampling
        parameters are read from kwargs with OpenAI-style defaults.
        On a rate-limit response it sleeps for the server-advertised delay
        and retries recursively, streaming the retry's output instead.
        """

        # Browser-imitating headers required by the site.
        headers = {
            "authority"         : "chat.dfehub.com",
            "accept"            : "*/*",
            "accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "content-type"      : "application/json",
            "origin"            : "https://chat.dfehub.com",
            "referer"           : "https://chat.dfehub.com/",
            "sec-ch-ua"         : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            "sec-ch-ua-mobile"  : "?0",
            "sec-ch-ua-platform": '"macOS"',
            "sec-fetch-dest"    : "empty",
            "sec-fetch-mode"    : "cors",
            "sec-fetch-site"    : "same-origin",
            "user-agent"        : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
            "x-requested-with"  : "XMLHttpRequest",
        }

        json_data = {
            "messages"          : messages,
            "model"             : "gpt-3.5-turbo",
            "temperature"       : kwargs.get("temperature", 0.5),
            "presence_penalty"  : kwargs.get("presence_penalty", 0),
            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
            "top_p"             : kwargs.get("top_p", 1),
            "stream"            : True
        }

        # NOTE(review): 3-second timeout looks very tight for a streaming
        # completion — confirm this is intentional.
        response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
                                 headers=headers, json=json_data, timeout=3)

        for chunk in response.iter_lines():
            if b"detail" in chunk:
                # Rate-limit payload embeds the retry delay as the last
                # decimal number in its message; sleep then retry recursively.
                delay = re.findall(r"\d+\.\d+", chunk.decode())
                delay = float(delay[-1])
                time.sleep(delay)
                yield from DfeHub.create_completion(model, messages, stream, **kwargs)
            if b"content" in chunk:
                # SSE line: "data: {json}" — take the delta content.
                data = json.loads(chunk.decode().split("data: ")[1])
                yield (data["choices"][0]["delta"]["content"])
class NoowAi(AsyncGeneratorProvider):
    """Deprecated provider for noowai.com (WordPress "mwai" chat endpoint)."""
    url = "https://noowai.com"
    supports_message_history = True
    supports_gpt_35_turbo = True
    working = False

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield live response chunks for ``messages``.

        Raises:
            RuntimeError: on unparsable stream lines or server-reported errors.
        """
        # Browser-imitating headers required by the site.
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Alt-Used": "noowai.com",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers"
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "botId": "default",
                "customId": "d49bc3670c3d858458576d75c8ea0f5d",
                "session": "N/A",
                "chatId": get_random_string(),
                "contextId": 25,
                "messages": messages,
                "newMessage": messages[-1]["content"],
                "stream": True
            }
            async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    # Only server-sent "data: {json}" lines carry events.
                    if not line.startswith(b"data: "):
                        continue
                    # Bug fix: the old code rebound `line` to the parsed dict
                    # inside a bare except, so a well-formed line lacking
                    # "type" raised AttributeError (dict has no .decode)
                    # instead of the intended RuntimeError. Parse into a new
                    # name and check explicitly instead of using `assert`
                    # (which is stripped under -O).
                    try:
                        event = json.loads(line[6:])
                    except json.JSONDecodeError as err:
                        raise RuntimeError(f"Broken line: {line.decode()}") from err
                    if "type" not in event:
                        raise RuntimeError(f"Broken line: {line.decode()}")
                    if event["type"] == "live":
                        yield event["data"]
                    elif event["type"] == "end":
                        break
                    elif event["type"] == "error":
                        raise RuntimeError(event["data"])
class GetGpt(AbstractProvider):
    # Deprecated provider for chat.getgpt.world; the request body is
    # AES-encrypted client-side before being posted (see _encrypt below).
    url = 'https://chat.getgpt.world/'
    supports_stream = True
    working = False
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:
        """Stream completion deltas for ``messages``.

        Always requests gpt-3.5-turbo with streaming; sampling parameters
        come from kwargs with OpenAI-style defaults.
        """

        headers = {
            'Content-Type' : 'application/json',
            'Referer'      : 'https://chat.getgpt.world/',
            'user-agent'   : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        }

        # Serialize the real payload first; it is sent only in encrypted form.
        data = json.dumps(
            {
                'messages'          : messages,
                'frequency_penalty' : kwargs.get('frequency_penalty', 0),
                'max_tokens'        : kwargs.get('max_tokens', 4000),
                'model'             : 'gpt-3.5-turbo',
                'presence_penalty'  : kwargs.get('presence_penalty', 0),
                'temperature'       : kwargs.get('temperature', 1),
                'top_p'             : kwargs.get('top_p', 1),
                'stream'            : True,
                'uuid'              : str(uuid.uuid4())
            }
        )

        res = requests.post('https://chat.getgpt.world/api/chat/stream',
                            headers=headers, json={'signature': _encrypt(data)}, stream=True)

        res.raise_for_status()
        for line in res.iter_lines():
            if b'content' in line:
                # SSE line: "data: {json}" — yield the delta content.
                line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                yield (line_json['choices'][0]['delta']['content'])


def _encrypt(e: str) -> str:
    """Encrypt payload *e* the way the site's frontend does.

    A fresh random key and IV (each 16 ASCII-hex bytes, used as an AES-128
    key / CBC IV) are generated per request; the hex ciphertext is returned
    with key and IV appended in the clear — this is obfuscation to match the
    site's JS, not actual security.
    """
    t = os.urandom(8).hex().encode('utf-8')   # 16-byte key material
    n = os.urandom(8).hex().encode('utf-8')   # 16-byte IV
    r = e.encode('utf-8')

    cipher = AES.new(t, AES.MODE_CBC, n)
    ciphertext = cipher.encrypt(_pad_data(r))

    return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')


def _pad_data(data: bytes) -> bytes:
    """Apply PKCS#7 padding up to the AES block size (16 bytes)."""
    block_size = AES.block_size
    padding_size = block_size - len(data) % block_size
    padding = bytes([padding_size] * padding_size)

    return data + padding
-------------------------------------------------------------------------------- 1 | # Contributing to Translations for FreeGPT WebUI 🌐 2 | 3 | This README.md file was created to guide contributors on how to contribute translations to the FreeGPT WebUI project. The translations are located in the `translations` folder and are generated by `babel-flask`. 4 | 5 | ## Modifying an existing translation 6 | 7 | To modify an existing translation, follow the steps below: 8 | 9 | 1. Inside the `translations` folder, you will find all existing translations in the project. 10 | 11 | 2. Translate the strings in the `.po` file found within the folder of the language translation you want to modify.
12 | The identifiers of the strings to be translated are in the format `msgid "string"` and should be translated in the `msgstr ""` field. For example: 13 | 14 | ``` 15 | msgid "New Conversation" 16 | msgstr "Nova Conversa" 17 | ``` 18 | 19 | 3. Add your name and GitHub profile to the "Contributors" section at the beginning of the `.po` file. If there are already contributors, add your name to the list. For example: 20 | 21 | ``` 22 | # Portuguese (Brazil) translations for FreeGPT WebUI. 23 | # Copyright (C) 2023 FreeGPT WebUI. 24 | # This file is distributed under the same license as the FreeGPT WebUI 25 | # project. 26 | # Contributors: 27 | # Ramon - github.com/ramonvc/ 28 | # Your Name - github.com/yourprofile/ 29 | # ... 30 | ``` 31 | 32 | 4. Create a pull request with the changes so we can review and incorporate them into the project. 33 | 34 | ## Adding a new language 35 | 36 | To add a new language, follow the steps below: 37 | 38 | 1. Install `babel-flask` (if not already installed) using the command `pip install Flask-Babel`. 39 | 40 | 2. Inside the project folder use the following code to generate the folder with the translation template, replacing LANGUAGE_CODE with the language code. Example: en_US 41 | 42 | ``` 43 | pybabel init -i ./translations/messages.pot -d translations -l LANGUAGE_CODE 44 | ``` 45 | 46 | 3. Translate the strings in the created `.po` file. The identifiers of the strings to be translated are in the format `msgid "string"` and should be translated in the `msgstr ""` field. For example: 47 | 48 | ``` 49 | msgid "New Conversation" 50 | msgstr "Nova Conversa" 51 | ``` 52 | 53 | 4. Add your name and GitHub profile to the "Contributors" section at the beginning of the `.po` file. For example: 54 | 55 | ``` 56 | # Portuguese (Brazil) translations for FreeGPT WebUI. 57 | # Copyright (C) 2023 FreeGPT WebUI. 58 | # This file is distributed under the same license as the FreeGPT WebUI 59 | # project. 
class Aichat(AsyncProvider):
    # Deprecated provider for chat-gpt.org; requires browser cookies to pass
    # Cloudflare and impersonates Chrome via curl_cffi's StreamSession.
    url = "https://chat-gpt.org/chat"
    working = False
    supports_gpt_35_turbo = True

    @staticmethod
    async def create_async(
        model: str,
        messages: Messages,
        proxy: str = None, **kwargs) -> str:
        """Send ``messages`` as one prompt and return the reply text.

        Raises:
            RuntimeError: when no cookies are available.
            Exception: when the API reports an unsuccessful response.
        """

        # Caller-supplied cookies win; otherwise pull them from the browser.
        cookies = get_cookies('chat-gpt.org') if not kwargs.get('cookies') else kwargs.get('cookies')
        if not cookies:
            raise RuntimeError(
                "g4f.provider.Aichat requires cookies, [refresh https://chat-gpt.org on chrome]"
            )

        # Browser-imitating headers matching the impersonated Chrome profile.
        headers = {
            'authority': 'chat-gpt.org',
            'accept': '*/*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'content-type': 'application/json',
            'origin': 'https://chat-gpt.org',
            'referer': 'https://chat-gpt.org/chat',
            'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
        }

        async with StreamSession(headers=headers,
                                    cookies=cookies,
                                    timeout=6,
                                    proxies={"https": proxy} if proxy else None,
                                    impersonate="chrome110", verify=False) as session:

            json_data = {
                "message": format_prompt(messages),
                "temperature": kwargs.get('temperature', 0.5),
                "presence_penalty": 0,
                "top_p": kwargs.get('top_p', 1),
                "frequency_penalty": 0,
            }

            async with session.post("https://chat-gpt.org/api/text",
                                    json=json_data) as response:

                response.raise_for_status()
                result = await response.json()

                # NOTE(review): success is judged by the 'response' key but
                # the returned text comes from 'message' — confirm both keys
                # against the live API; this asymmetry looks suspicious.
                if not result['response']:
                    raise Exception(f"Error Response: {result}")

                return result["message"]
class Pi(AbstractProvider):
    """Provider for pi.ai using a requests session cloned from a real browser."""
    url = "https://pi.ai/talk"
    working = True
    supports_stream = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: Messages,
        stream: bool,
        session: Session = None,
        proxy: str = None,
        timeout: int = 180,
        conversation_id: str = None,
        **kwargs
    ) -> CreateResult:
        """Stream answer text for ``messages``.

        When no ``conversation_id`` is given, a new conversation is started and
        the whole history is flattened into one prompt; otherwise only the
        last message is sent to the existing conversation.
        """
        if not session:
            # A browser-derived session is needed to get past Cloudflare.
            session = get_session_from_browser(url=cls.url, proxy=proxy, timeout=timeout)
        if not conversation_id:
            conversation_id = cls.start_conversation(session)
            prompt = format_prompt(messages)
        else:
            prompt = messages[-1]["content"]
        answer = cls.ask(session, prompt, conversation_id)
        for line in answer:
            if "text" in line:
                yield line["text"]

    @classmethod
    def start_conversation(cls, session: Session) -> str:
        """Create a new conversation and return its sid."""
        response = session.post('https://pi.ai/api/chat/start', data="{}", headers={
            'accept': 'application/json',
            'x-api-version': '3'
        })
        if 'Just a moment' in response.text:
            raise RuntimeError('Error: Cloudflare detected')
        return response.json()['conversations'][0]['sid']

    # Consistency fix: these two were plain functions in the class body (no
    # decorator) and only worked because they were always called through the
    # class object. @staticmethod makes that contract explicit and keeps
    # instance access (`self.ask(...)`) safe as well.
    @staticmethod
    def get_chat_history(session: Session, conversation_id: str):
        """Fetch the stored history of ``conversation_id``."""
        params = {
            'conversation': conversation_id,
        }
        response = session.get('https://pi.ai/api/chat/history', params=params)
        if 'Just a moment' in response.text:
            raise RuntimeError('Error: Cloudflare detected')
        return response.json()

    @staticmethod
    def ask(session: Session, prompt: str, conversation_id: str):
        """POST ``prompt`` and yield decoded SSE payload dicts."""
        json_data = {
            'text': prompt,
            'conversation': conversation_id,
            'mode': 'BASE',
        }
        response = session.post('https://pi.ai/api/chat', json=json_data, stream=True)
        for line in response.iter_lines():
            if b'Just a moment' in line:
                raise RuntimeError('Error: Cloudflare detected')
            # Both text and title events carry a JSON object after "data: ";
            # a single tuple-based startswith replaces the two identical
            # branches of the original.
            if line.startswith((b'data: {"text":', b'data: {"title":')):
                yield json.loads(line.split(b'data: ')[1])
class HuggingChat(AsyncGeneratorProvider):
    # Provider for huggingface.co/chat; requires HuggingFace account cookies.
    url = "https://huggingface.co/chat"
    working = True
    model = "meta-llama/Llama-2-70b-chat-hf"  # default model when none given

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        proxy: str = None,
        web_search: bool = False,
        cookies: dict = None,
        **kwargs
    ) -> AsyncResult:
        """Yield streamed tokens for ``messages``.

        A fresh conversation is created per call, streamed from, and deleted
        again afterwards. ``web_search`` enables HuggingChat's search tool.

        Raises:
            RuntimeError: when a stream line has no "type" field.
        """
        # Resolve the model: fall back to the class default, then apply the
        # module-level alias `map` (e.g. openchat_3.5 -> openchat-3.5-1210).
        if not model:
            model = cls.model
        elif model in map:
            model = map[model]
        if not cookies:
            cookies = get_cookies(".huggingface.co")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        }
        async with ClientSession(
            cookies=cookies,
            headers=headers
        ) as session:
            # Create a conversation bound to the chosen model.
            async with session.post(f"{cls.url}/conversation", json={"model": model}, proxy=proxy) as response:
                conversation_id = (await response.json())["conversationId"]

            send = {
                "id": str(uuid.uuid4()),
                "inputs": format_prompt(messages),
                "is_retry": False,
                "response_id": str(uuid.uuid4()),
                "web_search": web_search
            }
            async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response:
                first_token = True
                async for line in response.content:
                    # Each line is a JSON object; line[:-1] drops the trailing
                    # newline before parsing.
                    line = json.loads(line[:-1])
                    if "type" not in line:
                        raise RuntimeError(f"Response: {line}")
                    elif line["type"] == "stream":
                        token = line["token"]
                        if first_token:
                            # The model tends to start with leading whitespace;
                            # strip it from the first token only.
                            token = token.lstrip()
                            first_token = False
                        yield token
                    elif line["type"] == "finalAnswer":
                        break

            # Clean up: delete the throwaway conversation from the account.
            async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
                response.raise_for_status()