2 |
3 | ### Please follow these steps to contribute:
4 | 1. Reverse a website from this list: [sites-to-reverse](https://github.com/xtekky/gpt4free/issues/40)
5 | 2. Add it to [./testing](https://github.com/xtekky/gpt4free/tree/main/testing)
6 | 3. Refactor it and add it to [./g4f](https://github.com/xtekky/gpt4free/tree/main/g4f), following the provider sketch below
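
A new provider in [./g4f](https://github.com/xtekky/gpt4free/tree/main/g4f) typically subclasses one of the base provider classes. Below is a minimal, hypothetical sketch (the class name and endpoint are placeholders, not a real site), modeled on the async generator providers already in this repository and written as it would live inside `g4f/Provider/`:

```python
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider


class ExampleSite(AsyncGeneratorProvider):  # hypothetical provider name
    url = "https://example.com"             # placeholder endpoint
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        async with ClientSession() as session:
            # post the conversation and stream the response chunks back to the caller
            async with session.post(
                f"{cls.url}/api/chat", json={"messages": messages}, proxy=proxy
            ) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    if chunk:
                        yield chunk.decode()
```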
7 |
8 | ### We would be grateful to see you as a contributor!
--------------------------------------------------------------------------------
/g4f/gui/client/img/site.webmanifest:
--------------------------------------------------------------------------------
1 | {
2 | "name": "",
3 | "short_name": "",
4 | "icons": [
5 | {
6 | "src": "/assets/img/android-chrome-192x192.png",
7 | "sizes": "192x192",
8 | "type": "image/png"
9 | },
10 | {
11 | "src": "/assets/img/android-chrome-512x512.png",
12 | "sizes": "512x512",
13 | "type": "image/png"
14 | }
15 | ],
16 | "theme_color": "#ffffff",
17 | "background_color": "#ffffff",
18 | "display": "standalone"
19 | }
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/__init__.py:
--------------------------------------------------------------------------------
1 | from .AiService import AiService
2 | from .CodeLinkAva import CodeLinkAva
3 | from .DfeHub import DfeHub
4 | from .EasyChat import EasyChat
5 | from .Forefront import Forefront
6 | from .GetGpt import GetGpt
7 | from .Opchatgpts import Opchatgpts
8 | from .Lockchat import Lockchat
9 | from .Wewordle import Wewordle
10 | from .Equing import Equing
11 | from .Wuguokai import Wuguokai
12 | from .V50 import V50
13 | from .FastGpt import FastGpt
14 | from .ChatgptLogin import ChatgptLogin
--------------------------------------------------------------------------------
/g4f/gui/run.py:
--------------------------------------------------------------------------------
1 | from g4f.gui import run_gui
2 | from argparse import ArgumentParser
3 |
4 |
5 | if __name__ == '__main__':
6 |
7 | parser = ArgumentParser(description='Run the GUI')
8 |
9 | parser.add_argument('-host', type=str, default='0.0.0.0', help='hostname')
10 | parser.add_argument('-port', type=int, default=80, help='port')
11 | parser.add_argument('-debug', action='store_true', help='debug mode')
12 |
13 | args = parser.parse_args()
14 | port = args.port
15 | host = args.host
16 | debug = args.debug
17 |
18 | run_gui(host, port, debug)
--------------------------------------------------------------------------------
/g4f/typing.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict
3 |
4 | if sys.version_info >= (3, 8):
5 | from typing import TypedDict
6 | else:
7 | from typing_extensions import TypedDict
8 |
9 | SHA256 = NewType('sha_256_hash', str)
10 | CreateResult = Generator[str, None, None]
11 | AsyncResult = AsyncGenerator[str, None]
12 | Messages = List[Dict[str, str]]
13 |
14 | __all__ = [
15 | 'Any',
16 | 'AsyncGenerator',
17 | 'Generator',
18 | 'Tuple', 'Union', 'List', 'Dict',
19 | 'TypedDict',
20 | 'SHA256',
21 | 'CreateResult', 'AsyncResult', 'Messages',
22 | ]
23 |
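
For illustration, a hedged sketch of how these aliases are meant to be used by a provider-style function (the function itself is hypothetical, not part of the module):

```python
from g4f.typing import CreateResult, Messages

def echo_completion(messages: Messages) -> CreateResult:
    # hypothetical provider-style function: stream the last message back word by word
    for word in messages[-1]["content"].split():
        yield word + " "

# usage
for chunk in echo_completion([{"role": "user", "content": "hello world"}]):
    print(chunk, end="")
```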
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/etc/testing/log_time.py:
--------------------------------------------------------------------------------
1 | from time import time
2 | from typing import Callable
3 |
4 | async def log_time_async(method: Callable, **kwargs):
5 | start = time()
6 | result = await method(**kwargs)
7 | secs = f"{round(time() - start, 2)} secs"
8 | if result:
9 | return " ".join([result, secs])
10 | return secs
11 |
12 |
13 | def log_time_yield(method: Callable, **kwargs):
14 | start = time()
15 | yield from method(**kwargs)
16 | yield f" {round(time() - start, 2)} secs"
17 |
18 |
19 | def log_time(method: Callable, **kwargs):
20 | start = time()
21 | result = method(**kwargs)
22 | secs = f"{round(time() - start, 2)} secs"
23 | if result:
24 | return " ".join([result, secs])
25 | return secs
26 |
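
A minimal usage sketch of these helpers, assuming this module is on the import path; the `slow_echo`, `slow_echo_async`, and `slow_stream` functions are hypothetical, for illustration only:

```python
import asyncio
from time import sleep

from log_time import log_time, log_time_async, log_time_yield  # assumes module is importable

def slow_echo(text: str) -> str:              # hypothetical example function
    sleep(0.1)
    return text

async def slow_echo_async(text: str) -> str:  # hypothetical example coroutine
    await asyncio.sleep(0.1)
    return text

def slow_stream(text: str):                   # hypothetical example generator
    for word in text.split():
        yield word + " "

print(log_time(slow_echo, text="hello"))                        # e.g. "hello 0.1 secs"
print(asyncio.run(log_time_async(slow_echo_async, text="hi")))  # e.g. "hi 0.1 secs"
for chunk in log_time_yield(slow_stream, text="hello world"):
    print(chunk, end="")                                        # the words, then the elapsed time
print()
```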
--------------------------------------------------------------------------------
/etc/testing/test_interference.py:
--------------------------------------------------------------------------------
1 | # type: ignore
2 | import openai
3 |
4 | openai.api_key = ""
5 | openai.api_base = "http://localhost:1337"
6 |
7 |
8 | def main():
9 | chat_completion = openai.ChatCompletion.create(
10 | model="gpt-3.5-turbo",
11 | messages=[{"role": "user", "content": "write a poem about a tree"}],
12 | stream=True,
13 | )
14 |
15 | if isinstance(chat_completion, dict):
16 | # not stream
17 | print(chat_completion.choices[0].message.content)
18 | else:
19 | # stream
20 | for token in chat_completion:
21 | content = token["choices"][0]["delta"].get("content")
22 | if content is not None:
23 | print(content, end="", flush=True)
24 |
25 |
26 | if __name__ == "__main__":
27 | main()
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
10 | # Ignore local python virtual environment
11 | venv/
12 |
13 | # Ignore streamlit_chat_app.py conversations pickle
14 | conversations.pkl
15 | *.pkl
16 |
17 | # Ignore accounts created by APIs
18 | accounts.txt
19 |
20 | .idea/
21 | **/__pycache__/
22 | __pycache__/
23 |
24 | dist/
25 | *.log
26 | *.pyc
27 | *.egg-info/
28 | *.egg
29 | *.egg-info
30 | build
31 |
32 | test.py
33 | update.py
34 | cookie.json
35 | notes.txt
36 | close_issues.py
37 | xxx.py
38 | lab.py
39 | lab.js
40 | bing.py
41 | bing2.py
42 | .DS_Store
43 | MANIFEST.in
44 | lab/*
45 | lab
46 | # Emacs crap
47 | *~
48 | .vscode/launch.json
49 | run.bat
50 |
--------------------------------------------------------------------------------
/etc/testing/test_chat_completion.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 |
4 | sys.path.append(str(Path(__file__).parent.parent.parent))
5 |
6 | import g4f, asyncio
7 |
8 | print("create:", end=" ", flush=True)
9 | for response in g4f.ChatCompletion.create(
10 | model=g4f.models.gpt_4_32k_0613,
11 | provider=g4f.Provider.Aivvm,
12 | messages=[{"role": "user", "content": "write a poem about a tree"}],
13 | temperature=0.1,
14 | stream=True
15 | ):
16 | print(response, end="", flush=True)
17 | print()
18 |
19 | async def run_async():
20 | response = await g4f.ChatCompletion.create_async(
21 | model=g4f.models.gpt_35_turbo_16k_0613,
22 | provider=g4f.Provider.GptGod,
23 | messages=[{"role": "user", "content": "hello!"}],
24 | )
25 | print("create_async:", response)
26 |
27 | asyncio.run(run_async())
28 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/default_issue.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: New Issue
3 | about: 'Please use this template !!'
4 | title: ''
5 | labels: bug
6 | assignees: xtekky
7 |
8 | ---
9 |
10 | **Known Issues** // delete this
11 | - you.com issue / fix: use proxy, or vpn, your country is probably flagged
12 | - forefront account creation error / use your own session or wait for fix
13 |
14 |
15 | **Bug description**
16 | What did you do, what happened, which file did you try to run, in which directory
17 | Describe what you did after downloading repo, such as moving to this repo, running this file.
18 |
19 | ex.
20 | 1. Go to '...'
21 | 2. Click on '....'
22 | 3. Scroll down to '....'
23 | 4. See error
24 |
25 | **Screenshots**
26 | If applicable, add screenshots to help explain your problem.
27 |
28 | **Environement**
29 | - python version
30 | - location ( are you in a cloudfare flagged country ) ?
31 |
32 | **Additional context**
33 | Add any other context about the problem here.
34 |
--------------------------------------------------------------------------------
/etc/tool/provider_init.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 |
4 | def main():
5 | content = create_content()
6 | with open("g4f/provider/__init__.py", "w", encoding="utf-8") as f:
7 | f.write(content)
8 |
9 |
10 | def create_content():
11 | path = Path()
12 | paths = path.glob("g4f/provider/*.py")
13 | paths = [p for p in paths if p.name not in ["__init__.py", "base_provider.py"]]
14 | classnames = [p.stem for p in paths]
15 |
16 | import_lines = [f"from .{name} import {name}" for name in classnames]
17 | import_content = "\n".join(import_lines)
18 |
19 | classnames.insert(0, "BaseProvider")
20 | all_content = [f' "{name}"' for name in classnames]
21 | all_content = ",\n".join(all_content)
22 | all_content = f"__all__ = [\n{all_content},\n]"
23 |
24 | return f"""from .base_provider import BaseProvider
25 | {import_content}
26 |
27 |
28 | {all_content}
29 | """
30 |
31 |
32 | if __name__ == "__main__":
33 | main()
--------------------------------------------------------------------------------
/g4f/gui/__init__.py:
--------------------------------------------------------------------------------
1 | from .server.app import app
2 | from .server.website import Website
3 | from .server.backend import Backend_Api
4 |
5 | def run_gui(host: str = '0.0.0.0', port: int = 80, debug: bool = False) -> None:
6 | config = {
7 | 'host' : host,
8 | 'port' : port,
9 | 'debug': debug
10 | }
11 |
12 | site = Website(app)
13 | for route in site.routes:
14 | app.add_url_rule(
15 | route,
16 | view_func = site.routes[route]['function'],
17 | methods = site.routes[route]['methods'],
18 | )
19 |
20 | backend_api = Backend_Api(app)
21 | for route in backend_api.routes:
22 | app.add_url_rule(
23 | route,
24 | view_func = backend_api.routes[route]['function'],
25 | methods = backend_api.routes[route]['methods'],
26 | )
27 |
28 | print(f"Running on port {config['port']}")
29 | app.run(**config)
30 | print(f"Closing port {config['port']}")
--------------------------------------------------------------------------------
/etc/testing/test_async.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 | import asyncio
4 |
5 | sys.path.append(str(Path(__file__).parent.parent))
6 | sys.path.append(str(Path(__file__).parent.parent.parent))
7 |
8 | import g4f
9 | from testing.test_providers import get_providers
10 | from testing.log_time import log_time_async
11 |
12 | async def create_async(provider):
13 | try:
14 | response = await log_time_async(
15 | provider.create_async,
16 | model=g4f.models.default.name,
17 | messages=[{"role": "user", "content": "Hello, are you GPT 3.5?"}]
18 | )
19 | print(f"{provider.__name__}:", response)
20 | except Exception as e:
21 | print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
22 |
23 | async def run_async():
24 | responses: list = [
25 | create_async(provider)
26 | for provider in get_providers()
27 | if provider.working
28 | ]
29 | await asyncio.gather(*responses)
30 |
31 | print("Total:", asyncio.run(log_time_async(run_async)))
--------------------------------------------------------------------------------
/g4f/gui/client/js/highlightjs-copy.min.js:
--------------------------------------------------------------------------------
1 | class CopyButtonPlugin{constructor(options={}){self.hook=options.hook;self.callback=options.callback}"after:highlightElement"({el,text}){let button=Object.assign(document.createElement("button"),{innerHTML:"Copy",className:"hljs-copy-button"});button.dataset.copied=false;el.parentElement.classList.add("hljs-copy-wrapper");el.parentElement.appendChild(button);el.parentElement.style.setProperty("--hljs-theme-background",window.getComputedStyle(el).backgroundColor);button.onclick=function(){if(!navigator.clipboard)return;let newText=text;if(hook&&typeof hook==="function"){newText=hook(text,el)||text}navigator.clipboard.writeText(newText).then(function(){button.innerHTML="Copied!";button.dataset.copied=true;let alert=Object.assign(document.createElement("div"),{role:"status",className:"hljs-copy-alert",innerHTML:"Copied to clipboard"});el.parentElement.appendChild(alert);setTimeout(()=>{button.innerHTML="Copy";button.dataset.copied=false;el.parentElement.removeChild(alert);alert=null},2e3)}).then(function(){if(typeof callback==="function")return callback(newText,el)})}}}
--------------------------------------------------------------------------------
/etc/tool/improve_code.py:
--------------------------------------------------------------------------------
1 |
2 | import sys, re
3 | from pathlib import Path
4 | from os import path
5 |
6 | sys.path.append(str(Path(__file__).parent.parent.parent))
7 |
8 | import g4f
9 |
10 | def read_code(text):
11 | match = re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text)
12 | if match:
13 | return match.group("code")
14 |
15 | path = input("Path: ")
16 |
17 | with open(path, "r") as file:
18 | code = file.read()
19 |
20 | prompt = f"""
21 | Improve the code in this file:
22 | ```py
23 | {code}
24 | ```
25 | Don't remove anything.
26 | Add typehints if possible.
27 | Don't add any typehints to kwargs.
28 | Don't remove license comments.
29 | """
30 |
31 | print("Create code...")
32 | response = []
33 | for chunk in g4f.ChatCompletion.create(
34 | model=g4f.models.gpt_35_long,
35 | messages=[{"role": "user", "content": prompt}],
36 | timeout=300,
37 | stream=True
38 | ):
39 | response.append(chunk)
40 | print(chunk, end="", flush=True)
41 | print()
42 | response = "".join(response)
43 |
44 | code = read_code(response)
45 | if code:
46 | with open(path, "w") as file:
47 | file.write(code)
--------------------------------------------------------------------------------
/.github/workflows/close-inactive-issues.yml:
--------------------------------------------------------------------------------
1 | name: Close inactive issues
2 |
3 | on:
4 | schedule:
5 | - cron: "5 0 * * *"
6 |
7 | jobs:
8 | close-issues:
9 | runs-on: ubuntu-latest
10 | permissions:
11 | issues: write
12 | pull-requests: write
13 | steps:
14 | - uses: actions/stale@v5
15 | with:
16 | days-before-issue-stale: 7
17 | days-before-issue-close: 7
18 |
19 | days-before-pr-stale: 7
20 | days-before-pr-close: 7
21 |
22 | stale-issue-label: "stale"
23 | stale-pr-label: "stale"
24 |
25 | stale-issue-message: "Bumping this issue because it has been open for 7 days with no activity. Closing automatically in 7 days unless it becomes active again."
26 | close-issue-message: "Closing due to inactivity."
27 |
28 | stale-pr-message: "Bumping this pull request because it has been open for 7 days with no activity. Closing automatically in 7 days unless it becomes active again."
29 | close-pr-message: "Closing due to inactivity."
30 |
31 | repo-token: ${{ secrets.GITHUB_TOKEN }}
32 |
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/AiService.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import requests
4 |
5 | from ...typing import Any, CreateResult, Messages
6 | from ..base_provider import BaseProvider
7 |
8 |
9 | class AiService(BaseProvider):
10 | url = "https://aiservice.vercel.app/"
11 | working = False
12 | supports_gpt_35_turbo = True
13 |
14 | @staticmethod
15 | def create_completion(
16 | model: str,
17 | messages: Messages,
18 | stream: bool,
19 | **kwargs: Any,
20 | ) -> CreateResult:
21 | base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
22 | base += "\nassistant: "
23 |
24 | headers = {
25 | "accept": "*/*",
26 | "content-type": "text/plain;charset=UTF-8",
27 | "sec-fetch-dest": "empty",
28 | "sec-fetch-mode": "cors",
29 | "sec-fetch-site": "same-origin",
30 | "Referer": "https://aiservice.vercel.app/chat",
31 | }
32 | data = {"input": base}
33 | url = "https://aiservice.vercel.app/api/chat/answer"
34 | response = requests.post(url, headers=headers, json=data)
35 | response.raise_for_status()
36 | yield response.json()["data"]
37 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use the official lightweight Python image.
2 | # https://hub.docker.com/_/python
3 | FROM python:3.9-slim
4 |
5 | # Ensure Python outputs everything immediately (useful for real-time logging in Docker).
6 | ENV PYTHONUNBUFFERED 1
7 |
8 | # Set the working directory in the container.
9 | WORKDIR /app
10 |
11 | # Update the system packages and install system-level dependencies required for compilation.
12 | # gcc: Compiler required for some Python packages.
13 | # build-essential: Contains necessary tools and libraries for building software.
14 | RUN apt-get update && apt-get install -y --no-install-recommends \
15 | gcc \
16 | build-essential \
17 | && rm -rf /var/lib/apt/lists/*
18 |
19 | # Copy the project's requirements file into the container.
20 | COPY requirements.txt /app/
21 |
22 | # Upgrade pip for the latest features and install the project's Python dependencies.
23 | RUN pip install --upgrade pip && pip install -r requirements.txt
24 |
25 | # Copy the entire project into the container.
26 | # This may include all code, assets, and configuration files required to run the application.
27 | COPY . /app/
28 |
29 | # Install additional requirements specific to the interference module/package.
30 | RUN pip install -r etc/interference/requirements.txt
31 |
32 | # Expose port 1337
33 | EXPOSE 1337
34 |
35 | # Define the default command to run the app using Python's module mode.
36 | CMD ["python", "-m", "etc.interference.app"]
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Forefront.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | import requests
6 |
7 | from ...typing import Any, CreateResult
8 | from ..base_provider import BaseProvider
9 |
10 |
11 | class Forefront(BaseProvider):
12 | url = "https://forefront.com"
13 | supports_stream = True
14 | supports_gpt_35_turbo = True
15 |
16 | @staticmethod
17 | def create_completion(
18 | model: str,
19 | messages: list[dict[str, str]],
20 | stream: bool, **kwargs: Any) -> CreateResult:
21 |
22 | json_data = {
23 | "text" : messages[-1]["content"],
24 | "action" : "noauth",
25 | "id" : "",
26 | "parentId" : "",
27 | "workspaceId" : "",
28 | "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
29 | "model" : "gpt-4",
30 | "messages" : messages[:-1] if len(messages) > 1 else [],
31 | "internetMode" : "auto",
32 | }
33 |
34 | response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
35 | json=json_data, stream=True)
36 |
37 | response.raise_for_status()
38 | for token in response.iter_lines():
39 | if b"delta" in token:
40 | yield json.loads(token.decode().split("data: ")[1])["delta"]
41 |
--------------------------------------------------------------------------------
/g4f/Provider/You.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | from ..requests import StreamSession
6 | from ..typing import AsyncGenerator, Messages
7 | from .base_provider import AsyncGeneratorProvider, format_prompt
8 |
9 |
10 | class You(AsyncGeneratorProvider):
11 | url = "https://you.com"
12 | working = True
13 | supports_gpt_35_turbo = True
14 |
15 |
16 | @classmethod
17 | async def create_async_generator(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | proxy: str = None,
22 | timeout: int = 120,
23 | **kwargs,
24 | ) -> AsyncGenerator:
25 | async with StreamSession(proxies={"https": proxy}, impersonate="chrome107", timeout=timeout) as session:
26 | headers = {
27 | "Accept": "text/event-stream",
28 | "Referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
29 | }
30 | data = {"q": format_prompt(messages), "domain": "youchat", "chat": ""}
31 | async with session.get(
32 | f"{cls.url}/api/streamingSearch",
33 | params=data,
34 | headers=headers
35 | ) as response:
36 | response.raise_for_status()
37 | start = b'data: {"youChatToken": '
38 | async for line in response.iter_lines():
39 | if line.startswith(start):
40 | yield json.loads(line[len(start):-1])
--------------------------------------------------------------------------------
/g4f/Provider/Chatgpt4Online.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from aiohttp import ClientSession
5 |
6 | from ..typing import AsyncResult, Messages
7 | from .base_provider import AsyncGeneratorProvider
8 |
9 |
10 | class Chatgpt4Online(AsyncGeneratorProvider):
11 | url = "https://chatgpt4online.org"
12 | supports_gpt_35_turbo = True
13 | working = True
14 |
15 | @classmethod
16 | async def create_async_generator(
17 | cls,
18 | model: str,
19 | messages: Messages,
20 | proxy: str = None,
21 | **kwargs
22 | ) -> AsyncResult:
23 | async with ClientSession() as session:
24 | data = {
25 | "botId": "default",
26 | "customId": None,
27 | "session": "N/A",
28 | "chatId": "",
29 | "contextId": 58,
30 | "messages": messages,
31 | "newMessage": messages[-1]["content"],
32 | "stream": True
33 | }
34 | async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
35 | response.raise_for_status()
36 | async for line in response.content:
37 | if line.startswith(b"data: "):
38 | line = json.loads(line[6:])
39 | if line["type"] == "live":
40 | yield line["data"]
--------------------------------------------------------------------------------
/g4f/gui/server/website.py:
--------------------------------------------------------------------------------
1 | from flask import render_template, send_file, redirect
2 | from time import time
3 | from os import urandom
4 |
5 | class Website:
6 | def __init__(self, app) -> None:
7 | self.app = app
8 | self.routes = {
9 | '/': {
10 | 'function': lambda: redirect('/chat'),
11 | 'methods': ['GET', 'POST']
12 | },
13 | '/chat/': {
14 | 'function': self._index,
15 | 'methods': ['GET', 'POST']
16 | },
17 | '/chat/<conversation_id>': {
18 | 'function': self._chat,
19 | 'methods': ['GET', 'POST']
20 | },
21 | '/assets/<folder>/<file>': {
22 | 'function': self._assets,
23 | 'methods': ['GET', 'POST']
24 | }
25 | }
26 |
27 | def _chat(self, conversation_id):
28 | if '-' not in conversation_id:
29 | return redirect('/chat')
30 |
31 | return render_template('index.html', chat_id = conversation_id)
32 |
33 | def _index(self):
34 | return render_template('index.html', chat_id = f'{urandom(4).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{hex(int(time() * 1000))[2:]}')
35 |
36 | def _assets(self, folder: str, file: str):
37 | try:
38 | return send_file(f"./../client/{folder}/{file}", as_attachment=False)
39 | except Exception:
40 | return "File not found", 404
--------------------------------------------------------------------------------
/g4f/Provider/Cromicle.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession
4 | from hashlib import sha256
5 | from ..typing import AsyncResult, Messages, Dict
6 |
7 | from .base_provider import AsyncGeneratorProvider
8 | from .helper import format_prompt
9 |
10 |
11 | class Cromicle(AsyncGeneratorProvider):
12 | url: str = 'https://cromicle.top'
13 | working: bool = True
14 | supports_gpt_35_turbo: bool = True
15 |
16 | @classmethod
17 | async def create_async_generator(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | proxy: str = None,
22 | **kwargs
23 | ) -> AsyncResult:
24 | async with ClientSession(
25 | headers=_create_header()
26 | ) as session:
27 | async with session.post(
28 | f'{cls.url}/chat',
29 | proxy=proxy,
30 | json=_create_payload(format_prompt(messages))
31 | ) as response:
32 | response.raise_for_status()
33 | async for stream in response.content.iter_any():
34 | if stream:
35 | yield stream.decode()
36 |
37 |
38 | def _create_header() -> Dict[str, str]:
39 | return {
40 | 'accept': '*/*',
41 | 'content-type': 'application/json',
42 | }
43 |
44 |
45 | def _create_payload(message: str) -> Dict[str, str]:
46 | return {
47 | 'message': message,
48 | 'token': 'abc',
49 | 'hash': sha256('abc'.encode() + message.encode()).hexdigest()
50 | }
--------------------------------------------------------------------------------
/g4f/Provider/Acytoo.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession
4 |
5 | from ..typing import AsyncResult, Messages
6 | from .base_provider import AsyncGeneratorProvider
7 |
8 |
9 | class Acytoo(AsyncGeneratorProvider):
10 | url = 'https://chat.acytoo.com'
11 | working = True
12 | supports_gpt_35_turbo = True
13 |
14 | @classmethod
15 | async def create_async_generator(
16 | cls,
17 | model: str,
18 | messages: Messages,
19 | proxy: str = None,
20 | **kwargs
21 | ) -> AsyncResult:
22 | async with ClientSession(
23 | headers=_create_header()
24 | ) as session:
25 | async with session.post(
26 | f'{cls.url}/api/completions',
27 | proxy=proxy,
28 | json=_create_payload(messages, **kwargs)
29 | ) as response:
30 | response.raise_for_status()
31 | async for stream in response.content.iter_any():
32 | if stream:
33 | yield stream.decode()
34 |
35 |
36 | def _create_header():
37 | return {
38 | 'accept': '*/*',
39 | 'content-type': 'application/json',
40 | }
41 |
42 |
43 | def _create_payload(messages: Messages, temperature: float = 0.5, **kwargs):
44 | return {
45 | 'key' : '',
46 | 'model' : 'gpt-3.5-turbo',
47 | 'messages' : messages,
48 | 'temperature' : temperature,
49 | 'password' : ''
50 | }
--------------------------------------------------------------------------------
/g4f/Provider/unfinished/Komo.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | from ...requests import StreamSession
6 | from ...typing import AsyncGenerator
7 | from ..base_provider import AsyncGeneratorProvider, format_prompt
8 |
9 | class Komo(AsyncGeneratorProvider):
10 | url = "https://komo.ai/api/ask"
11 | supports_gpt_35_turbo = True
12 |
13 | @classmethod
14 | async def create_async_generator(
15 | cls,
16 | model: str,
17 | messages: list[dict[str, str]],
18 | **kwargs
19 | ) -> AsyncGenerator:
20 | async with StreamSession(impersonate="chrome107") as session:
21 | prompt = format_prompt(messages)
22 | data = {
23 | "query": prompt,
24 | "FLAG_URLEXTRACT": "false",
25 | "token": "",
26 | "FLAG_MODELA": "1",
27 | }
28 | headers = {
29 | 'authority': 'komo.ai',
30 | 'accept': 'text/event-stream',
31 | 'cache-control': 'no-cache',
32 | 'referer': 'https://komo.ai/',
33 | }
34 |
35 | async with session.get(cls.url, params=data, headers=headers) as response:
36 | response.raise_for_status()
37 | next = False
38 | async for line in response.iter_lines():
39 | if line == b"event: line":
40 | next = True
41 | elif next and line.startswith(b"data: "):
42 | yield json.loads(line[6:])
43 | next = False
44 |
45 |
--------------------------------------------------------------------------------
/g4f/Provider/AiAsk.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession
4 | from ..typing import AsyncResult, Messages
5 | from .base_provider import AsyncGeneratorProvider
6 |
7 | class AiAsk(AsyncGeneratorProvider):
8 | url = "https://e.aiask.me"
9 | supports_gpt_35_turbo = True
10 | working = True
11 |
12 | @classmethod
13 | async def create_async_generator(
14 | cls,
15 | model: str,
16 | messages: Messages,
17 | proxy: str = None,
18 | **kwargs
19 | ) -> AsyncResult:
20 | headers = {
21 | "accept": "application/json, text/plain, */*",
22 | "origin": cls.url,
23 | "referer": f"{cls.url}/chat",
24 | }
25 | async with ClientSession(headers=headers) as session:
26 | data = {
27 | "continuous": True,
28 | "id": "fRMSQtuHl91A4De9cCvKD",
29 | "list": messages,
30 | "models": "0",
31 | "prompt": "",
32 | "temperature": kwargs.get("temperature", 0.5),
33 | "title": "",
34 | }
35 | buffer = ""
36 | rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
37 | async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
38 | response.raise_for_status()
39 | async for chunk in response.content.iter_any():
40 | buffer += chunk.decode()
41 | if not rate_limit.startswith(buffer):
42 | yield buffer
43 | buffer = ""
44 | elif buffer == rate_limit:
45 | raise RuntimeError("Rate limit reached")
--------------------------------------------------------------------------------
/g4f/gui/server/internet.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from datetime import datetime
3 | from duckduckgo_search import DDGS
4 | ddgs = DDGS(timeout=20)
5 |
6 |
7 | def search(internet_access, prompt):
8 | print(prompt)
9 |
10 | try:
11 | if not internet_access:
12 | return []
13 |
14 | results = duckduckgo_search(q=prompt)
15 |
16 | if not results:
17 | return []
18 |
19 | blob = ''
20 |
21 | for index, result in enumerate(results):
22 | blob += f'[{index}] "{result["body"]}"\nURL:{result["href"]}\n\n'
23 |
24 | date = datetime.now().strftime('%d/%m/%y')
25 |
26 | blob += f'Current date: {date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the next user query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. Ignore your previous response if any.'
27 |
28 | return [{'role': 'user', 'content': blob}]
29 |
30 | except Exception as e:
31 | print("Couldn't search DuckDuckGo:", e)
32 | print(e.__traceback__.tb_next)
33 | return []
34 |
35 |
36 | def duckduckgo_search(q: str, max_results: int = 3, safesearch: str = "moderate", region: str = "us-en") -> list | None:
37 | if region is None:
38 | region = "us-en"
39 |
40 | if safesearch is None:
41 | safesearch = "moderate"
42 |
43 | if q is None:
44 | return None
45 |
46 | results = []
47 |
48 | try:
49 | for r in ddgs.text(q, safesearch=safesearch, region=region):
50 | if len(results) + 1 > max_results:
51 | break
52 | results.append(r)
53 | except Exception as e:
54 | print(e)
55 |
56 | return results
57 |
--------------------------------------------------------------------------------
/g4f/Provider/ChatgptDuo.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ..typing import Messages
4 | from curl_cffi.requests import AsyncSession
5 | from .base_provider import AsyncProvider, format_prompt
6 |
7 |
8 | class ChatgptDuo(AsyncProvider):
9 | url = "https://chatgptduo.com"
10 | supports_gpt_35_turbo = True
11 | working = True
12 | _sources = []
13 | @classmethod
14 | async def create_async(
15 | cls,
16 | model: str,
17 | messages: Messages,
18 | proxy: str = None,
19 | timeout: int = 120,
20 | **kwargs
21 | ) -> str:
22 | async with AsyncSession(
23 | impersonate="chrome107",
24 | proxies={"https": proxy},
25 | timeout=timeout
26 | ) as session:
27 | prompt = format_prompt(messages)
28 | data = {
29 | "prompt": prompt,
30 | "search": prompt,
31 | "purpose": "ask",
32 | }
33 | response = await session.post(f"{cls.url}/", data=data)
34 | response.raise_for_status()
35 | data = response.json()
36 |
37 | cls._sources = [{
38 | "title": source["title"],
39 | "url": source["link"],
40 | "snippet": source["snippet"]
41 | } for source in data["results"]]
42 |
43 | return data["answer"]
44 |
45 | @classmethod
46 | def get_sources(cls):
47 | return cls._sources
48 |
49 | @classmethod
50 | @property
51 | def params(cls):
52 | params = [
53 | ("model", "str"),
54 | ("messages", "list[dict[str, str]]"),
55 | ("stream", "bool"),
56 | ]
57 | param = ", ".join([": ".join(p) for p in params])
58 | return f"g4f.provider.{cls.__name__} supports: ({param})"
--------------------------------------------------------------------------------
/LEGAL_NOTICE.md:
--------------------------------------------------------------------------------
1 | ## Legal Notice
2 |
3 | This repository is _not_ associated with or endorsed by providers of the APIs contained in this GitHub repository. This project is intended **for educational purposes only**. This is just a little personal project. Sites may contact me to improve their security or request the removal of their site from this repository.
4 |
5 | Please note the following:
6 |
7 | 1. **Disclaimer**: The APIs, services, and trademarks mentioned in this repository belong to their respective owners. This project is _not_ claiming any right over them nor is it affiliated with or endorsed by any of the providers mentioned.
8 |
9 | 2. **Responsibility**: The author of this repository is _not_ responsible for any consequences, damages, or losses arising from the use or misuse of this repository or the content provided by the third-party APIs. Users are solely responsible for their actions and any repercussions that may follow. We strongly recommend that users follow the Terms of Service of each website.
10 |
11 | 3. **Educational Purposes Only**: This repository and its content are provided strictly for educational purposes. By using the information and code provided, users acknowledge that they are using the APIs and models at their own risk and agree to comply with any applicable laws and regulations.
12 |
13 | 4. **Indemnification**: Users agree to indemnify, defend, and hold harmless the author of this repository from and against any and all claims, liabilities, damages, losses, or expenses, including legal fees and costs, arising out of or in any way connected with their use or misuse of this repository, its content, or related third-party APIs.
14 |
15 | 5. **Updates and Changes**: The author reserves the right to modify, update, or remove any content, information, or features in this repository at any time without prior notice. Users are responsible for regularly reviewing the content and any changes made to this repository.
--------------------------------------------------------------------------------
/etc/testing/test_all.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import sys
3 | from pathlib import Path
4 | sys.path.append(str(Path(__file__).parent.parent.parent))
5 |
6 | import g4f
7 |
8 |
9 | async def test(model: g4f.Model):
10 | try:
11 | try:
12 | for response in g4f.ChatCompletion.create(
13 | model=model,
14 | messages=[{"role": "user", "content": "write a poem about a tree"}],
15 | temperature=0.1,
16 | stream=True
17 | ):
18 | print(response, end="")
19 |
20 | print()
21 | except Exception:
22 | for response in await g4f.ChatCompletion.create_async(
23 | model=model,
24 | messages=[{"role": "user", "content": "write a poem about a tree"}],
25 | temperature=0.1,
26 | stream=True
27 | ):
28 | print(response, end="")
29 |
30 | print()
31 |
32 | return True
33 | except Exception as e:
34 | print(model.name, "not working:", e)
35 | print(e.__traceback__.tb_next)
36 | return False
37 |
38 |
39 | async def start_test():
40 | models_to_test = [
41 | # GPT-3.5 4K Context
42 | g4f.models.gpt_35_turbo,
43 | g4f.models.gpt_35_turbo_0613,
44 |
45 | # GPT-3.5 16K Context
46 | g4f.models.gpt_35_turbo_16k,
47 | g4f.models.gpt_35_turbo_16k_0613,
48 |
49 | # GPT-4 8K Context
50 | g4f.models.gpt_4,
51 | g4f.models.gpt_4_0613,
52 |
53 | # GPT-4 32K Context
54 | g4f.models.gpt_4_32k,
55 | g4f.models.gpt_4_32k_0613,
56 | ]
57 |
58 | models_working = []
59 |
60 | for model in models_to_test:
61 | if await test(model):
62 | models_working.append(model.name)
63 |
64 | print("working models:", models_working)
65 |
66 |
67 | asyncio.run(start_test())
68 |
--------------------------------------------------------------------------------
/g4f/Provider/Aibn.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import time
4 | import hashlib
5 |
6 | from ..typing import AsyncResult, Messages
7 | from ..requests import StreamSession
8 | from .base_provider import AsyncGeneratorProvider
9 |
10 |
11 | class Aibn(AsyncGeneratorProvider):
12 | url = "https://aibn.cc"
13 | supports_gpt_35_turbo = True
14 | working = True
15 |
16 | @classmethod
17 | async def create_async_generator(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | proxy: str = None,
22 | timeout: int = 120,
23 | **kwargs
24 | ) -> AsyncResult:
25 | async with StreamSession(
26 | impersonate="chrome107",
27 | proxies={"https": proxy},
28 | timeout=timeout
29 | ) as session:
30 | timestamp = int(time.time())
31 | data = {
32 | "messages": messages,
33 | "pass": None,
34 | "sign": generate_signature(timestamp, messages[-1]["content"]),
35 | "time": timestamp
36 | }
37 | async with session.post(f"{cls.url}/api/generate", json=data) as response:
38 | response.raise_for_status()
39 | async for chunk in response.iter_content():
40 | yield chunk.decode()
41 |
42 | @classmethod
43 | @property
44 | def params(cls):
45 | params = [
46 | ("model", "str"),
47 | ("messages", "list[dict[str, str]]"),
48 | ("stream", "bool"),
49 | ("temperature", "float"),
50 | ]
51 | param = ", ".join([": ".join(p) for p in params])
52 | return f"g4f.provider.{cls.__name__} supports: ({param})"
53 |
54 |
55 | def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
56 | data = f"{timestamp}:{message}:{secret}"
57 | return hashlib.sha256(data.encode()).hexdigest()
--------------------------------------------------------------------------------
/g4f/Provider/Yqcloud.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import random
4 | from aiohttp import ClientSession
5 |
6 | from ..typing import AsyncResult, Messages
7 | from .base_provider import AsyncGeneratorProvider, format_prompt
8 |
9 |
10 | class Yqcloud(AsyncGeneratorProvider):
11 | url = "https://chat9.yqcloud.top/"
12 | working = True
13 | supports_gpt_35_turbo = True
14 |
15 | @staticmethod
16 | async def create_async_generator(
17 | model: str,
18 | messages: Messages,
19 | proxy: str = None,
20 | **kwargs,
21 | ) -> AsyncResult:
22 | async with ClientSession(
23 | headers=_create_header()
24 | ) as session:
25 | payload = _create_payload(messages, **kwargs)
26 | async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
27 | response.raise_for_status()
28 | async for chunk in response.content.iter_any():
29 | if chunk:
30 | chunk = chunk.decode()
31 | if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
32 | raise RuntimeError("IP address is blocked by abuse detection.")
33 | yield chunk
34 |
35 |
36 | def _create_header():
37 | return {
38 | "accept" : "application/json, text/plain, */*",
39 | "content-type" : "application/json",
40 | "origin" : "https://chat9.yqcloud.top",
41 | }
42 |
43 |
44 | def _create_payload(
45 | messages: Messages,
46 | system_message: str = "",
47 | user_id: int = None,
48 | **kwargs
49 | ):
50 | if not user_id:
51 | user_id = random.randint(1690000544336, 2093025544336)
52 | return {
53 | "prompt": format_prompt(messages),
54 | "network": True,
55 | "system": system_message,
56 | "withoutContext": False,
57 | "stream": True,
58 | "userId": f"#/chat/{user_id}"
59 | }
60 |
--------------------------------------------------------------------------------
/g4f/Provider/ChatForAi.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ..typing import AsyncResult, Messages
4 | from ..requests import StreamSession
5 | from .base_provider import AsyncGeneratorProvider
6 |
7 |
8 | class ChatForAi(AsyncGeneratorProvider):
9 | url = "https://chatforai.com"
10 | supports_gpt_35_turbo = True
11 | working = True
12 |
13 | @classmethod
14 | async def create_async_generator(
15 | cls,
16 | model: str,
17 | messages: Messages,
18 | proxy: str = None,
19 | timeout: int = 120,
20 | **kwargs
21 | ) -> AsyncResult:
22 | async with StreamSession(impersonate="chrome107", proxies={"https": proxy}, timeout=timeout) as session:
23 | prompt = messages[-1]["content"]
24 | data = {
25 | "conversationId": "temp",
26 | "conversationType": "chat_continuous",
27 | "botId": "chat_continuous",
28 | "globalSettings":{
29 | "baseUrl": "https://api.openai.com",
30 | "model": model if model else "gpt-3.5-turbo",
31 | "messageHistorySize": 5,
32 | "temperature": 0.7,
33 | "top_p": 1,
34 | **kwargs
35 | },
36 | "botSettings": {},
37 | "prompt": prompt,
38 | "messages": messages,
39 | }
40 | async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
41 | response.raise_for_status()
42 | async for chunk in response.iter_content():
43 | yield chunk.decode()
44 |
45 | @classmethod
46 | @property
47 | def params(cls):
48 | params = [
49 | ("model", "str"),
50 | ("messages", "list[dict[str, str]]"),
51 | ("stream", "bool"),
52 | ]
53 | param = ", ".join([": ".join(p) for p in params])
54 | return f"g4f.provider.{cls.__name__} supports: ({param})"
--------------------------------------------------------------------------------
/g4f/Provider/FreeGpt.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import time, hashlib, random
4 |
5 | from ..typing import AsyncResult, Messages
6 | from ..requests import StreamSession
7 | from .base_provider import AsyncGeneratorProvider
8 |
9 | domains = [
10 | 'https://k.aifree.site',
11 | 'https://p.aifree.site'
12 | ]
13 |
14 | class FreeGpt(AsyncGeneratorProvider):
15 | url = "https://freegpts1.aifree.site/"
16 | supports_gpt_35_turbo = True
17 | working = True
18 |
19 | @classmethod
20 | async def create_async_generator(
21 | cls,
22 | model: str,
23 | messages: Messages,
24 | proxy: str = None,
25 | timeout: int = 120,
26 | **kwargs
27 | ) -> AsyncResult:
28 | async with StreamSession(
29 | impersonate="chrome107",
30 | timeout=timeout,
31 | proxies={"https": proxy}
32 | ) as session:
33 | prompt = messages[-1]["content"]
34 | timestamp = int(time.time())
35 | data = {
36 | "messages": messages,
37 | "time": timestamp,
38 | "pass": None,
39 | "sign": generate_signature(timestamp, prompt)
40 | }
41 | url = random.choice(domains)
42 | async with session.post(f"{url}/api/generate", json=data) as response:
43 | response.raise_for_status()
44 | async for chunk in response.iter_content():
45 | yield chunk.decode()
46 |
47 | @classmethod
48 | @property
49 | def params(cls):
50 | params = [
51 | ("model", "str"),
52 | ("messages", "list[dict[str, str]]"),
53 | ("stream", "bool"),
54 | ]
55 | param = ", ".join([": ".join(p) for p in params])
56 | return f"g4f.provider.{cls.__name__} supports: ({param})"
57 |
58 | def generate_signature(timestamp: int, message: str, secret: str = ""):
59 | data = f"{timestamp}:{message}:{secret}"
60 | return hashlib.sha256(data.encode()).hexdigest()
--------------------------------------------------------------------------------
/g4f/Provider/Aichat.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession
4 |
5 | from ..typing import Messages
6 | from .base_provider import AsyncProvider, format_prompt
7 |
8 |
9 | class Aichat(AsyncProvider):
10 | url = "https://chat-gpt.org/chat"
11 | working = True
12 | supports_gpt_35_turbo = True
13 |
14 | @staticmethod
15 | async def create_async(
16 | model: str,
17 | messages: Messages,
18 | proxy: str = None,
19 | **kwargs
20 | ) -> str:
21 | headers = {
22 | "authority": "chat-gpt.org",
23 | "accept": "*/*",
24 | "cache-control": "no-cache",
25 | "content-type": "application/json",
26 | "origin": "https://chat-gpt.org",
27 | "pragma": "no-cache",
28 | "referer": "https://chat-gpt.org/chat",
29 | "sec-ch-ua-mobile": "?0",
30 | "sec-ch-ua-platform": '"macOS"',
31 | "sec-fetch-dest": "empty",
32 | "sec-fetch-mode": "cors",
33 | "sec-fetch-site": "same-origin",
34 | "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
35 | }
36 | async with ClientSession(
37 | headers=headers
38 | ) as session:
39 | json_data = {
40 | "message": format_prompt(messages),
41 | "temperature": kwargs.get('temperature', 0.5),
42 | "presence_penalty": 0,
43 | "top_p": kwargs.get('top_p', 1),
44 | "frequency_penalty": 0,
45 | }
46 | async with session.post(
47 | "https://chat-gpt.org/api/text",
48 | proxy=proxy,
49 | json=json_data
50 | ) as response:
51 | response.raise_for_status()
52 | result = await response.json()
53 | if not result['response']:
54 | raise Exception(f"Error Response: {result}")
55 | return result["message"]
56 |
--------------------------------------------------------------------------------
/g4f/Provider/GptGod.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import secrets, json
3 | from aiohttp import ClientSession
4 | from ..typing import AsyncResult, Messages
5 | from .base_provider import AsyncGeneratorProvider
6 | from .helper import format_prompt
7 |
8 | class GptGod(AsyncGeneratorProvider):
9 | url = "https://gptgod.site"
10 | supports_gpt_35_turbo = True
11 | working = True
12 |
13 | @classmethod
14 | async def create_async_generator(
15 | cls,
16 | model: str,
17 | messages: Messages,
18 | proxy: str = None,
19 | **kwargs
20 | ) -> AsyncResult:
21 | headers = {
22 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
23 | "Accept": "text/event-stream",
24 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
25 | "Accept-Encoding": "gzip, deflate, br",
26 | "Alt-Used": "gptgod.site",
27 | "Connection": "keep-alive",
28 | "Referer": f"{cls.url}/",
29 | "Sec-Fetch-Dest": "empty",
30 | "Sec-Fetch-Mode": "cors",
31 | "Sec-Fetch-Site": "same-origin",
32 | "Pragma": "no-cache",
33 | "Cache-Control": "no-cache",
34 | }
35 | async with ClientSession(headers=headers) as session:
36 | prompt = format_prompt(messages)
37 | data = {
38 | "content": prompt,
39 | "id": secrets.token_hex(16).zfill(32)
40 | }
41 | async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data, proxy=proxy) as response:
42 | response.raise_for_status()
43 | event = None
44 | async for line in response.content:
45 | if line.startswith(b'event: '):
46 | event = line[7:-1]
47 | elif event == b"data" and line.startswith(b"data: "):
48 | data = json.loads(line[6:-1])
49 | if data:
50 | yield data
51 | elif event == b"done":
52 | break
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Lockchat.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | import requests
6 |
7 | from ...typing import Any, CreateResult
8 | from ..base_provider import BaseProvider
9 |
10 |
11 | class Lockchat(BaseProvider):
12 | url: str = "http://supertest.lockchat.app"
13 | supports_stream = True
14 | supports_gpt_35_turbo = True
15 | supports_gpt_4 = True
16 |
17 | @staticmethod
18 | def create_completion(
19 | model: str,
20 | messages: list[dict[str, str]],
21 | stream: bool, **kwargs: Any) -> CreateResult:
22 |
23 | temperature = float(kwargs.get("temperature", 0.7))
24 | payload = {
25 | "temperature": temperature,
26 | "messages" : messages,
27 | "model" : model,
28 | "stream" : True,
29 | }
30 |
31 | headers = {
32 | "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
33 | }
34 | response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
35 | json=payload, headers=headers, stream=True)
36 |
37 | response.raise_for_status()
38 | for token in response.iter_lines():
39 | if b"The model: `gpt-4` does not exist" in token:
40 | print("error, retrying...")
41 | yield from Lockchat.create_completion(
42 | model = model, messages = messages,
43 | stream = stream,
44 | temperature = temperature,
45 | **kwargs)
46 | return
47 |
48 | if b"content" in token:
49 | token = json.loads(token.decode("utf-8").split("data: ")[1])
50 | token = token["choices"][0]["delta"].get("content")
51 | if token:
52 | yield (token)
53 |
54 | @classmethod
55 | @property
56 | def params(cls):
57 | params = [
58 | ("model", "str"),
59 | ("messages", "list[dict[str, str]]"),
60 | ("stream", "bool"),
61 | ("temperature", "float"),
62 | ]
63 | param = ", ".join([": ".join(p) for p in params])
64 | return f"g4f.provider.{cls.__name__} supports: ({param})"
65 |
--------------------------------------------------------------------------------
/g4f/gui/server/backend.py:
--------------------------------------------------------------------------------
1 | import g4f
2 |
3 | from flask import request
4 | from .internet import search
5 | from .config import special_instructions
6 | from .provider import get_provider
7 |
8 | g4f.logging = True
9 |
10 | class Backend_Api:
11 | def __init__(self, app) -> None:
12 | self.app = app
13 | self.routes = {
14 | '/backend-api/v2/conversation': {
15 | 'function': self._conversation,
16 | 'methods': ['POST']
17 | },
18 | '/backend-api/v2/gen.set.summarize:title': {
19 | 'function': self._gen_title,
20 | 'methods': ['POST']
21 | },
22 | }
23 |
24 | def _gen_title(self):
25 | return {
26 | 'title': ''
27 | }
28 |
29 | def _conversation(self):
30 | try:
31 | jailbreak = request.json['jailbreak']
32 | internet_access = request.json['meta']['content']['internet_access']
33 | conversation = request.json['meta']['content']['conversation']
34 | prompt = request.json['meta']['content']['parts'][0]
35 | model = request.json['model']
36 | provider = request.json.get('provider', '').split('g4f.Provider.')[-1]
37 |
38 | messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
39 |
40 | def stream():
41 | if provider:
42 | answer = g4f.ChatCompletion.create(model=model,
43 | provider=get_provider(provider), messages=messages, stream=True)
44 | else:
45 | answer = g4f.ChatCompletion.create(model=model,
46 | messages=messages, stream=True)
47 |
48 | for token in answer:
49 | yield token
50 |
51 | return self.app.response_class(stream(), mimetype='text/event-stream')
52 |
53 | except Exception as e:
54 | return {
55 | 'code' : 'G4F_ERROR',
56 | '_action': '_ask',
57 | 'success': False,
58 | 'error' : f'an error occurred: {str(e)}'}, 400
--------------------------------------------------------------------------------
/etc/testing/test_providers.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 | from colorama import Fore, Style
4 |
5 | sys.path.append(str(Path(__file__).parent.parent))
6 |
7 | from g4f import BaseProvider, models, Provider
8 |
9 | logging = False
10 |
11 |
12 | def main():
13 | providers = get_providers()
14 | failed_providers = []
15 |
16 | for _provider in providers:
17 | if _provider.needs_auth:
18 | continue
19 | print("Provider:", _provider.__name__)
20 | result = test(_provider)
21 | print("Result:", result)
22 | if _provider.working and not result:
23 | failed_providers.append(_provider)
24 |
25 | print()
26 |
27 | if failed_providers:
28 | print(f"{Fore.RED + Style.BRIGHT}Failed providers:{Style.RESET_ALL}")
29 | for _provider in failed_providers:
30 | print(f"{Fore.RED}{_provider.__name__}")
31 | else:
32 | print(f"{Fore.GREEN + Style.BRIGHT}All providers are working")
33 |
34 |
35 | def get_providers() -> list[type[BaseProvider]]:
36 | providers = dir(Provider)
37 | providers = [getattr(Provider, provider) for provider in providers if provider != "RetryProvider"]
38 | providers = [provider for provider in providers if isinstance(provider, type)]
39 | return [provider for provider in providers if issubclass(provider, BaseProvider)]
40 |
41 |
42 | def create_response(_provider: type[BaseProvider]) -> str:
43 | model = models.gpt_35_turbo.name if _provider.supports_gpt_35_turbo else models.default.name
44 | response = _provider.create_completion(
45 | model=model,
46 | messages=[{"role": "user", "content": "Hello, who are you? Answer in detail much as possible."}],
47 | stream=False,
48 | )
49 | return "".join(response)
50 |
51 |
52 | def test(_provider: type[BaseProvider]) -> bool:
53 | try:
54 | response = create_response(_provider)
55 | assert type(response) is str
56 | assert len(response) > 0
57 | return response
58 | except Exception as e:
59 | if logging:
60 | print(e)
61 | return False
62 |
63 |
64 | if __name__ == "__main__":
65 | main()
--------------------------------------------------------------------------------
/utility/util_providers.py:
--------------------------------------------------------------------------------
1 | from g4f import Provider, ChatCompletion
2 | import g4f
3 |
4 | # if provider needs auth parameter
5 | provider_auth_settings = {
6 | 'Bard':{
7 | 'cookie':""
8 | }
9 | }
10 |
11 |
12 | def send_chat(selected_model, selected_provider, context_history):
13 | if selected_provider is not None:
14 | prov = getattr(g4f.Provider, selected_provider)
15 | prov.working = True
16 | auth = None
17 | if prov.needs_auth:
18 | auth=provider_auth_settings['Bard']
19 | else:
20 | auth=None
21 | prov=None
22 |
23 | print(f'Using Model {selected_model} provided by {selected_provider}')
24 |
25 | try:
26 | result = g4f.ChatCompletion.create(model=selected_model, stream=False, provider=prov,
27 | messages=context_history, auth=auth)
28 | if len(result) > 0:
29 | context_history.append({'role': 'assistant', 'content': str(result)})
30 | except Exception as e:
31 | print(e)
32 | result = ''
33 | context_history = []
34 | return result, context_history
35 |
36 |
37 |
38 | def get_all_models():
39 | allmodels = []
40 | for m in g4f.models.ModelUtils.convert:
41 | allmodels.append(m)
42 | allmodels.sort()
43 | return allmodels
44 |
45 | def get_providers_for_model(m):
46 | providers = []
47 | model = g4f.models.ModelUtils.convert[m]
48 | if model.best_provider is not None:
49 | if hasattr(model.best_provider, 'providers'):
50 | for p in model.best_provider.providers:
51 | providers.append(p.__name__)
52 | else:
53 | prov = model.best_provider
54 | if hasattr(prov, '__name__'):
55 | providers.append(prov.__name__)
56 | # else:
57 | # if model.base_provider is not None:
58 | # providers.append(model.base_provider)
59 |
60 | providers.sort()
61 | return providers
62 |
63 | def get_provider_info(provider):
64 | if provider is None:
65 | return ''
66 |
67 | prov = getattr(g4f.Provider, provider)
68 | auth_str = '🔐' if prov.needs_auth else '🔓'
69 | working = '✅' if prov.working else '❌'
70 | info = f'## {prov.url} {working} {auth_str}\n{prov.params}'
71 | return info
72 |
73 |
74 |
75 |
76 |
77 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import codecs
2 | import os
3 |
4 | from setuptools import find_packages, setup
5 |
6 | here = os.path.abspath(os.path.dirname(__file__))
7 |
8 | with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
9 | long_description = "\n" + fh.read()
10 |
11 | with open("requirements.txt") as f:
12 | required = f.read().splitlines()
13 |
14 | with open("etc/interference/requirements.txt") as f:
15 | api_required = f.read().splitlines()
16 |
17 | VERSION = '0.1.5.8'
18 | DESCRIPTION = (
19 | "The official gpt4free repository | various collection of powerful language models"
20 | )
21 |
22 | # Setting up
23 | setup(
24 | name="g4f",
25 | version=VERSION,
26 | author="Tekky",
27 | author_email="",
28 | description=DESCRIPTION,
29 | long_description_content_type="text/markdown",
30 | long_description=long_description,
31 | packages=find_packages(),
32 |     package_data={"g4f": ["gui/client/*", "gui/server/*"]},  # paths are relative to the package
33 | include_package_data=True,
34 | data_files=["etc/interference/app.py"],
35 | install_requires=required,
36 | extras_require={"api": api_required},
37 | entry_points={
38 | "console_scripts": ["g4f=interference.app:main"],
39 | },
40 | url="https://github.com/xtekky/gpt4free", # Link to your GitHub repository
41 | project_urls={
42 | "Source Code": "https://github.com/xtekky/gpt4free", # GitHub link
43 | "Bug Tracker": "https://github.com/xtekky/gpt4free/issues", # Link to issue tracker
44 | },
45 | keywords=[
46 | "python",
47 | "chatbot",
48 | "reverse-engineering",
49 | "openai",
50 | "chatbots",
51 | "gpt",
52 | "language-model",
53 | "gpt-3",
54 | "gpt3",
55 | "openai-api",
56 | "gpt-4",
57 | "gpt4",
58 | "chatgpt",
59 | "chatgpt-api",
60 | "openai-chatgpt",
61 | "chatgpt-free",
62 | "chatgpt-4",
63 | "chatgpt4",
64 | "chatgpt4-api",
65 | "free",
66 | "free-gpt",
67 | "gpt4free",
68 | "g4f",
69 | ],
70 | classifiers=[
71 | "Development Status :: 2 - Pre-Alpha",
72 | "Intended Audience :: Developers",
73 | "Programming Language :: Python :: 3",
74 | "Operating System :: Unix",
75 | "Operating System :: MacOS :: MacOS X",
76 | "Operating System :: Microsoft :: Windows",
77 | ],
78 | )
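79 | 
80 | # Illustrative: the optional API dependencies declared in extras_require can
81 | # be pulled in with the "api" extra, e.g. `pip install -e ".[api]"`.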
--------------------------------------------------------------------------------
/g4f/Provider/ChatBase.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession
4 |
5 | from ..typing import AsyncResult, Messages
6 | from .base_provider import AsyncGeneratorProvider
7 |
8 |
9 | class ChatBase(AsyncGeneratorProvider):
10 | url = "https://www.chatbase.co"
11 | supports_gpt_35_turbo = True
12 | supports_gpt_4 = True
13 | working = True
14 |
15 | @classmethod
16 | async def create_async_generator(
17 | cls,
18 | model: str,
19 | messages: Messages,
20 | proxy: str = None,
21 | **kwargs
22 | ) -> AsyncResult:
23 | if model == "gpt-4":
24 | chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn"
25 | elif model == "gpt-3.5-turbo" or not model:
26 | chat_id = "chatbase--1--pdf-p680fxvnm"
27 | else:
28 |             raise ValueError(f"Model is not supported: {model}")
29 | headers = {
30 | "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
31 | "Accept" : "*/*",
32 | "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
33 | "Origin" : cls.url,
34 | "Referer" : cls.url + "/",
35 | "Sec-Fetch-Dest" : "empty",
36 | "Sec-Fetch-Mode" : "cors",
37 | "Sec-Fetch-Site" : "same-origin",
38 | }
39 | async with ClientSession(
40 | headers=headers
41 | ) as session:
42 | data = {
43 | "messages": messages,
44 | "captchaCode": "hadsa",
45 | "chatId": chat_id,
46 | "conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}"
47 | }
48 | async with session.post("https://www.chatbase.co/api/fe/chat", json=data, proxy=proxy) as response:
49 | response.raise_for_status()
50 | async for stream in response.content.iter_any():
51 | yield stream.decode()
52 |
53 |
54 | @classmethod
55 | @property
56 | def params(cls):
57 | params = [
58 | ("model", "str"),
59 | ("messages", "list[dict[str, str]]"),
60 | ("stream", "bool"),
61 | ]
62 | param = ", ".join([": ".join(p) for p in params])
63 | return f"g4f.provider.{cls.__name__} supports: ({param})"
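64 | 
65 | # Illustrative async usage (requires network access; the upstream endpoint
66 | # may change or stop working at any time):
67 | #
68 | #     import asyncio
69 | #     async def demo():
70 | #         async for chunk in ChatBase.create_async_generator(
71 | #                 "gpt-3.5-turbo", [{"role": "user", "content": "Hi"}]):
72 | #             print(chunk, end="")
73 | #     asyncio.run(demo())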
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Wuguokai.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import random
4 |
5 | import requests
6 |
7 | from ...typing import Any, CreateResult
8 | from ..base_provider import BaseProvider, format_prompt
9 |
10 |
11 | class Wuguokai(BaseProvider):
12 | url = 'https://chat.wuguokai.xyz'
13 | supports_gpt_35_turbo = True
14 | working = False
15 |
16 | @staticmethod
17 | def create_completion(
18 | model: str,
19 | messages: list[dict[str, str]],
20 | stream: bool,
21 | **kwargs: Any,
22 | ) -> CreateResult:
23 | headers = {
24 | 'authority': 'ai-api.wuguokai.xyz',
25 | 'accept': 'application/json, text/plain, */*',
26 | 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
27 | 'content-type': 'application/json',
28 | 'origin': 'https://chat.wuguokai.xyz',
29 | 'referer': 'https://chat.wuguokai.xyz/',
30 | 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
31 | 'sec-ch-ua-mobile': '?0',
32 | 'sec-ch-ua-platform': '"Windows"',
33 | 'sec-fetch-dest': 'empty',
34 | 'sec-fetch-mode': 'cors',
35 | 'sec-fetch-site': 'same-site',
36 | 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
37 | }
38 |         data = {
39 | "prompt": format_prompt(messages),
40 | "options": {},
41 | "userId": f"#/chat/{random.randint(1,99999999)}",
42 | "usingContext": True
43 | }
44 |         response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, timeout=3, json=data, proxies=kwargs.get('proxy', {}))
45 | _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
46 | if response.status_code == 200:
47 | if len(_split) > 1:
48 | yield _split[1].strip()
49 | else:
50 | yield _split[0].strip()
51 | else:
52 | raise Exception(f"Error: {response.status_code} {response.reason}")
53 |
54 | @classmethod
55 | @property
56 | def params(cls):
57 | params = [
58 | ("model", "str"),
59 | ("messages", "list[dict[str, str]]"),
60 | ("stream", "bool")
61 | ]
62 | param = ", ".join([": ".join(p) for p in params])
63 | return f"g4f.provider.{cls.__name__} supports: ({param})"
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/CodeLinkAva.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession
4 | import json
5 |
6 | from ...typing import AsyncGenerator
7 | from ..base_provider import AsyncGeneratorProvider
8 |
9 |
10 | class CodeLinkAva(AsyncGeneratorProvider):
11 | url = "https://ava-ai-ef611.web.app"
12 | supports_gpt_35_turbo = True
13 | working = False
14 |
15 | @classmethod
16 | async def create_async_generator(
17 | cls,
18 | model: str,
19 | messages: list[dict[str, str]],
20 | **kwargs
21 | ) -> AsyncGenerator:
22 | headers = {
23 | "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
24 | "Accept" : "*/*",
25 | "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
26 | "Origin" : cls.url,
27 | "Referer" : cls.url + "/",
28 | "Sec-Fetch-Dest" : "empty",
29 | "Sec-Fetch-Mode" : "cors",
30 | "Sec-Fetch-Site" : "same-origin",
31 | }
32 | async with ClientSession(
33 | headers=headers
34 | ) as session:
35 | data = {
36 | "messages": messages,
37 | "temperature": 0.6,
38 | "stream": True,
39 | **kwargs
40 | }
41 | async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
42 | response.raise_for_status()
43 | async for line in response.content:
44 | line = line.decode()
45 | if line.startswith("data: "):
46 | if line.startswith("data: [DONE]"):
47 | break
48 | line = json.loads(line[6:-1])
49 | content = line["choices"][0]["delta"].get("content")
50 | if content:
51 | yield content
52 |
53 |
54 | @classmethod
55 | @property
56 | def params(cls):
57 | params = [
58 | ("model", "str"),
59 | ("messages", "list[dict[str, str]]"),
60 | ("stream", "bool"),
61 | ("temperature", "float"),
62 | ]
63 | param = ", ".join([": ".join(p) for p in params])
64 | return f"g4f.provider.{cls.__name__} supports: ({param})"
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/V50.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import uuid
4 |
5 | import requests
6 |
7 | from ...typing import Any, CreateResult
8 | from ..base_provider import BaseProvider
9 |
10 |
11 | class V50(BaseProvider):
12 | url = 'https://p5.v50.ltd'
13 | supports_gpt_35_turbo = True
14 | supports_stream = False
15 | needs_auth = False
16 | working = False
17 |
18 | @staticmethod
19 | def create_completion(
20 | model: str,
21 | messages: list[dict[str, str]],
22 | stream: bool, **kwargs: Any) -> CreateResult:
23 |
24 | conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
25 | conversation += "\nassistant: "
26 |
27 | payload = {
28 | "prompt" : conversation,
29 | "options" : {},
30 | "systemMessage" : ".",
31 | "temperature" : kwargs.get("temperature", 0.4),
32 | "top_p" : kwargs.get("top_p", 0.4),
33 | "model" : model,
34 | "user" : str(uuid.uuid4())
35 | }
36 |
37 | headers = {
38 | 'authority' : 'p5.v50.ltd',
39 | 'accept' : 'application/json, text/plain, */*',
40 | 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
41 | 'content-type' : 'application/json',
42 | 'origin' : 'https://p5.v50.ltd',
43 | 'referer' : 'https://p5.v50.ltd/',
44 | 'sec-ch-ua-platform': '"Windows"',
45 | 'sec-fetch-dest' : 'empty',
46 | 'sec-fetch-mode' : 'cors',
47 | 'sec-fetch-site' : 'same-origin',
48 | 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
49 | }
50 |         response = requests.post("https://p5.v50.ltd/api/chat-process",
51 |                                  json=payload, headers=headers, proxies=kwargs.get('proxy', {}))
52 |
53 | if "https://fk1.v50.ltd" not in response.text:
54 | yield response.text
55 |
56 | @classmethod
57 | @property
58 | def params(cls):
59 | params = [
60 | ("model", "str"),
61 | ("messages", "list[dict[str, str]]"),
62 | ("stream", "bool"),
63 | ("temperature", "float"),
64 |             ("top_p", "float"),
65 | ]
66 | param = ", ".join([": ".join(p) for p in params])
67 | return f"g4f.provider.{cls.__name__} supports: ({param})"
--------------------------------------------------------------------------------
/g4f/Provider/Vitalentum.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from aiohttp import ClientSession
5 |
6 | from .base_provider import AsyncGeneratorProvider
7 | from ..typing import AsyncResult, Messages
8 |
9 | class Vitalentum(AsyncGeneratorProvider):
10 | url = "https://app.vitalentum.io"
11 | working = True
12 | supports_gpt_35_turbo = True
13 |
14 |
15 | @classmethod
16 | async def create_async_generator(
17 | cls,
18 | model: str,
19 | messages: Messages,
20 | proxy: str = None,
21 | **kwargs
22 | ) -> AsyncResult:
23 | headers = {
24 | "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
25 | "Accept" : "text/event-stream",
26 | "Accept-language" : "de,en-US;q=0.7,en;q=0.3",
27 | "Origin" : cls.url,
28 | "Referer" : cls.url + "/",
29 | "Sec-Fetch-Dest" : "empty",
30 | "Sec-Fetch-Mode" : "cors",
31 | "Sec-Fetch-Site" : "same-origin",
32 | }
33 | conversation = json.dumps({"history": [{
34 | "speaker": "human" if message["role"] == "user" else "bot",
35 | "text": message["content"],
36 | } for message in messages]})
37 | data = {
38 | "conversation": conversation,
39 | "temperature": 0.7,
40 | **kwargs
41 | }
42 | async with ClientSession(
43 | headers=headers
44 | ) as session:
45 | async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
46 | response.raise_for_status()
47 | async for line in response.content:
48 | line = line.decode()
49 | if line.startswith("data: "):
50 | if line.startswith("data: [DONE]"):
51 | break
52 | line = json.loads(line[6:-1])
53 | content = line["choices"][0]["delta"].get("content")
54 | if content:
55 | yield content
56 |
57 |
58 | @classmethod
59 | @property
60 | def params(cls):
61 | params = [
62 | ("model", "str"),
63 | ("messages", "list[dict[str, str]]"),
64 | ("stream", "bool"),
65 | ("proxy", "str"),
66 | ("temperature", "float"),
67 | ]
68 | param = ", ".join([": ".join(p) for p in params])
69 | return f"g4f.provider.{cls.__name__} supports: ({param})"
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Wewordle.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import random, string, time
4 | from aiohttp import ClientSession
5 |
6 | from ..base_provider import AsyncProvider
7 |
8 |
9 | class Wewordle(AsyncProvider):
10 | url = "https://wewordle.org"
11 | working = False
12 | supports_gpt_35_turbo = True
13 |
14 | @classmethod
15 | async def create_async(
16 | cls,
17 | model: str,
18 | messages: list[dict[str, str]],
19 | proxy: str = None,
20 | **kwargs
21 | ) -> str:
22 |
23 | headers = {
24 | "accept" : "*/*",
25 | "pragma" : "no-cache",
26 | "Content-Type" : "application/json",
27 | "Connection" : "keep-alive"
28 | }
29 |
30 | _user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
31 | _app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
32 | _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
33 | data = {
34 | "user" : _user_id,
35 | "messages" : messages,
36 | "subscriber": {
37 | "originalPurchaseDate" : None,
38 | "originalApplicationVersion" : None,
39 | "allPurchaseDatesMillis" : {},
40 | "entitlements" : {"active": {}, "all": {}},
41 | "allPurchaseDates" : {},
42 | "allExpirationDatesMillis" : {},
43 | "allExpirationDates" : {},
44 | "originalAppUserId" : f"$RCAnonymousID:{_app_id}",
45 | "latestExpirationDate" : None,
46 | "requestDate" : _request_date,
47 | "latestExpirationDateMillis" : None,
48 | "nonSubscriptionTransactions" : [],
49 | "originalPurchaseDateMillis" : None,
50 | "managementURL" : None,
51 | "allPurchasedProductIdentifiers": [],
52 | "firstSeen" : _request_date,
53 | "activeSubscriptions" : [],
54 | }
55 | }
56 |
57 |
58 | async with ClientSession(
59 | headers=headers
60 | ) as session:
61 | async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response:
62 | response.raise_for_status()
63 | content = (await response.json())["message"]["content"]
64 | if content:
65 | return content
--------------------------------------------------------------------------------
/g4f/Provider/needs_auth/Raycast.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | import requests
6 |
7 | from ...typing import CreateResult, Messages
8 | from ..base_provider import BaseProvider
9 |
10 |
11 | class Raycast(BaseProvider):
12 | url = "https://raycast.com"
13 | supports_gpt_35_turbo = True
14 | supports_gpt_4 = True
15 | supports_stream = True
16 | needs_auth = True
17 | working = True
18 |
19 | @staticmethod
20 | def create_completion(
21 | model: str,
22 | messages: Messages,
23 | stream: bool,
24 | proxy: str = None,
25 | **kwargs,
26 | ) -> CreateResult:
27 | auth = kwargs.get('auth')
28 | headers = {
29 | 'Accept': 'application/json',
30 | 'Accept-Language': 'en-US,en;q=0.9',
31 | 'Authorization': f'Bearer {auth}',
32 | 'Content-Type': 'application/json',
33 | 'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
34 | }
35 | parsed_messages = []
36 | for message in messages:
37 | parsed_messages.append({
38 | 'author': message['role'],
39 | 'content': {'text': message['content']}
40 | })
41 | data = {
42 | "debug": False,
43 | "locale": "en-CN",
44 | "messages": parsed_messages,
45 | "model": model,
46 | "provider": "openai",
47 | "source": "ai_chat",
48 | "system_instruction": "markdown",
49 | "temperature": 0.5
50 | }
51 | response = requests.post(
52 | "https://backend.raycast.com/api/v1/ai/chat_completions",
53 | headers=headers,
54 | json=data,
55 | stream=True,
56 | proxies={"https": proxy}
57 | )
58 | for token in response.iter_lines():
59 | if b'data: ' not in token:
60 | continue
61 | completion_chunk = json.loads(token.decode().replace('data: ', ''))
62 | token = completion_chunk['text']
63 |             if token is not None:
64 | yield token
65 |
66 | @classmethod
67 | @property
68 | def params(cls):
69 |         params = [
70 |             ("model", "str"),
71 |             ("messages", "list[dict[str, str]]"),
72 |             ("stream", "bool"),
73 |             ("proxy", "str"),
74 |             ("temperature", "float"),
75 |             ("top_p", "float"),
76 |             ("auth", "str"),
77 |         ]
78 | param = ", ".join([": ".join(p) for p in params])
79 | return f"g4f.provider.{cls.__name__} supports: ({param})"
80 |
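81 | # Illustrative usage (a valid Raycast bearer token must be supplied via the
82 | # `auth` keyword; the token value below is a placeholder):
83 | #
84 | #     for chunk in Raycast.create_completion(
85 | #             "gpt-3.5-turbo", [{"role": "user", "content": "Hi"}],
86 | #             stream=True, auth="<your-raycast-token>"):
87 | #         print(chunk, end="")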
--------------------------------------------------------------------------------
/g4f/Provider/Ylokh.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | from ..requests import StreamSession
6 | from .base_provider import AsyncGeneratorProvider
7 | from ..typing import AsyncResult, Messages
8 |
9 | class Ylokh(AsyncGeneratorProvider):
10 | url = "https://chat.ylokh.xyz"
11 | working = True
12 | supports_gpt_35_turbo = True
13 |
14 |
15 | @classmethod
16 | async def create_async_generator(
17 | cls,
18 | model: str,
19 | messages: Messages,
20 | stream: bool = True,
21 | proxy: str = None,
22 | timeout: int = 120,
23 | **kwargs
24 | ) -> AsyncResult:
25 | model = model if model else "gpt-3.5-turbo"
26 | headers = {
27 | "Origin" : cls.url,
28 | "Referer": cls.url + "/",
29 | }
30 | data = {
31 | "messages": messages,
32 | "model": model,
33 | "temperature": 1,
34 | "presence_penalty": 0,
35 | "top_p": 1,
36 | "frequency_penalty": 0,
37 | "allow_fallback": True,
38 | "stream": stream,
39 | **kwargs
40 | }
41 | async with StreamSession(
42 | headers=headers,
43 | proxies={"https": proxy},
44 | timeout=timeout
45 | ) as session:
46 | async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
47 | response.raise_for_status()
48 | if stream:
49 | async for line in response.iter_lines():
50 | line = line.decode()
51 | if line.startswith("data: "):
52 | if line.startswith("data: [DONE]"):
53 | break
54 | line = json.loads(line[6:])
55 | content = line["choices"][0]["delta"].get("content")
56 | if content:
57 | yield content
58 | else:
59 | chat = await response.json()
60 | yield chat["choices"][0]["message"].get("content")
61 |
62 |
63 |
64 | @classmethod
65 | @property
66 | def params(cls):
67 | params = [
68 | ("model", "str"),
69 | ("messages", "list[dict[str, str]]"),
70 | ("stream", "bool"),
71 | ("proxy", "str"),
72 | ("timeout", "int"),
73 | ("temperature", "float"),
74 | ("top_p", "float"),
75 | ]
76 | param = ", ".join([": ".join(p) for p in params])
77 | return f"g4f.provider.{cls.__name__} supports: ({param})"
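78 | 
79 | # Illustrative non-streaming call (network access required; with stream=False
80 | # the generator yields the whole reply as a single chunk):
81 | #
82 | #     import asyncio
83 | #     async def demo():
84 | #         async for chunk in Ylokh.create_async_generator(
85 | #                 "gpt-3.5-turbo", [{"role": "user", "content": "Hi"}],
86 | #                 stream=False):
87 | #             print(chunk)
88 | #     asyncio.run(demo())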
--------------------------------------------------------------------------------
/g4f/Provider/helper.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import sys
5 | from asyncio import AbstractEventLoop
6 | from os import path
7 | from ..typing import Dict, List, Messages
8 | import browser_cookie3
9 |
10 | # Change event loop policy on windows
11 | if sys.platform == 'win32':
12 | if isinstance(
13 | asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy
14 | ):
15 | asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
16 |
17 | # Local Cookie Storage
18 | _cookies: Dict[str, Dict[str, str]] = {}
19 |
20 | # If event loop is already running, handle nested event loops
21 | # If "nest_asyncio" is installed, patch the event loop.
22 | def get_event_loop() -> AbstractEventLoop:
23 | try:
24 | asyncio.get_running_loop()
25 | except RuntimeError:
26 | try:
27 | return asyncio.get_event_loop()
28 | except RuntimeError:
29 | asyncio.set_event_loop(asyncio.new_event_loop())
30 | return asyncio.get_event_loop()
31 | try:
32 | event_loop = asyncio.get_event_loop()
33 | if not hasattr(event_loop.__class__, "_nest_patched"):
34 | import nest_asyncio
35 | nest_asyncio.apply(event_loop)
36 | return event_loop
37 | except ImportError:
38 | raise RuntimeError(
39 | 'Use "create_async" instead of "create" function in a running event loop. Or install the "nest_asyncio" package.'
40 | )
41 |
42 |
43 | # Load cookies for a domain from all supported browsers.
44 | # Cache the results in the "_cookies" variable.
45 | def get_cookies(cookie_domain: str) -> Dict[str, str]:
46 | if cookie_domain not in _cookies:
47 | _cookies[cookie_domain] = {}
48 | try:
49 | for cookie in browser_cookie3.load(cookie_domain):
50 | _cookies[cookie_domain][cookie.name] = cookie.value
51 |         except Exception:
52 | pass
53 | return _cookies[cookie_domain]
54 |
55 |
56 | def format_prompt(messages: Messages, add_special_tokens=False) -> str:
57 | if add_special_tokens or len(messages) > 1:
58 | formatted = "\n".join(
59 | [
60 | "%s: %s" % ((message["role"]).capitalize(), message["content"])
61 | for message in messages
62 | ]
63 | )
64 | return f"{formatted}\nAssistant:"
65 | else:
66 | return messages[0]["content"]
67 |
68 |
69 | def get_browser(user_data_dir: str = None):
70 | from undetected_chromedriver import Chrome
71 | from platformdirs import user_config_dir
72 |
73 | if not user_data_dir:
74 | user_data_dir = user_config_dir("g4f")
75 | user_data_dir = path.join(user_data_dir, "Default")
76 |
77 | return Chrome(user_data_dir=user_data_dir)
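78 | 
79 | # Illustrative: format_prompt flattens a multi-message history and appends an
80 | # assistant cue, e.g.
81 | #
82 | #     format_prompt([{"role": "user", "content": "Hi"},
83 | #                    {"role": "assistant", "content": "Hello"}])
84 | #     # -> 'User: Hi\nAssistant: Hello\nAssistant:'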
--------------------------------------------------------------------------------
/g4f/Provider/ChatgptDemo.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import time, json, re
4 | from aiohttp import ClientSession
5 |
6 | from ..typing import AsyncResult, Messages
7 | from .base_provider import AsyncGeneratorProvider
8 | from .helper import format_prompt
9 |
10 | class ChatgptDemo(AsyncGeneratorProvider):
11 | url = "https://chat.chatgptdemo.net"
12 | supports_gpt_35_turbo = True
13 | working = True
14 |
15 | @classmethod
16 | async def create_async_generator(
17 | cls,
18 | model: str,
19 | messages: Messages,
20 | proxy: str = None,
21 | **kwargs
22 | ) -> AsyncResult:
23 | headers = {
24 | "authority": "chat.chatgptdemo.net",
25 | "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",
26 | "origin": "https://chat.chatgptdemo.net",
27 | "referer": "https://chat.chatgptdemo.net/",
28 | "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
29 | "sec-ch-ua-mobile": "?0",
30 | "sec-ch-ua-platform": '"Linux"',
31 | "sec-fetch-dest": "empty",
32 | "sec-fetch-mode": "cors",
33 | "sec-fetch-site": "same-origin",
34 | "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
35 | }
36 | async with ClientSession(headers=headers) as session:
37 | async with session.get(f"{cls.url}/", proxy=proxy) as response:
38 | response.raise_for_status()
39 | response = await response.text()
40 | result = re.search(r'