├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── default_issue.md
│   │   └── feature_request.md
│   └── workflows
│       ├── close-inactive-issues.yml
│       ├── copilot.yml
│       ├── publish-to-pypi.yml
│       ├── publish-workflow.yaml
│       └── unittest.yml
├── .gitignore
├── .gitpod.yml
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LEGAL_NOTICE.md
├── LICENSE
├── MANIFEST.in
├── README.md
├── SECURITY.md
├── docker-compose.yml
├── docker
│   ├── Dockerfile
│   ├── background.png
│   ├── supervisor-gui.conf
│   └── supervisor.conf
├── docs
│   ├── async_client.md
│   ├── cat.jpeg
│   ├── cat.webp
│   ├── client.md
│   ├── docker.md
│   ├── git.md
│   ├── guides
│   │   ├── create_provider.md
│   │   ├── help_me.md
│   │   ├── phone.md
│   │   ├── phone.png
│   │   └── phone2.jpeg
│   ├── interference.md
│   ├── legacy.md
│   ├── requirements.md
│   ├── waterfall.jpeg
│   └── webview.md
├── etc
│   ├── examples
│   │   ├── api.py
│   │   ├── image_api.py
│   │   ├── image_chat_reka.py
│   │   └── openaichat.py
│   ├── testing
│   │   ├── _providers.py
│   │   ├── log_time.py
│   │   ├── test_all.py
│   │   ├── test_api.py
│   │   ├── test_async.py
│   │   ├── test_chat_completion.py
│   │   ├── test_gui.py
│   │   ├── test_interference.py
│   │   ├── test_needs_auth.py
│   │   └── test_providers.py
│   ├── tool
│   │   ├── contributers.py
│   │   ├── copilot.py
│   │   ├── create_provider.py
│   │   ├── improve_code.py
│   │   ├── provider_init.py
│   │   ├── readme_table.py
│   │   ├── translate_readme.py
│   │   └── vercel.py
│   └── unittest
│       ├── __main__.py
│       ├── async_client.py
│       ├── asyncio.py
│       ├── backend.py
│       ├── client.py
│       ├── include.py
│       ├── integration.py
│       ├── main.py
│       ├── mocks.py
│       └── model.py
├── g4f
│   ├── Provider
│   │   ├── Aichatos.py
│   │   ├── Aura.py
│   │   ├── Bing.py
│   │   ├── BingCreateImages.py
│   │   ├── Blackbox.py
│   │   ├── ChatForAi.py
│   │   ├── Chatgpt4Online.py
│   │   ├── ChatgptAi.py
│   │   ├── ChatgptFree.py
│   │   ├── ChatgptNext.py
│   │   ├── ChatgptX.py
│   │   ├── Cnote.py
│   │   ├── Cohere.py
│   │   ├── DeepInfra.py
│   │   ├── DeepInfraImage.py
│   │   ├── Feedough.py
│   │   ├── FlowGpt.py
│   │   ├── FreeChatgpt.py
│   │   ├── FreeGpt.py
│   │   ├── GeminiPro.py
│   │   ├── GeminiProChat.py
│   │   ├── GigaChat.py
│   │   ├── GptTalkRu.py
│   │   ├── HuggingChat.py
│   │   ├── HuggingFace.py
│   │   ├── Koala.py
│   │   ├── Liaobots.py
│   │   ├── Llama.py
│   │   ├── Local.py
│   │   ├── MetaAI.py
│   │   ├── MetaAIAccount.py
│   │   ├── Ollama.py
│   │   ├── PerplexityLabs.py
│   │   ├── Pi.py
│   │   ├── Pizzagpt.py
│   │   ├── Reka.py
│   │   ├── Replicate.py
│   │   ├── ReplicateImage.py
│   │   ├── Vercel.py
│   │   ├── WhiteRabbitNeo.py
│   │   ├── You.py
│   │   ├── __init__.py
│   │   ├── base_provider.py
│   │   ├── bing
│   │   │   ├── __init__.py
│   │   │   ├── conversation.py
│   │   │   ├── create_images.py
│   │   │   └── upload_image.py
│   │   ├── deprecated
│   │   │   ├── Acytoo.py
│   │   │   ├── AiAsk.py
│   │   │   ├── AiChatOnline.py
│   │   │   ├── AiService.py
│   │   │   ├── Aibn.py
│   │   │   ├── Aichat.py
│   │   │   ├── Ails.py
│   │   │   ├── Aivvm.py
│   │   │   ├── Berlin.py
│   │   │   ├── ChatAnywhere.py
│   │   │   ├── ChatgptDuo.py
│   │   │   ├── CodeLinkAva.py
│   │   │   ├── Cromicle.py
│   │   │   ├── DfeHub.py
│   │   │   ├── EasyChat.py
│   │   │   ├── Equing.py
│   │   │   ├── FakeGpt.py
│   │   │   ├── FastGpt.py
│   │   │   ├── Forefront.py
│   │   │   ├── GPTalk.py
│   │   │   ├── GeekGpt.py
│   │   │   ├── GetGpt.py
│   │   │   ├── H2o.py
│   │   │   ├── Hashnode.py
│   │   │   ├── Lockchat.py
│   │   │   ├── Myshell.py
│   │   │   ├── NoowAi.py
│   │   │   ├── Opchatgpts.py
│   │   │   ├── OpenAssistant.py
│   │   │   ├── Phind.py
│   │   │   ├── V50.py
│   │   │   ├── Vercel.py
│   │   │   ├── Vitalentum.py
│   │   │   ├── VoiGpt.py
│   │   │   ├── Wewordle.py
│   │   │   ├── Wuguokai.py
│   │   │   ├── Ylokh.py
│   │   │   ├── Yqcloud.py
│   │   │   └── __init__.py
│   │   ├── gigachat_crt
│   │   │   └── russian_trusted_root_ca_pem.crt
│   │   ├── helper.py
│   │   ├── needs_auth
│   │   │   ├── Gemini.py
│   │   │   ├── Groq.py
│   │   │   ├── OpenRouter.py
│   │   │   ├── Openai.py
│   │   │   ├── OpenaiAccount.py
│   │   │   ├── OpenaiChat.py
│   │   │   ├── PerplexityApi.py
│   │   │   ├── Poe.py
│   │   │   ├── Raycast.py
│   │   │   ├── Theb.py
│   │   │   ├── ThebApi.py
│   │   │   └── __init__.py
│   │   ├── not_working
│   │   │   ├── AItianhu.py
│   │   │   ├── Bestim.py
│   │   │   ├── ChatBase.py
│   │   │   ├── ChatgptDemo.py
│   │   │   ├── ChatgptDemoAi.py
│   │   │   ├── ChatgptLogin.py
│   │   │   ├── Chatxyz.py
│   │   │   ├── Gpt6.py
│   │   │   ├── GptChatly.py
│   │   │   ├── GptForLove.py
│   │   │   ├── GptGo.py
│   │   │   ├── GptGod.py
│   │   │   ├── OnlineGpt.py
│   │   │   └── __init__.py
│   │   ├── npm
│   │   │   ├── node_modules
│   │   │   │   ├── .package-lock.json
│   │   │   │   └── crypto-js
│   │   │   │       ├── README.md
│   │   │   │       └── crypto-js.js
│   │   │   ├── package-lock.json
│   │   │   └── package.json
│   │   ├── openai
│   │   │   ├── __init__.py
│   │   │   ├── crypt.py
│   │   │   ├── har_file.py
│   │   │   └── proofofwork.py
│   │   ├── selenium
│   │   │   ├── AItianhuSpace.py
│   │   │   ├── Bard.py
│   │   │   ├── MyShell.py
│   │   │   ├── PerplexityAi.py
│   │   │   ├── Phind.py
│   │   │   ├── TalkAi.py
│   │   │   └── __init__.py
│   │   ├── unfinished
│   │   │   ├── AiChatting.py
│   │   │   ├── ChatAiGpt.py
│   │   │   ├── Komo.py
│   │   │   ├── MikuChat.py
│   │   │   └── __init__.py
│   │   └── you
│   │       ├── __init__.py
│   │       └── har_file.py
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── _logging.py
│   │   ├── _tokenizer.py
│   │   └── run.py
│   ├── cli.py
│   ├── client
│   │   ├── __init__.py
│   │   ├── async_client.py
│   │   ├── client.py
│   │   ├── helper.py
│   │   ├── image_models.py
│   │   ├── service.py
│   │   ├── stubs.py
│   │   └── types.py
│   ├── cookies.py
│   ├── debug.py
│   ├── errors.py
│   ├── gui
│   │   ├── __init__.py
│   │   ├── client
│   │   │   ├── index.html
│   │   │   └── static
│   │   │       ├── css
│   │   │       │   ├── dracula.min.css
│   │   │       │   └── style.css
│   │   │       ├── img
│   │   │       │   ├── android-chrome-192x192.png
│   │   │       │   ├── android-chrome-512x512.png
│   │   │       │   ├── apple-touch-icon.png
│   │   │       │   ├── favicon-16x16.png
│   │   │       │   ├── favicon-32x32.png
│   │   │       │   ├── gpt.png
│   │   │       │   ├── site.webmanifest
│   │   │       │   └── user.png
│   │   │       └── js
│   │   │           ├── chat.v1.js
│   │   │           ├── highlight.min.js
│   │   │           ├── highlightjs-copy.min.js
│   │   │           ├── icons.js
│   │   │           └── text_to_speech
│   │   │               ├── 630.index.js
│   │   │               ├── 900.index.js
│   │   │               └── index.js
│   │   ├── gui_parser.py
│   │   ├── run.py
│   │   ├── server
│   │   │   ├── __init__.py
│   │   │   ├── android_gallery.py
│   │   │   ├── api.py
│   │   │   ├── app.py
│   │   │   ├── backend.py
│   │   │   ├── config.py
│   │   │   ├── internet.py
│   │   │   ├── js_api.py
│   │   │   └── website.py
│   │   └── webview.py
│   ├── image.py
│   ├── local
│   │   └── __init__.py
│   ├── locals
│   │   ├── __init__.py
│   │   ├── models.py
│   │   └── provider.py
│   ├── models.py
│   ├── providers
│   │   ├── __init__.py
│   │   ├── base_provider.py
│   │   ├── conversation.py
│   │   ├── create_images.py
│   │   ├── helper.py
│   │   ├── retry_provider.py
│   │   └── types.py
│   ├── requests
│   │   ├── __init__.py
│   │   ├── aiohttp.py
│   │   ├── curl_cffi.py
│   │   ├── defaults.py
│   │   └── raise_for_status.py
│   ├── stubs.py
│   ├── typing.py
│   ├── version.py
│   └── webdriver.py
├── generated_images
│   └── .gitkeep
├── har_and_cookies
│   └── .gitkeep
├── models
│   └── .local-model-here
├── projects
│   ├── android
│   │   └── buildozer.spec
│   ├── text_to_speech
│   │   ├── README.md
│   │   ├── constants.js
│   │   ├── index.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── utils.js
│   │   ├── webpack.config.js
│   │   └── worker.js
│   └── windows
│       ├── copy.sh
│       ├── docker-compose.yml
│       ├── icon.ico
│       ├── main.py
│       └── main.spec
├── requirements-min.txt
├── requirements.txt
└── setup.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | ko_fi: xtekky
2 | github: [xtekky, hlohaus]
3 | patreon: xtekky
4 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/default_issue.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: New Issue
3 | about: 'Please use this template!'
4 | title: ''
5 | labels: bug
6 | assignees: xtekky
7 |
8 | ---
9 |
10 | **Known Issues** // delete this
11 | - you.com issue / fix: use a proxy or VPN; your country is probably flagged
12 | - forefront account creation error / use your own session or wait for a fix
13 |
14 |
15 | **Bug description**
16 | What did you do, what happened, which file did you try to run, and from which directory?
17 | Describe what you did after downloading the repo, such as changing into the repo directory and running a specific file.
18 |
19 | ex.
20 | 1. Go to '...'
21 | 2. Click on '....'
22 | 3. Scroll down to '....'
23 | 4. See error
24 |
25 | **Screenshots**
26 | If applicable, add screenshots to help explain your problem.
27 |
28 | **Environment**
29 | - python version
30 | - location (are you in a Cloudflare-flagged country)?
31 |
32 | **Additional context**
33 | Add any other context about the problem here.
34 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/workflows/close-inactive-issues.yml:
--------------------------------------------------------------------------------
1 | name: Close inactive issues
2 |
3 | on:
4 | schedule:
5 | - cron: "5 0 * * *"
6 |
7 | jobs:
8 | close-issues:
9 | runs-on: ubuntu-latest
10 | permissions:
11 | issues: write
12 | pull-requests: write
13 | steps:
14 | - uses: actions/stale@v5
15 | with:
16 | days-before-issue-stale: 7
17 | days-before-issue-close: 7
18 |
19 | days-before-pr-stale: 7
20 | days-before-pr-close: 7
21 |
22 | stale-issue-label: "stale"
23 | stale-pr-label: "stale"
24 |
25 | stale-issue-message: "Bumping this issue because it has been open for 7 days with no activity. Closing automatically in 7 days unless it becomes active again."
26 | close-issue-message: "Closing due to inactivity."
27 |
28 | stale-pr-message: "Bumping this pull request because it has been open for 7 days with no activity. Closing automatically in 7 days unless it becomes active again."
29 | close-pr-message: "Closing due to inactivity."
30 |
31 | repo-token: ${{ secrets.GITHUB_TOKEN }}
32 |
--------------------------------------------------------------------------------
/.github/workflows/copilot.yml:
--------------------------------------------------------------------------------
1 | name: AI Code Reviewer
2 |
3 | on:
4 | workflow_run:
5 | workflows: ["Unittest"]
6 | types:
7 | - completed
8 |
9 | jobs:
10 | review:
11 | runs-on: ubuntu-latest
12 | permissions:
13 | contents: read
14 | pull-requests: write
15 | steps:
16 | - name: Checkout Repo
17 | uses: actions/checkout@v3
18 | - name: 'Download artifact'
19 | uses: actions/github-script@v6
20 | with:
21 | script: |
22 | let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
23 | owner: context.repo.owner,
24 | repo: context.repo.repo,
25 | run_id: context.payload.workflow_run.id,
26 | });
27 | let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
28 | return artifact.name == "pr_number"
29 | })[0];
30 | let download = await github.rest.actions.downloadArtifact({
31 | owner: context.repo.owner,
32 | repo: context.repo.repo,
33 | artifact_id: matchArtifact.id,
34 | archive_format: 'zip',
35 | });
36 | let fs = require('fs');
37 | fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/pr_number.zip`, Buffer.from(download.data));
38 | - name: 'Unzip artifact'
39 | run: unzip pr_number.zip
40 | - name: Setup Python
41 | uses: actions/setup-python@v4
42 | with:
43 | python-version: "3.x"
44 | cache: 'pip'
45 | - name: Install Requirements
46 | run: |
47 | pip install -r requirements.txt
48 | pip install PyGithub
49 | - name: AI Code Review
50 | env:
51 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
52 | GITHUB_REPOSITORY: ${{ github.repository }}
53 | run: python -m etc.tool.copilot
54 |
--------------------------------------------------------------------------------
/.github/workflows/publish-to-pypi.yml:
--------------------------------------------------------------------------------
1 | name: Publish Python 🐍 distribution 📦 to PyPI
2 |
3 | on: push
4 |
5 | env:
6 | G4F_VERSION: ${{ github.ref_name }}
7 |
8 | jobs:
9 | build:
10 | name: Build distribution 📦
11 | if: startsWith(github.ref, 'refs/tags/')
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: actions/checkout@v4
15 | - name: Set up Python
16 | uses: actions/setup-python@v4
17 | with:
18 | python-version: "3.x"
19 | - name: Install pypa/build
20 | run: >-
21 | python3 -m
22 | pip install
23 | build
24 | --user
25 | - name: Build a binary wheel and a source tarball
26 | run: python3 -m build
27 | - name: Store the distribution packages
28 | uses: actions/upload-artifact@v3
29 | with:
30 | name: python-package-distributions
31 | path: dist/
32 |
33 | publish-to-pypi:
34 | name: >-
35 | Publish distribution on PyPI 🐍
36 | if: startsWith(github.ref, 'refs/tags/')
37 | needs:
38 | - build
39 | runs-on: ubuntu-latest
40 | environment:
41 | name: pypi
42 | url: https://pypi.org/p/g4f
43 | permissions:
44 | id-token: write
45 | steps:
46 | - name: Download all the dists
47 | uses: actions/download-artifact@v3
48 | with:
49 | name: python-package-distributions
50 | path: dist/
51 | - name: Publish distribution 📦 to PyPI
52 | uses: pypa/gh-action-pypi-publish@release/v1
--------------------------------------------------------------------------------
/.github/workflows/publish-workflow.yaml:
--------------------------------------------------------------------------------
1 | name: Publish Docker image
2 |
3 | on:
4 | push:
5 | tags:
6 | - '**'
7 |
8 | jobs:
9 | publish:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout repository
13 | uses: actions/checkout@v4
14 | - name: Set up QEMU
15 | uses: docker/setup-qemu-action@v3
16 | - name: Set up Docker Buildx
17 | uses: docker/setup-buildx-action@v3
18 |
19 | - name: Get metadata for Docker
20 | id: metadata
21 | uses: docker/metadata-action@v5
22 | with:
23 | images: |
24 | hlohaus789/g4f
25 | ghcr.io/${{ github.repository }}
26 |
27 | - name: Log in to Docker Hub
28 | uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
29 | with:
30 | username: ${{ secrets.DOCKER_USERNAME }}
31 | password: ${{ secrets.DOCKER_PASSWORD }}
32 |
33 | - name: Login to GitHub Container Registry
34 | uses: docker/login-action@v3
35 | with:
36 | registry: ghcr.io
37 | username: ${{ github.repository_owner }}
38 | password: ${{ secrets.GHCR_PAT }}
39 |
40 | - name: Build and push image
41 | uses: docker/build-push-action@v5
42 | with:
43 | context: .
44 | file: docker/Dockerfile
45 | platforms: linux/amd64,linux/arm64
46 | push: true
47 | tags: ${{ steps.metadata.outputs.tags }}
48 | labels: ${{ steps.metadata.outputs.labels }}
49 | build-args: |
50 | G4F_VERSION=${{ github.ref_name }}
51 |
--------------------------------------------------------------------------------
/.github/workflows/unittest.yml:
--------------------------------------------------------------------------------
1 | name: Unittest
2 |
3 | on:
4 | pull_request:
5 | types:
6 | - opened
7 | - synchronize
8 | push:
9 | branches:
10 | - 'main'
11 |
12 | jobs:
13 | build:
14 | name: Build unittest
15 | runs-on: ubuntu-latest
16 | steps:
17 | - uses: actions/checkout@v4
18 | - name: Set up Python 3.8
19 | uses: actions/setup-python@v4
20 | with:
21 | python-version: "3.8"
22 | cache: 'pip'
23 | - name: Install min requirements
24 | run: pip install -r requirements-min.txt
25 | - name: Run tests
26 | run: python -m etc.unittest
27 | - name: Set up Python 3.12
28 | uses: actions/setup-python@v4
29 | with:
30 | python-version: "3.12"
31 | cache: 'pip'
32 | - name: Install requirements
33 | run: |
34 | pip install -r requirements.txt
35 | pip uninstall -y nodriver
36 | - name: Run tests
37 | run: python -m etc.unittest
38 | - name: Save PR number
39 | env:
40 | PR_NUMBER: ${{ github.event.number }}
41 | run: |
42 | mkdir -p ./pr
43 | echo $PR_NUMBER > ./pr/pr_number
44 | - uses: actions/upload-artifact@v4
45 | with:
46 | name: pr_number
47 | path: pr/
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
10 | # Ignore local python virtual environment
11 | venv/
12 |
13 | # Ignore streamlit_chat_app.py conversations pickle
14 | conversations.pkl
15 | *.pkl
16 |
17 | # Ignore accounts created by APIs
18 | accounts.txt
19 |
20 | .idea/
21 | **/__pycache__/
22 | __pycache__/
23 |
24 | dist/
25 | *.log
26 | *.pyc
27 | *.egg-info/
28 | *.egg
29 | *.egg-info
30 | build
31 |
32 | test.py
33 | update.py
34 | cookie.json
35 | notes.txt
36 | close_issues.py
37 | xxx.py
38 | lab.py
39 | lab.js
40 | bing.py
41 | bing2.py
42 | .DS_Store
43 | lab/*
44 | lab
45 | tstt.py
46 | providerstest.py
47 | prv.py
48 | # Emacs crap
49 | *~
50 | x.js
51 | x.py
52 | info.txt
53 | local.py
54 | *.gguf
55 | image.py
56 | .buildozer
57 | hardir
58 | har_and_cookies
59 | node_modules
60 | models
61 | projects/windows/g4f
62 | doc.txt
63 | dist.py
64 | x.txt
65 | bench.py
66 | to-reverse.txt
67 | g4f/Provider/OpenaiChat2.py
68 | generated_images/
--------------------------------------------------------------------------------
/.gitpod.yml:
--------------------------------------------------------------------------------
1 | # Please adjust to your needs (see https://www.gitpod.io/docs/introduction/learn-gitpod/gitpod-yaml)
2 | # and commit this file to your remote git repository to share the goodness with others.
3 |
4 | # Learn more from ready-to-use templates: https://www.gitpod.io/docs/introduction/getting-started/quickstart
5 |
6 | tasks:
7 | - init: pip install -r requirements.txt
8 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ### Please follow these steps to contribute:
4 | 1. Reverse-engineer a website from this list: [sites-to-reverse](https://github.com/xtekky/gpt4free/issues/40)
5 | 2. Add it to [./testing](https://github.com/xtekky/gpt4free/tree/main/testing)
6 | 3. Refactor it and add it to [./g4f](https://github.com/xtekky/gpt4free/tree/main/g4f)
7 |
8 | ### We would be grateful to have you as a contributor!
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include g4f/gui/server *
2 | recursive-include g4f/gui/client *
3 | recursive-include g4f/Provider/npm *
4 | recursive-include g4f/Provider/gigachat_crt *
5 | recursive-include g4f/Provider/you *
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | ## Reporting a Vulnerability
2 |
3 | Please report (suspected) security vulnerabilities to https://t.me/xtekky. You will receive a response within 48 hours. If the issue is confirmed, we will release a patch as soon as possible; timing depends on complexity, but historically it has been within a few days.
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | gpt4free:
5 | image: hlohaus789/g4f:latest
6 | shm_size: 2gb
7 | build:
8 | context: .
9 | dockerfile: docker/Dockerfile
10 | volumes:
11 | - .:/app
12 | ports:
13 | - '8080:8080'
14 | - '1337:1337'
15 | - '7900:7900'
--------------------------------------------------------------------------------
/docker/background.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/docker/background.png
--------------------------------------------------------------------------------
/docker/supervisor-gui.conf:
--------------------------------------------------------------------------------
1 | [program:g4f-gui]
2 | priority=15
3 | command=python -m g4f.cli gui
4 | directory=/app
5 | stopasgroup=true
6 | autostart=true
7 | autorestart=true
8 |
9 | ;Logs (all activity redirected to stdout so it can be seen through "docker logs")
10 | redirect_stderr=true
11 | stdout_logfile=/dev/stdout
12 | stdout_logfile_maxbytes=0
--------------------------------------------------------------------------------
/docker/supervisor.conf:
--------------------------------------------------------------------------------
1 | [program:xvfb]
2 | priority=0
3 | command=/opt/bin/start-xvfb.sh
4 | autostart=true
5 | autorestart=true
6 |
7 | ;Logs
8 | redirect_stderr=false
9 | stdout_logfile=/var/log/supervisor/xvfb-stdout.log
10 | stderr_logfile=/var/log/supervisor/xvfb-stderr.log
11 | stdout_logfile_maxbytes=50MB
12 | stderr_logfile_maxbytes=50MB
13 | stdout_logfile_backups=5
14 | stderr_logfile_backups=5
15 | stdout_capture_maxbytes=50MB
16 | stderr_capture_maxbytes=50MB
17 |
18 | [program:vnc]
19 | priority=5
20 | command=/opt/bin/start-vnc.sh
21 | autostart=true
22 | autorestart=true
23 |
24 | ;Logs
25 | redirect_stderr=false
26 | stdout_logfile=/var/log/supervisor/vnc-stdout.log
27 | stderr_logfile=/var/log/supervisor/vnc-stderr.log
28 | stdout_logfile_maxbytes=50MB
29 | stderr_logfile_maxbytes=50MB
30 | stdout_logfile_backups=5
31 | stderr_logfile_backups=5
32 | stdout_capture_maxbytes=50MB
33 | stderr_capture_maxbytes=50MB
34 |
35 | [program:novnc]
36 | priority=10
37 | command=/opt/bin/start-novnc.sh
38 | autostart=true
39 | autorestart=true
40 |
41 | ;Logs
42 | redirect_stderr=false
43 | stdout_logfile=/var/log/supervisor/novnc-stdout.log
44 | stderr_logfile=/var/log/supervisor/novnc-stderr.log
45 | stdout_logfile_maxbytes=50MB
46 | stderr_logfile_maxbytes=50MB
47 | stdout_logfile_backups=5
48 | stderr_logfile_backups=5
49 | stdout_capture_maxbytes=50MB
50 | stderr_capture_maxbytes=50MB
51 |
52 | [program:g4f-api]
53 | priority=15
54 | command=python -m g4f.cli api
55 | directory=/app
56 | stopasgroup=true
57 | autostart=true
58 | autorestart=true
59 |
60 | ;Logs (all activity redirected to stdout so it can be seen through "docker logs")
61 | redirect_stderr=true
62 | stdout_logfile=/dev/stdout
63 | stdout_logfile_maxbytes=0
--------------------------------------------------------------------------------
/docs/cat.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/docs/cat.jpeg
--------------------------------------------------------------------------------
/docs/cat.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/docs/cat.webp
--------------------------------------------------------------------------------
/docs/docker.md:
--------------------------------------------------------------------------------
1 | ### G4F - Docker Setup
2 |
3 | Easily set up and run the G4F project using Docker without the hassle of manual dependency installation.
4 |
5 | 1. **Prerequisites:**
6 | - [Install Docker](https://docs.docker.com/get-docker/)
7 | - [Install Docker Compose](https://docs.docker.com/compose/install/)
8 |
9 | 2. **Clone the Repository:**
10 |
11 | ```bash
12 | git clone https://github.com/xtekky/gpt4free.git
13 | ```
14 |
15 | 3. **Navigate to the Project Directory:**
16 |
17 | ```bash
18 | cd gpt4free
19 | ```
20 |
21 | 4. **Build the Docker Image:**
22 |
23 | ```bash
24 | docker pull selenium/node-chrome
25 | docker-compose build
26 | ```
27 |
28 | 5. **Start the Service:**
29 |
30 | ```bash
31 | docker-compose up
32 | ```
33 |
34 | Your server will now be accessible at `http://localhost:1337`. Interact with the API or run tests as usual.
35 |
36 | To stop the Docker containers, simply run:
37 |
38 | ```bash
39 | docker-compose down
40 | ```
41 |
42 | > [!Note]
43 | > Changes made to local files reflect in the Docker container due to volume mapping in `docker-compose.yml`. However, if you add or remove dependencies, rebuild the Docker image using `docker-compose build`.
44 |
45 | [Return to Home](/)
--------------------------------------------------------------------------------
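Once the container is running, a quick smoke test from Python looks roughly like the sketch below. It assumes the container exposes the OpenAI-compatible interference API under `/v1/chat/completions` on port 1337 (the port mapped in `docker-compose.yml`); the exact response shape may differ between versions.

```python
import requests

# Minimal smoke test against the locally running g4f container (assumed endpoint).
response = requests.post(
    "http://localhost:1337/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,
    },
    timeout=120,
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])
```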
/docs/git.md:
--------------------------------------------------------------------------------
1 | ### G4F - Installation Guide
2 |
3 | Follow these steps to install G4F from the source code:
4 |
5 | 1. **Clone the Repository:**
6 |
7 | ```bash
8 | git clone https://github.com/xtekky/gpt4free.git
9 | ```
10 |
11 | 2. **Navigate to the Project Directory:**
12 |
13 | ```bash
14 | cd gpt4free
15 | ```
16 |
17 | 3. **(Optional) Create a Python Virtual Environment:**
18 |
19 | It's recommended to isolate your project dependencies. You can follow the [Python official documentation](https://docs.python.org/3/tutorial/venv.html) for virtual environments.
20 |
21 | ```bash
22 | python3 -m venv venv
23 | ```
24 |
25 | 4. **Activate the Virtual Environment:**
26 |
27 | - On Windows:
28 |
29 | ```bash
30 | .\venv\Scripts\activate
31 | ```
32 |
33 | - On macOS and Linux:
34 |
35 | ```bash
36 | source venv/bin/activate
37 | ```
38 |
39 | 5. **Install Minimum Requirements:**
40 |
41 | Install the minimum required packages:
42 |
43 | ```bash
44 | pip install -r requirements-min.txt
45 | ```
46 |
47 | 6. **Or Install All Packages from `requirements.txt`:**
48 |
49 | If you prefer, you can install all packages listed in `requirements.txt`:
50 |
51 | ```bash
52 | pip install -r requirements.txt
53 | ```
54 |
55 | 7. **Start Using the Repository:**
56 |
57 | You can now create Python scripts and utilize the G4F functionalities. Here's a basic example:
58 |
59 | Create a `test.py` file in the root folder and start using the repository:
60 |
61 | ```python
62 | import g4f
63 | # Your code here
64 | ```
65 |
66 | [Return to Home](/)
--------------------------------------------------------------------------------
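As a concrete starting point for step 7, here is a minimal sketch of a `test.py` that streams a chat completion, mirroring the provider example in `docs/guides/create_provider.md`:

```python
import g4f

# Stream a reply from the default provider selection for this model.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for message in response:
    print(message, flush=True, end="")
```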
/docs/guides/create_provider.md:
--------------------------------------------------------------------------------
1 | #### Create Provider with AI Tool
2 |
3 | Run the `create_provider` script in your terminal:
4 | ```bash
5 | python -m etc.tool.create_provider
6 | ```
7 | 1. Enter a name for the new provider.
8 | 2. Copy and paste the `cURL` command from your browser developer tools.
9 | 3. Let the AI create the provider for you.
10 | 4. Customize the provider according to your needs.
11 |
12 | #### Create Provider
13 |
14 | 1. Check out the current [list of potential providers](https://github.com/zukixa/cool-ai-stuff#ai-chat-websites), or find your own provider source!
15 | 2. Create a new file in [g4f/Provider](/g4f/Provider) with the name of the Provider.
16 | 3. Implement a class that extends [BaseProvider](/g4f/providers/base_provider.py).
17 |
18 | ```py
19 | from __future__ import annotations
20 |
21 | from ..typing import AsyncResult, Messages
22 | from .base_provider import AsyncGeneratorProvider
23 |
24 | class HogeService(AsyncGeneratorProvider):
25 | url = "https://chat-gpt.com"
26 | working = True
27 | supports_gpt_35_turbo = True
28 |
29 | @classmethod
30 | async def create_async_generator(
31 | cls,
32 | model: str,
33 | messages: Messages,
34 | proxy: str = None,
35 | **kwargs
36 | ) -> AsyncResult:
37 | yield ""
38 | ```
39 |
40 | 4. Here you can adjust the settings; for example, if the website supports streaming, set `supports_stream` to `True`.
41 | 5. Write the code that requests the provider in `create_async_generator` and `yield` the response, _even if_ it's a one-time response. Don't hesitate to look at other providers for inspiration.
42 | 6. Add the Provider Import in [`g4f/Provider/__init__.py`](./g4f/Provider/__init__.py)
43 |
44 | ```py
45 | from .HogeService import HogeService
46 |
47 | __all__ = [
48 |     "HogeService",
49 | ]
50 | ```
51 |
52 | 7. You are done! Test the provider by calling it:
53 |
54 | ```py
55 | import g4f
56 |
57 | response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Provider.PROVIDERNAME,
58 | messages=[{"role": "user", "content": "test"}], stream=g4f.Provider.PROVIDERNAME.supports_stream)
59 |
60 | for message in response:
61 | print(message, flush=True, end='')
62 | ```
--------------------------------------------------------------------------------
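Because most providers here subclass `AsyncGeneratorProvider`, you can also drive one directly without the `ChatCompletion` wrapper. A minimal sketch (`PROVIDERNAME` is a placeholder, as in the example above):

```python
import asyncio
import g4f

async def main():
    # create_async_generator yields response chunks as they arrive.
    async for chunk in g4f.Provider.PROVIDERNAME.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "test"}],
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())
```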
/docs/guides/phone.md:
--------------------------------------------------------------------------------
1 | ### Guide: Running the G4F GUI on Your Smartphone
2 |
3 | Running Python applications on your smartphone is possible with specialized apps like Pydroid. This tutorial will walk you through the process using an Android smartphone with Pydroid. Note that the steps may vary slightly for iPhone users due to differences in app names and ownership.
4 |
5 |
6 | On the first screenshot is Pydroid, and on the second is the Web UI in a browser.
7 | 
--------------------------------------------------------------------------------
/etc/tool/improve_code.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import re
3 | from pathlib import Path
4 | 
5 | sys.path.append(str(Path(__file__).parent.parent.parent))
6 | 
7 | import g4f
8 | 
9 | 
10 | def read_code(text):
11 |     if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text):
12 | return match.group("code")
13 |
14 | path = input("Path: ")
15 |
16 | with open(path, "r") as file:
17 | code = file.read()
18 |
19 | prompt = f"""
20 | Improve the code in this file:
21 | ```py
22 | {code}
23 | ```
24 | Don't remove anything.
25 | Add typehints if possible.
26 | Don't add any typehints to kwargs.
27 | Don't remove license comments.
28 | """
29 |
30 | print("Create code...")
31 | response = []
32 | for chunk in g4f.ChatCompletion.create(
33 | model=g4f.models.gpt_35_long,
34 | messages=[{"role": "user", "content": prompt}],
35 | timeout=300,
36 | stream=True
37 | ):
38 | response.append(chunk)
39 | print(chunk, end="", flush=True)
40 | print()
41 | response = "".join(response)
42 |
43 | if code := read_code(response):
44 | with open(path, "w") as file:
45 | file.write(code)
--------------------------------------------------------------------------------
/etc/tool/provider_init.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 |
4 | def main():
5 | content = create_content()
6 | with open("g4f/provider/__init__.py", "w", encoding="utf-8") as f:
7 | f.write(content)
8 |
9 |
10 | def create_content():
11 | path = Path()
12 | paths = path.glob("g4f/provider/*.py")
13 | paths = [p for p in paths if p.name not in ["__init__.py", "base_provider.py"]]
14 | classnames = [p.stem for p in paths]
15 |
16 | import_lines = [f"from .{name} import {name}" for name in classnames]
17 | import_content = "\n".join(import_lines)
18 |
19 | classnames.insert(0, "BaseProvider")
20 | all_content = [f' "{name}"' for name in classnames]
21 | all_content = ",\n".join(all_content)
22 | all_content = f"__all__ = [\n{all_content},\n]"
23 |
24 | return f"""from .base_provider import BaseProvider
25 | {import_content}
26 |
27 |
28 | {all_content}
29 | """
30 |
31 |
32 | if __name__ == "__main__":
33 | main()
34 |
--------------------------------------------------------------------------------
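For illustration, given hypothetical providers `Foo.py` and `Bar.py` in `g4f/provider/`, the script above would write an `__init__.py` along these lines (the names are placeholders):

```python
from .base_provider import BaseProvider
from .Foo import Foo
from .Bar import Bar


__all__ = [
    "BaseProvider",
    "Foo",
    "Bar",
]
```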
/etc/tool/translate_readme.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | from pathlib import Path
4 | import asyncio
5 |
6 | sys.path.append(str(Path(__file__).parent.parent.parent))
7 |
8 | import g4f
9 | g4f.debug.logging = True
10 | from g4f.debug import access_token
11 | provider = g4f.Provider.OpenaiChat
12 |
13 | iso = "DE"
14 | language = "german"
15 | translate_prompt = f"""
16 | Translate this markdown document to {language}.
17 | Don't translate or change inline code examples.
18 | ```md
19 | """
20 | keep_note = "Keep this: [!Note] as [!Note].\n"
21 | blocklist = [
22 | '## ©️ Copyright',
23 | '## 🚀 Providers and Models',
24 | '## 🔗 Related GPT4Free Projects'
25 | ]
26 | allowlist = [
27 | "### Other",
28 | "### Models"
29 | ]
30 |
31 | def read_text(text):
32 | start = end = 0
33 | new = text.strip().split('\n')
34 | for i, line in enumerate(new):
35 | if line.startswith('```'):
36 | if not start:
37 | start = i + 1
38 | end = i
39 | return '\n'.join(new[start:end]).strip()
40 |
41 | async def translate(text):
42 | prompt = translate_prompt + text.strip() + '\n```'
43 | if "[!Note]" in text:
44 | prompt = keep_note + prompt
45 | result = read_text(await provider.create_async(
46 | model="",
47 | messages=[{"role": "user", "content": prompt}],
48 | access_token=access_token
49 | ))
50 | if text.endswith("```") and not result.endswith("```"):
51 | result += "\n```"
52 | return result
53 |
54 | async def translate_part(part, i):
55 | blocklisted = False
56 | for headline in blocklist:
57 | if headline in part:
58 | blocklisted = True
59 | if blocklisted:
60 | lines = part.split('\n')
61 | lines[0] = await translate(lines[0])
62 | part = '\n'.join(lines)
63 | for trans in allowlist:
64 | if trans in part:
65 | part = part.replace(trans, await translate(trans))
66 | else:
67 | part = await translate(part)
68 | print(f"[{i}] translated")
69 | return part
70 |
71 | async def translate_readme(readme) -> str:
72 | parts = readme.split('\n## ')
73 | print(f"{len(parts)} parts...")
74 | parts = await asyncio.gather(
75 | *[translate_part("## " + part, i) for i, part in enumerate(parts)]
76 | )
77 | return "\n\n".join(parts)
78 |
79 | with open("README.md", "r") as fp:
80 | readme = fp.read()
81 |
82 | print("Translate readme...")
83 | readme = asyncio.run(translate_readme(readme))
84 |
85 | file = f"README-{iso}.md"
86 | with open(file, "w") as fp:
87 | fp.write(readme)
88 | print(f'"{file}" saved')
--------------------------------------------------------------------------------
/etc/unittest/__main__.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from .asyncio import *
3 | from .backend import *
4 | from .main import *
5 | from .model import *
6 | from .client import *
7 | from .async_client import *
8 | from .include import *
9 | from .integration import *
10 |
11 | unittest.main()
--------------------------------------------------------------------------------
/etc/unittest/backend.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import asyncio
3 | from unittest.mock import MagicMock
4 | from .mocks import ProviderMock
5 | import g4f
6 | from g4f.errors import MissingRequirementsError
7 |
8 | try:
9 | from g4f.gui.server.backend import Backend_Api, get_error_message
10 | has_requirements = True
11 | except:
12 | has_requirements = False
13 |
14 | class TestBackendApi(unittest.TestCase):
15 |
16 | def setUp(self):
17 | if not has_requirements:
18 | self.skipTest("gui is not installed")
19 | self.app = MagicMock()
20 | self.api = Backend_Api(self.app)
21 |
22 | def test_version(self):
23 | response = self.api.get_version()
24 | self.assertIn("version", response)
25 | self.assertIn("latest_version", response)
26 |
27 | def test_get_models(self):
28 | response = self.api.get_models()
29 | self.assertIsInstance(response, list)
30 | self.assertTrue(len(response) > 0)
31 |
32 | def test_get_providers(self):
33 | response = self.api.get_providers()
34 | self.assertIsInstance(response, list)
35 | self.assertTrue(len(response) > 0)
36 |
37 | def test_search(self):
38 | from g4f.gui.server.internet import search
39 | try:
40 | result = asyncio.run(search("Hello"))
41 | except MissingRequirementsError:
42 | self.skipTest("search is not installed")
43 | self.assertEqual(5, len(result))
44 |
45 | class TestUtilityFunctions(unittest.TestCase):
46 |
47 | def setUp(self):
48 | if not has_requirements:
49 | self.skipTest("gui is not installed")
50 |
51 | def test_get_error_message(self):
52 | g4f.debug.last_provider = ProviderMock
53 | exception = Exception("Message")
54 | result = get_error_message(exception)
55 | self.assertEqual("ProviderMock: Exception: Message", result)
56 |
57 | if __name__ == '__main__':
58 | unittest.main()
--------------------------------------------------------------------------------
/etc/unittest/include.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | class TestImport(unittest.TestCase):
4 |
5 | def test_get_cookies(self):
6 | from g4f import get_cookies as get_cookies_alias
7 | from g4f.cookies import get_cookies
8 | self.assertEqual(get_cookies_alias, get_cookies)
9 |
10 | def test_requests(self):
11 | from g4f.requests import StreamSession
12 | self.assertIsInstance(StreamSession, type)
13 |
14 | if __name__ == '__main__':
15 | unittest.main()
--------------------------------------------------------------------------------
/etc/unittest/integration.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import json
3 |
4 | try:
5 | import nest_asyncio
6 | has_nest_asyncio = True
7 | except ImportError:
8 | has_nest_asyncio = False
9 |
10 | from g4f.client import Client, ChatCompletion
11 | from g4f.Provider import Bing, OpenaiChat
12 |
13 | DEFAULT_MESSAGES = [{"role": "system", "content": 'Response in json, Example: {"success": false}'},
14 | {"role": "user", "content": "Say success true in json"}]
15 |
16 | class TestProviderIntegration(unittest.TestCase):
17 | def setUp(self):
18 | if not has_nest_asyncio:
19 | self.skipTest("nest_asyncio is not installed")
20 |
21 | def test_bing(self):
22 | self.skipTest("Not working")
23 | client = Client(provider=Bing)
24 | response = client.chat.completions.create(DEFAULT_MESSAGES, "", response_format={"type": "json_object"})
25 | self.assertIsInstance(response, ChatCompletion)
26 | self.assertIn("success", json.loads(response.choices[0].message.content))
27 |
28 | def test_openai(self):
29 | self.skipTest("not working in this network")
30 | client = Client(provider=OpenaiChat)
31 | response = client.chat.completions.create(DEFAULT_MESSAGES, "", response_format={"type": "json_object"})
32 | self.assertIsInstance(response, ChatCompletion)
33 | self.assertIn("success", json.loads(response.choices[0].message.content))
34 |
35 | if __name__ == '__main__':
36 | unittest.main()
--------------------------------------------------------------------------------
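Outside the test harness, the same `Client` interface is used as in this sketch, based on the calls above (messages and model are passed positionally, with an empty model string deferring to the provider's default):

```python
from g4f.client import Client
from g4f.Provider import Bing

client = Client(provider=Bing)
response = client.chat.completions.create(
    [{"role": "user", "content": "Say hello"}],  # messages
    "",                                          # model: empty = provider default
)
print(response.choices[0].message.content)
```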
/etc/unittest/main.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import asyncio
3 | import g4f
4 | from g4f import ChatCompletion, get_last_provider
5 | from g4f.Provider import RetryProvider
6 | from .mocks import ProviderMock
7 |
8 | DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
9 |
10 | class NoTestChatCompletion(unittest.TestCase):
11 |
12 | def no_test_create_default(self):
13 | result = ChatCompletion.create(g4f.models.default, DEFAULT_MESSAGES)
14 | if "Good" not in result and "Hi" not in result:
15 | self.assertIn("Hello", result)
16 |
17 | def no_test_bing_provider(self):
18 | provider = g4f.Provider.Bing
19 | result = ChatCompletion.create(g4f.models.default, DEFAULT_MESSAGES, provider)
20 | self.assertIn("Bing", result)
21 |
22 | class TestGetLastProvider(unittest.TestCase):
23 |
24 | def test_get_last_provider(self):
25 | ChatCompletion.create(g4f.models.default, DEFAULT_MESSAGES, ProviderMock)
26 | self.assertEqual(get_last_provider(), ProviderMock)
27 |
28 | def test_get_last_provider_retry(self):
29 | ChatCompletion.create(g4f.models.default, DEFAULT_MESSAGES, RetryProvider([ProviderMock]))
30 | self.assertEqual(get_last_provider(), ProviderMock)
31 |
32 | def test_get_last_provider_async(self):
33 | coroutine = ChatCompletion.create_async(g4f.models.default, DEFAULT_MESSAGES, ProviderMock)
34 | asyncio.run(coroutine)
35 | self.assertEqual(get_last_provider(), ProviderMock)
36 |
37 | def test_get_last_provider_as_dict(self):
38 | ChatCompletion.create(g4f.models.default, DEFAULT_MESSAGES, ProviderMock)
39 | last_provider_dict = get_last_provider(True)
40 | self.assertIsInstance(last_provider_dict, dict)
41 | self.assertIn('name', last_provider_dict)
42 | self.assertEqual(ProviderMock.__name__, last_provider_dict['name'])
--------------------------------------------------------------------------------
/etc/unittest/mocks.py:
--------------------------------------------------------------------------------
1 | from g4f.providers.base_provider import AbstractProvider, AsyncProvider, AsyncGeneratorProvider
2 |
3 | class ProviderMock(AbstractProvider):
4 | working = True
5 |
6 | def create_completion(
7 | model, messages, stream, **kwargs
8 | ):
9 | yield "Mock"
10 |
11 | class AsyncProviderMock(AsyncProvider):
12 | working = True
13 |
14 | async def create_async(
15 | model, messages, **kwargs
16 | ):
17 | return "Mock"
18 |
19 | class AsyncGeneratorProviderMock(AsyncGeneratorProvider):
20 | working = True
21 |
22 | async def create_async_generator(
23 | model, messages, stream, **kwargs
24 | ):
25 | yield "Mock"
26 |
27 | class ModelProviderMock(AbstractProvider):
28 | working = True
29 |
30 | def create_completion(
31 | model, messages, stream, **kwargs
32 | ):
33 | yield model
34 |
35 | class YieldProviderMock(AsyncGeneratorProvider):
36 | working = True
37 |
38 | async def create_async_generator(
39 | model, messages, stream, **kwargs
40 | ):
41 | for message in messages:
42 | yield message["content"]
--------------------------------------------------------------------------------
/etc/unittest/model.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import g4f
3 | from g4f import ChatCompletion
4 | from .mocks import ModelProviderMock
5 |
6 | DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
7 |
8 | test_model = g4f.models.Model(
9 | name = "test/test_model",
10 | base_provider = "",
11 | best_provider = ModelProviderMock
12 | )
13 | g4f.models.ModelUtils.convert["test_model"] = test_model
14 |
15 | class TestPassModel(unittest.TestCase):
16 |
17 | def test_model_instance(self):
18 | response = ChatCompletion.create(test_model, DEFAULT_MESSAGES)
19 | self.assertEqual(test_model.name, response)
20 |
21 | def test_model_name(self):
22 | response = ChatCompletion.create("test_model", DEFAULT_MESSAGES)
23 | self.assertEqual(test_model.name, response)
24 |
25 | def test_model_pass(self):
26 | response = ChatCompletion.create("test/test_model", DEFAULT_MESSAGES, ModelProviderMock)
27 | self.assertEqual(test_model.name, response)
--------------------------------------------------------------------------------
/g4f/Provider/Aichatos.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession
4 |
5 | from ..typing import AsyncResult, Messages
6 | from .base_provider import AsyncGeneratorProvider
7 | from .helper import format_prompt
8 |
9 | import random
10 |
11 | class Aichatos(AsyncGeneratorProvider):
12 | url = "https://chat10.aichatos.xyz"
13 | api = "https://api.binjie.fun"
14 | working = True
15 | supports_gpt_35_turbo = True
16 |
17 | @classmethod
18 | async def create_async_generator(
19 | cls,
20 | model: str,
21 | messages: Messages,
22 | proxy: str = None,
23 | **kwargs
24 | ) -> AsyncResult:
25 | headers = {
26 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
27 | "Accept": "application/json, text/plain, */*",
28 | "Accept-Language": "en-US,en;q=0.5",
29 | "Accept-Encoding": "gzip, deflate, br",
30 | "Content-Type": "application/json",
31 | "Origin": "https://chat10.aichatos.xyz",
32 | "DNT": "1",
33 | "Sec-GPC": "1",
34 | "Connection": "keep-alive",
35 | "Sec-Fetch-Dest": "empty",
36 | "Sec-Fetch-Mode": "cors",
37 | "Sec-Fetch-Site": "cross-site",
38 | "TE": "trailers",
39 | }
40 | async with ClientSession(headers=headers) as session:
41 | prompt = format_prompt(messages)
42 | userId = random.randint(1000000000000, 9999999999999)
43 |             system_message: str = ""
44 | data = {
45 | "prompt": prompt,
46 | "userId": "#/chat/{userId}",
47 | "network": True,
48 | "system": system_message,
49 | "withoutContext": False,
50 | "stream": True,
51 | }
52 | async with session.post(f"{cls.api}/api/generateStream", json=data, proxy=proxy) as response:
53 | response.raise_for_status()
54 | async for chunk in response.content:
55 | if chunk:
56 | yield chunk.decode()
57 |
--------------------------------------------------------------------------------
/g4f/Provider/Aura.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession
4 |
5 | from ..typing import AsyncResult, Messages
6 | from .base_provider import AsyncGeneratorProvider
7 | from ..requests import get_args_from_browser
8 | from ..webdriver import WebDriver
9 |
10 | class Aura(AsyncGeneratorProvider):
11 | url = "https://openchat.team"
12 | working = True
13 |
14 | @classmethod
15 | async def create_async_generator(
16 | cls,
17 | model: str,
18 | messages: Messages,
19 | proxy: str = None,
20 | temperature: float = 0.5,
21 | max_tokens: int = 8192,
22 | webdriver: WebDriver = None,
23 | **kwargs
24 | ) -> AsyncResult:
25 | args = get_args_from_browser(cls.url, webdriver, proxy)
26 | async with ClientSession(**args) as session:
27 | new_messages = []
28 | system_message = []
29 | for message in messages:
30 | if message["role"] == "system":
31 | system_message.append(message["content"])
32 | else:
33 | new_messages.append(message)
34 | data = {
35 | "model": {
36 | "id": "openchat_v3.2_mistral",
37 | "name": "OpenChat Aura",
38 | "maxLength": 24576,
39 | "tokenLimit": max_tokens
40 | },
41 | "messages": new_messages,
42 | "key": "",
43 | "prompt": "\n".join(system_message),
44 | "temperature": temperature
45 | }
46 | async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
47 | response.raise_for_status()
48 | async for chunk in response.content.iter_any():
49 |                     yield chunk.decode(errors="ignore")
--------------------------------------------------------------------------------
/g4f/Provider/BingCreateImages.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ..cookies import get_cookies
4 | from ..image import ImageResponse
5 | from ..errors import MissingAuthError
6 | from ..typing import AsyncResult, Messages, Cookies
7 | from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
8 | from .bing.create_images import create_images, create_session
9 |
10 | class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
11 | label = "Microsoft Designer in Bing"
12 | parent = "Bing"
13 | url = "https://www.bing.com/images/create"
14 | working = True
15 | needs_auth = True
16 | image_models = ["dall-e"]
17 |
18 | def __init__(self, cookies: Cookies = None, proxy: str = None, api_key: str = None) -> None:
19 | if api_key is not None:
20 | if cookies is None:
21 | cookies = {}
22 | cookies["_U"] = api_key
23 | self.cookies = cookies
24 | self.proxy = proxy
25 |
26 | @classmethod
27 | async def create_async_generator(
28 | cls,
29 | model: str,
30 | messages: Messages,
31 | api_key: str = None,
32 | cookies: Cookies = None,
33 | proxy: str = None,
34 | **kwargs
35 | ) -> AsyncResult:
36 | session = BingCreateImages(cookies, proxy, api_key)
37 | yield await session.generate(messages[-1]["content"])
38 |
39 | async def generate(self, prompt: str) -> ImageResponse:
40 | """
41 | Asynchronously creates a markdown formatted string with images based on the prompt.
42 |
43 | Args:
44 | prompt (str): Prompt to generate images.
45 |
46 | Returns:
47 | str: Markdown formatted string with images.
48 | """
49 | cookies = self.cookies or get_cookies(".bing.com", False)
50 | if cookies is None or "_U" not in cookies:
51 | raise MissingAuthError('Missing "_U" cookie')
52 | async with create_session(cookies, self.proxy) as session:
53 | images = await create_images(session, prompt)
54 | return ImageResponse(images, prompt, {"preview": "{image}?w=200&h=200"} if len(images) > 1 else {})
--------------------------------------------------------------------------------
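A minimal sketch of using this class directly; the `_U` cookie value is a placeholder that must come from a logged-in Bing session, as the `MissingAuthError` check above implies:

```python
import asyncio
from g4f.Provider.BingCreateImages import BingCreateImages

async def main():
    session = BingCreateImages(api_key="YOUR_U_COOKIE")  # placeholder cookie value
    # generate() returns an ImageResponse; per the docstring above, it renders
    # as a markdown-formatted string with image links.
    images = await session.generate("a watercolor lighthouse at dusk")
    print(images)

asyncio.run(main())
```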
/g4f/Provider/Blackbox.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import uuid
4 | import secrets
5 | from aiohttp import ClientSession
6 |
7 | from ..typing import AsyncResult, Messages, ImageType
8 | from ..image import to_data_uri
9 | from .base_provider import AsyncGeneratorProvider
10 |
11 | class Blackbox(AsyncGeneratorProvider):
12 | url = "https://www.blackbox.ai"
13 | working = True
14 |
15 | @classmethod
16 | async def create_async_generator(
17 | cls,
18 | model: str,
19 | messages: Messages,
20 | proxy: str = None,
21 | image: ImageType = None,
22 | image_name: str = None,
23 | **kwargs
24 | ) -> AsyncResult:
25 | if image is not None:
26 | messages[-1]["data"] = {
27 | "fileText": image_name,
28 | "imageBase64": to_data_uri(image)
29 | }
30 | headers = {
31 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
32 | "Accept": "*/*",
33 | "Accept-Language": "en-US,en;q=0.5",
34 | "Accept-Encoding": "gzip, deflate, br",
35 | "Referer": cls.url,
36 | "Content-Type": "application/json",
37 | "Origin": cls.url,
38 | "DNT": "1",
39 | "Sec-GPC": "1",
40 | "Alt-Used": "www.blackbox.ai",
41 | "Connection": "keep-alive",
42 | }
43 | async with ClientSession(headers=headers) as session:
44 | random_id = secrets.token_hex(16)
45 | random_user_id = str(uuid.uuid4())
46 | data = {
47 | "messages": messages,
48 | "id": random_id,
49 | "userId": random_user_id,
50 | "codeModelMode": True,
51 | "agentMode": {},
52 | "trendingAgentMode": {},
53 | "isMicMode": False,
54 | "isChromeExt": False,
55 | "playgroundMode": False,
56 | "webSearchMode": False,
57 | "userSystemPrompt": "",
58 | "githubToken": None
59 | }
60 | async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
61 | response.raise_for_status()
62 | async for chunk in response.content:
63 | if chunk:
64 | yield chunk.decode()
65 |
--------------------------------------------------------------------------------
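Blackbox is one of the providers that accepts an optional image, attaching it to the last message as shown above. A minimal sketch of passing one in (the file path is a placeholder, and it assumes `to_data_uri` accepts raw bytes):

```python
import asyncio
from g4f.Provider.Blackbox import Blackbox

async def main():
    with open("screenshot.png", "rb") as f:  # placeholder image path
        image_bytes = f.read()
    async for chunk in Blackbox.create_async_generator(
        model="",
        messages=[{"role": "user", "content": "Describe this image"}],
        image=image_bytes,
        image_name="screenshot.png",
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())
```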
/g4f/Provider/ChatForAi.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import time
4 | import hashlib
5 | import uuid
6 |
7 | from ..typing import AsyncResult, Messages
8 | from ..requests import StreamSession, raise_for_status
9 | from ..errors import RateLimitError
10 | from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
11 |
12 | class ChatForAi(AsyncGeneratorProvider, ProviderModelMixin):
13 | url = "https://chatforai.store"
14 | working = True
15 | default_model = "gpt-3.5-turbo"
16 | supports_message_history = True
17 | supports_gpt_35_turbo = True
18 |
19 | @classmethod
20 | async def create_async_generator(
21 | cls,
22 | model: str,
23 | messages: Messages,
24 | proxy: str = None,
25 | timeout: int = 120,
26 | temperature: float = 0.7,
27 | top_p: float = 1,
28 | **kwargs
29 | ) -> AsyncResult:
30 | model = cls.get_model(model)
31 | headers = {
32 | "Content-Type": "text/plain;charset=UTF-8",
33 | "Origin": cls.url,
34 | "Referer": f"{cls.url}/?r=b",
35 | }
36 | async with StreamSession(impersonate="chrome", headers=headers, proxies={"https": proxy}, timeout=timeout) as session:
37 | timestamp = int(time.time() * 1e3)
38 | conversation_id = str(uuid.uuid4())
39 | data = {
40 | "conversationId": conversation_id,
41 | "conversationType": "chat_continuous",
42 | "botId": "chat_continuous",
43 | "globalSettings":{
44 | "baseUrl": "https://api.openai.com",
45 | "model": model,
46 | "messageHistorySize": 5,
47 | "temperature": temperature,
48 | "top_p": top_p,
49 | **kwargs
50 | },
51 | "prompt": "",
52 | "messages": messages,
53 | "timestamp": timestamp,
54 | "sign": generate_signature(timestamp, "", conversation_id)
55 | }
56 | async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
57 | await raise_for_status(response)
58 | async for chunk in response.iter_content():
59 | if b"https://chatforai.store" in chunk:
60 | raise RuntimeError(f"Response: {chunk.decode(errors='ignore')}")
61 | yield chunk.decode(errors="ignore")
62 |
63 |
64 | def generate_signature(timestamp: int, message: str, id: str):
65 | buffer = f"{id}:{timestamp}:{message}:h496Jd6b"
66 | return hashlib.sha256(buffer.encode()).hexdigest()
67 |
--------------------------------------------------------------------------------
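The `sign` field is a simple anti-tamper hash over the conversation id, the timestamp, and a hard-coded salt. A worked example of what the request computes, using the same function as above:

```python
import time
import uuid
import hashlib

def generate_signature(timestamp: int, message: str, id: str):
    buffer = f"{id}:{timestamp}:{message}:h496Jd6b"
    return hashlib.sha256(buffer.encode()).hexdigest()

timestamp = int(time.time() * 1e3)        # milliseconds, as in the provider above
conversation_id = str(uuid.uuid4())
print(generate_signature(timestamp, "", conversation_id))  # 64-char hex digest
```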
/g4f/Provider/Cnote.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from aiohttp import ClientSession
5 |
6 | from ..typing import AsyncResult, Messages
7 | from .base_provider import AsyncGeneratorProvider
8 | from .helper import format_prompt
9 |
10 |
11 | class Cnote(AsyncGeneratorProvider):
12 | url = "https://f1.cnote.top"
13 | api_url = "https://p1api.xjai.pro/freeapi/chat-process"
14 | working = True
15 | supports_gpt_35_turbo = True
16 |
17 | @classmethod
18 | async def create_async_generator(
19 | cls,
20 | model: str,
21 | messages: Messages,
22 | proxy: str = None,
23 | **kwargs
24 | ) -> AsyncResult:
25 | headers = {
26 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
27 | "Accept": "application/json, text/plain, */*",
28 | "Accept-Language": "en-US,en;q=0.5",
29 | "Accept-Encoding": "gzip, deflate, br",
30 | "Content-Type": "application/json",
31 | "Origin": cls.url,
32 | "DNT": "1",
33 | "Sec-GPC": "1",
34 | "Connection": "keep-alive",
35 | "Sec-Fetch-Dest": "empty",
36 | "Sec-Fetch-Mode": "cors",
37 | "Sec-Fetch-Site": "cross-site",
38 | "TE": "trailers",
39 | }
40 | async with ClientSession(headers=headers) as session:
41 | prompt = format_prompt(messages)
42 |             system_message: str = ""
43 | data = {
44 | "prompt": prompt,
45 | "systemMessage": system_message,
46 | "temperature": 0.8,
47 | "top_p": 1,
48 | }
49 | async with session.post(cls.api_url, json=data, proxy=proxy) as response:
50 | response.raise_for_status()
51 | async for chunk in response.content:
52 | if chunk:
53 | try:
54 | data = json.loads(chunk.decode().split("&KFw6loC9Qvy&")[-1])
55 | text = data.get("text", "")
56 | yield text
57 | except (json.JSONDecodeError, IndexError):
58 | pass
59 |
--------------------------------------------------------------------------------
/g4f/Provider/DeepInfra.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import requests
4 | from ..typing import AsyncResult, Messages
5 | from .needs_auth.Openai import Openai
6 |
7 | class DeepInfra(Openai):
8 | label = "DeepInfra"
9 | url = "https://deepinfra.com"
10 | working = True
11 | needs_auth = True
12 | supports_stream = True
13 | supports_message_history = True
14 | default_model = "meta-llama/Meta-Llama-3-70B-Instruct"
15 | default_vision_model = "llava-hf/llava-1.5-7b-hf"
16 | model_aliases = {
17 | 'dbrx-instruct': 'databricks/dbrx-instruct',
18 | }
19 |
20 | @classmethod
21 | def get_models(cls):
22 | if not cls.models:
23 | url = 'https://api.deepinfra.com/models/featured'
24 | models = requests.get(url).json()
25 | cls.models = [model['model_name'] for model in models if model["type"] == "text-generation"]
26 | return cls.models
27 |
28 | @classmethod
29 | def create_async_generator(
30 | cls,
31 | model: str,
32 | messages: Messages,
33 | stream: bool,
34 | api_base: str = "https://api.deepinfra.com/v1/openai",
35 | temperature: float = 0.7,
36 | max_tokens: int = 1028,
37 | **kwargs
38 | ) -> AsyncResult:
39 | headers = {
40 | 'Accept-Encoding': 'gzip, deflate, br',
41 | 'Accept-Language': 'en-US',
42 | 'Connection': 'keep-alive',
43 | 'Origin': 'https://deepinfra.com',
44 | 'Referer': 'https://deepinfra.com/',
45 | 'Sec-Fetch-Dest': 'empty',
46 | 'Sec-Fetch-Mode': 'cors',
47 | 'Sec-Fetch-Site': 'same-site',
48 | 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
49 | 'X-Deepinfra-Source': 'web-embed',
50 | 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
51 | 'sec-ch-ua-mobile': '?0',
52 | 'sec-ch-ua-platform': '"macOS"',
53 | }
54 | return super().create_async_generator(
55 | model, messages,
56 | stream=stream,
57 | api_base=api_base,
58 | temperature=temperature,
59 | max_tokens=max_tokens,
60 | headers=headers,
61 | **kwargs
62 | )
--------------------------------------------------------------------------------
/g4f/Provider/Feedough.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from aiohttp import ClientSession
5 |
6 | from ..typing import AsyncResult, Messages
7 | from .base_provider import AsyncGeneratorProvider
8 | from .helper import format_prompt
9 |
10 |
11 | class Feedough(AsyncGeneratorProvider):
12 | url = "https://www.feedough.com"
13 | working = True
14 | supports_gpt_35_turbo = True
15 |
16 | @classmethod
17 | async def create_async_generator(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | proxy: str = None,
22 | **kwargs
23 | ) -> AsyncResult:
24 | headers = {
25 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
26 | "Accept": "*/*",
27 | "Accept-Language": "en-US,en;q=0.5",
28 | "Accept-Encoding": "gzip, deflate, br",
29 | "Referer": "https://www.feedough.com/ai-prompt-generator/",
30 | "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
31 | "Origin": "https://www.feedough.com",
32 | "DNT": "1",
33 | "Sec-GPC": "1",
34 | "Connection": "keep-alive",
35 | "Sec-Fetch-Dest": "empty",
36 | "Sec-Fetch-Mode": "cors",
37 | "Sec-Fetch-Site": "same-origin",
38 | "TE": "trailers",
39 | }
40 | async with ClientSession(headers=headers) as session:
41 | prompt = format_prompt(messages)
42 | data = {
43 | "action": "aixg_generate",
44 | "prompt": prompt,
45 | }
46 | async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
47 | response.raise_for_status()
48 | response_text = await response.text()
49 | response_json = json.loads(response_text)
50 | if response_json["success"]:
51 | message = response_json["data"]["message"]
52 | yield message
53 |
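A stand-alone sketch of the same request: WordPress admin-ajax endpoints expect form-encoded fields, so the payload is passed as data= rather than json= (endpoint availability is assumed).

import asyncio
import json
from aiohttp import ClientSession

async def demo(prompt: str) -> str:
    async with ClientSession() as session:
        data = {"action": "aixg_generate", "prompt": prompt}
        async with session.post("https://www.feedough.com/wp-admin/admin-ajax.php", data=data) as response:
            body = json.loads(await response.text())
            return body["data"]["message"] if body.get("success") else ""

# asyncio.run(demo("Describe a startup idea"))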
--------------------------------------------------------------------------------
/g4f/Provider/FreeGpt.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import time, hashlib, random
4 |
5 | from ..typing import AsyncResult, Messages
6 | from ..requests import StreamSession, raise_for_status
7 | from .base_provider import AsyncGeneratorProvider
8 | from ..errors import RateLimitError
9 |
10 | domains = [
11 | "https://s.aifree.site",
12 | "https://v.aifree.site/"
13 | ]
14 |
15 | class FreeGpt(AsyncGeneratorProvider):
16 | url = "https://freegptsnav.aifree.site"
17 | working = True
18 | supports_message_history = True
19 | supports_system_message = True
20 | supports_gpt_35_turbo = True
21 |
22 | @classmethod
23 | async def create_async_generator(
24 | cls,
25 | model: str,
26 | messages: Messages,
27 | proxy: str = None,
28 | timeout: int = 120,
29 | **kwargs
30 | ) -> AsyncResult:
31 | async with StreamSession(
32 | impersonate="chrome",
33 | timeout=timeout,
34 | proxies={"all": proxy}
35 | ) as session:
36 | prompt = messages[-1]["content"]
37 | timestamp = int(time.time())
38 | data = {
39 | "messages": messages,
40 | "time": timestamp,
41 | "pass": None,
42 | "sign": generate_signature(timestamp, prompt)
43 | }
44 | domain = random.choice(domains)
45 | async with session.post(f"{domain}/api/generate", json=data) as response:
46 | await raise_for_status(response)
47 | async for chunk in response.iter_content():
48 | chunk = chunk.decode(errors="ignore")
49 | if chunk == "当前地区当日额度已消耗完":  # "today's quota for this region is exhausted"
50 | raise RateLimitError("Rate limit reached")
51 | yield chunk
52 |
53 | def generate_signature(timestamp: int, message: str, secret: str = "") -> str:
54 | data = f"{timestamp}:{message}:{secret}"
55 | return hashlib.sha256(data.encode()).hexdigest()
56 |
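A stand-alone check of the signing scheme above: SHA-256 over "timestamp:prompt:secret" with an empty default secret.

import time
from g4f.Provider.FreeGpt import generate_signature

timestamp = int(time.time())
print(generate_signature(timestamp, "Hello"))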
--------------------------------------------------------------------------------
/g4f/Provider/GptTalkRu.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession, BaseConnector
4 |
5 | from ..typing import AsyncResult, Messages
6 | from .base_provider import AsyncGeneratorProvider
7 | from .helper import get_random_string, get_connector
8 | from ..requests import raise_for_status, get_args_from_browser, WebDriver
9 | from ..webdriver import has_seleniumwire
10 | from ..errors import MissingRequirementsError
11 |
12 | class GptTalkRu(AsyncGeneratorProvider):
13 | url = "https://gpttalk.ru"
14 | working = True
15 | supports_gpt_35_turbo = True
16 |
17 | @classmethod
18 | async def create_async_generator(
19 | cls,
20 | model: str,
21 | messages: Messages,
22 | proxy: str = None,
23 | connector: BaseConnector = None,
24 | webdriver: WebDriver = None,
25 | **kwargs
26 | ) -> AsyncResult:
27 | if not model:
28 | model = "gpt-3.5-turbo"
29 | if not has_seleniumwire:
30 | raise MissingRequirementsError('Install "selenium-wire" package')
31 | args = get_args_from_browser(f"{cls.url}", webdriver)
32 | args["headers"]["accept"] = "application/json, text/plain, */*"
33 | async with ClientSession(connector=get_connector(connector, proxy), **args) as session:
34 | async with session.get("https://gpttalk.ru/getToken") as response:
35 | await raise_for_status(response)
36 | public_key = (await response.json())["response"]["key"]["publicKey"]
37 | random_string = get_random_string(8)
38 | data = {
39 | "model": model,
40 | "modelType": 1,
41 | "prompt": messages,
42 | "responseType": "stream",
43 | "security": {
44 | "randomMessage": random_string,
45 | "shifrText": encrypt(public_key, random_string)
46 | }
47 | }
48 | async with session.post(f"{cls.url}/gpt2", json=data, proxy=proxy) as response:
49 | await raise_for_status(response)
50 | async for chunk in response.content.iter_any():
51 | yield chunk.decode(errors="ignore")
52 |
53 | def encrypt(public_key: str, value: str) -> str:
54 | from Crypto.Cipher import PKCS1_v1_5
55 | from Crypto.PublicKey import RSA
56 | import base64
57 | rsa_key = RSA.importKey(public_key)
58 | cipher = PKCS1_v1_5.new(rsa_key)
59 | return base64.b64encode(cipher.encrypt(value.encode())).decode()
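A round-trip sketch of encrypt() above using a locally generated key pair (requires pycryptodome; in the provider, the public key comes from the /getToken response instead).

import base64
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
from g4f.Provider.GptTalkRu import encrypt

key = RSA.generate(2048)
public_pem = key.publickey().export_key().decode()

token = encrypt(public_pem, "random64")
plain = PKCS1_v1_5.new(key).decrypt(base64.b64decode(token), None)
print(plain.decode())  # -> "random64"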
--------------------------------------------------------------------------------
/g4f/Provider/Koala.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from aiohttp import ClientSession, BaseConnector
5 |
6 | from ..typing import AsyncResult, Messages
7 | from .base_provider import AsyncGeneratorProvider
8 | from .helper import get_random_string, get_connector
9 | from ..requests import raise_for_status
10 |
11 | class Koala(AsyncGeneratorProvider):
12 | url = "https://koala.sh"
13 | working = True
14 | supports_gpt_35_turbo = True
15 | supports_message_history = True
16 |
17 | @classmethod
18 | async def create_async_generator(
19 | cls,
20 | model: str,
21 | messages: Messages,
22 | proxy: str = None,
23 | connector: BaseConnector = None,
24 | **kwargs
25 | ) -> AsyncResult:
26 | if not model:
27 | model = "gpt-3.5-turbo"
28 | headers = {
29 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
30 | "Accept": "text/event-stream",
31 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
32 | "Accept-Encoding": "gzip, deflate, br",
33 | "Referer": f"{cls.url}/chat",
34 | "Flag-Real-Time-Data": "false",
35 | "Visitor-ID": get_random_string(20),
36 | "Origin": cls.url,
37 | "Alt-Used": "koala.sh",
38 | "Sec-Fetch-Dest": "empty",
39 | "Sec-Fetch-Mode": "cors",
40 | "Sec-Fetch-Site": "same-origin",
41 | "TE": "trailers",
42 | }
43 | async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
44 | input_text = messages[-1]["content"]
45 | system_messages = [message["content"] for message in messages if message["role"] == "system"]
46 | if system_messages:
47 | input_text += " " + " ".join(system_messages)
48 | data = {
49 | "input": input,
50 | "inputHistory": [
51 | message["content"]
52 | for message in messages[:-1]
53 | if message["role"] == "user"
54 | ],
55 | "outputHistory": [
56 | message["content"]
57 | for message in messages
58 | if message["role"] == "assistant"
59 | ],
60 | "model": model,
61 | }
62 | async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response:
63 | await raise_for_status(response)
64 | async for chunk in response.content:
65 | if chunk.startswith(b"data: "):
66 | yield json.loads(chunk[6:])
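SSE parsing sketch matching the loop above: Koala emits `data: <json>` lines where each payload is a JSON-encoded string chunk. The sample lines below are hypothetical.

import json

lines = [b'data: "Hel"\n', b': keep-alive\n', b'data: "lo"\n']
text = "".join(json.loads(line[6:]) for line in lines if line.startswith(b"data: "))
print(text)  # -> "Hello"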
--------------------------------------------------------------------------------
/g4f/Provider/Local.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ..locals.models import get_models
4 | try:
5 | from ..locals.provider import LocalProvider
6 | has_requirements = True
7 | except ImportError:
8 | has_requirements = False
9 |
10 | from ..typing import Messages, CreateResult
11 | from ..providers.base_provider import AbstractProvider, ProviderModelMixin
12 | from ..errors import MissingRequirementsError
13 |
14 | class Local(AbstractProvider, ProviderModelMixin):
15 | label = "GPT4All"
16 | working = True
17 | supports_message_history = True
18 | supports_system_message = True
19 | supports_stream = True
20 |
21 | @classmethod
22 | def get_models(cls):
23 | if not cls.models:
24 | cls.models = list(get_models())
25 | cls.default_model = cls.models[0]
26 | return cls.models
27 |
28 | @classmethod
29 | def create_completion(
30 | cls,
31 | model: str,
32 | messages: Messages,
33 | stream: bool,
34 | **kwargs
35 | ) -> CreateResult:
36 | if not has_requirements:
37 | raise MissingRequirementsError('Install "gpt4all" package | pip install -U g4f[local]')
38 | return LocalProvider.create_completion(
39 | cls.get_model(model),
40 | messages,
41 | stream,
42 | **kwargs
43 | )
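Usage sketch, assuming the optional local extra is installed (pip install -U g4f[local]) and at least one GPT4All model is available; an empty model name is assumed to fall back to the default via ProviderModelMixin.

from g4f.Provider import Local

for chunk in Local.create_completion(
    model="",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    print(chunk, end="")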
--------------------------------------------------------------------------------
/g4f/Provider/MetaAIAccount.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ..typing import AsyncResult, Messages, Cookies
4 | from .helper import format_prompt, get_cookies
5 | from .MetaAI import MetaAI
6 |
7 | class MetaAIAccount(MetaAI):
8 | needs_auth = True
9 | parent = "MetaAI"
10 | image_models = ["meta"]
11 |
12 | @classmethod
13 | async def create_async_generator(
14 | cls,
15 | model: str,
16 | messages: Messages,
17 | proxy: str = None,
18 | cookies: Cookies = None,
19 | **kwargs
20 | ) -> AsyncResult:
21 | cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies
22 | async for chunk in cls(proxy).prompt(format_prompt(messages), cookies):
23 | yield chunk
--------------------------------------------------------------------------------
/g4f/Provider/Ollama.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import requests
4 |
5 | from .needs_auth.Openai import Openai
6 | from ..typing import AsyncResult, Messages
7 |
8 | class Ollama(Openai):
9 | label = "Ollama"
10 | url = "https://ollama.com"
11 | needs_auth = False
12 | working = True
13 |
14 | @classmethod
15 | def get_models(cls):
16 | if not cls.models:
17 | url = 'http://127.0.0.1:11434/api/tags'
18 | models = requests.get(url).json()["models"]
19 | cls.models = [model['name'] for model in models]
20 | cls.default_model = cls.models[0]
21 | return cls.models
22 |
23 | @classmethod
24 | def create_async_generator(
25 | cls,
26 | model: str,
27 | messages: Messages,
28 | api_base: str = "http://localhost:11434/v1",
29 | **kwargs
30 | ) -> AsyncResult:
31 | return super().create_async_generator(
32 | model, messages, api_base=api_base, **kwargs
33 | )
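Usage sketch: assumes a local Ollama server on its default port with at least one model pulled (e.g. via `ollama pull`).

from g4f.Provider import Ollama

print(Ollama.get_models())  # read from http://127.0.0.1:11434/api/tags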
--------------------------------------------------------------------------------
/g4f/Provider/Pi.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | from ..typing import CreateResult, Messages
6 | from .base_provider import AbstractProvider, format_prompt
7 | from ..requests import Session, get_session_from_browser, raise_for_status
8 |
9 | class Pi(AbstractProvider):
10 | url = "https://pi.ai/talk"
11 | working = True
12 | supports_stream = True
13 | _session = None
14 |
15 | @classmethod
16 | def create_completion(
17 | cls,
18 | model: str,
19 | messages: Messages,
20 | stream: bool,
21 | proxy: str = None,
22 | timeout: int = 180,
23 | conversation_id: str = None,
24 | **kwargs
25 | ) -> CreateResult:
26 | if cls._session is None:
27 | cls._session = get_session_from_browser(url=cls.url, proxy=proxy, timeout=timeout)
28 | if not conversation_id:
29 | conversation_id = cls.start_conversation(cls._session)
30 | prompt = format_prompt(messages)
31 | else:
32 | prompt = messages[-1]["content"]
33 | answer = cls.ask(cls._session, prompt, conversation_id)
34 | for line in answer:
35 | if "text" in line:
36 | yield line["text"]
37 |
38 | @classmethod
39 | def start_conversation(cls, session: Session) -> str:
40 | response = session.post('https://pi.ai/api/chat/start', data="{}", headers={
41 | 'accept': 'application/json',
42 | 'x-api-version': '3'
43 | })
44 | raise_for_status(response)
45 | return response.json()['conversations'][0]['sid']
46 |
47 | def get_chat_history(session: Session, conversation_id: str):
48 | params = {
49 | 'conversation': conversation_id,
50 | }
51 | response = session.get('https://pi.ai/api/chat/history', params=params)
52 | raise_for_status(response)
53 | return response.json()
54 |
55 | def ask(session: Session, prompt: str, conversation_id: str):
56 | json_data = {
57 | 'text': prompt,
58 | 'conversation': conversation_id,
59 | 'mode': 'BASE',
60 | }
61 | response = session.post('https://pi.ai/api/chat', json=json_data, stream=True)
62 | raise_for_status(response)
63 | for line in response.iter_lines():
64 | if line.startswith(b'data: {"text":'):
65 | yield json.loads(line.split(b'data: ')[1])
66 | elif line.startswith(b'data: {"title":'):
67 | yield json.loads(line.split(b'data: ')[1])
68 |
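Lower-level flow sketch using the helpers above (assumes get_session_from_browser can launch a browser to obtain a valid session): start a conversation once, then reuse its sid for prompts.

from g4f.Provider import Pi
from g4f.requests import get_session_from_browser

session = get_session_from_browser(url=Pi.url)
sid = Pi.start_conversation(session)
for line in Pi.ask(session, "Hello", sid):
    if "text" in line:
        print(line["text"], end="")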
--------------------------------------------------------------------------------
/g4f/Provider/Pizzagpt.py:
--------------------------------------------------------------------------------
1 | import json
2 | from aiohttp import ClientSession
3 |
4 | from ..typing import Messages, AsyncResult
5 | from .base_provider import AsyncGeneratorProvider
6 |
7 | class Pizzagpt(AsyncGeneratorProvider):
8 | url = "https://www.pizzagpt.it"
9 | api_endpoint = "/api/chatx-completion"
10 | supports_message_history = False
11 | supports_gpt_35_turbo = True
12 | working = True
13 |
14 | @classmethod
15 | async def create_async_generator(
16 | cls,
17 | model: str,
18 | messages: Messages,
19 | proxy: str = None,
20 | **kwargs
21 | ) -> AsyncResult:
22 | payload = {
23 | "question": messages[-1]["content"]
24 | }
25 | headers = {
26 | "Accept": "application/json",
27 | "Accept-Encoding": "gzip, deflate, br, zstd",
28 | "Accept-Language": "en-US,en;q=0.9",
29 | "Content-Type": "application/json",
30 | "Origin": cls.url,
31 | "Referer": f"{cls.url}/en",
32 | "Sec-Fetch-Dest": "empty",
33 | "Sec-Fetch-Mode": "cors",
34 | "Sec-Fetch-Site": "same-origin",
35 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
36 | "X-Secret": "Marinara"
37 | }
38 |
39 | async with ClientSession() as session:
40 | async with session.post(
41 | f"{cls.url}{cls.api_endpoint}",
42 | json=payload,
43 | proxy=proxy,
44 | headers=headers
45 | ) as response:
46 | response.raise_for_status()
47 | response_json = await response.json()
48 | yield response_json["answer"]["content"]
49 |
--------------------------------------------------------------------------------
/g4f/Provider/WhiteRabbitNeo.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession, BaseConnector
4 |
5 | from ..typing import AsyncResult, Messages, Cookies
6 | from ..requests.raise_for_status import raise_for_status
7 | from .base_provider import AsyncGeneratorProvider
8 | from .helper import get_cookies, get_connector, get_random_string
9 |
10 | class WhiteRabbitNeo(AsyncGeneratorProvider):
11 | url = "https://www.whiterabbitneo.com"
12 | working = True
13 | supports_message_history = True
14 | needs_auth = True
15 |
16 | @classmethod
17 | async def create_async_generator(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | cookies: Cookies = None,
22 | connector: BaseConnector = None,
23 | proxy: str = None,
24 | **kwargs
25 | ) -> AsyncResult:
26 | if cookies is None:
27 | cookies = get_cookies("www.whiterabbitneo.com")
28 | headers = {
29 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0",
30 | "Accept": "*/*",
31 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
32 | "Accept-Encoding": "gzip, deflate, br",
33 | "Referer": f"{cls.url}/",
34 | "Content-Type": "text/plain;charset=UTF-8",
35 | "Origin": cls.url,
36 | "Connection": "keep-alive",
37 | "Sec-Fetch-Dest": "empty",
38 | "Sec-Fetch-Mode": "cors",
39 | "Sec-Fetch-Site": "same-origin",
40 | "TE": "trailers"
41 | }
42 | async with ClientSession(
43 | headers=headers,
44 | cookies=cookies,
45 | connector=get_connector(connector, proxy)
46 | ) as session:
47 | data = {
48 | "messages": messages,
49 | "id": get_random_string(6),
50 | "enhancePrompt": False,
51 | "useFunctions": False
52 | }
53 | async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
54 | await raise_for_status(response)
55 | async for chunk in response.content.iter_any():
56 | if chunk:
57 | yield chunk.decode(errors="ignore")
--------------------------------------------------------------------------------
/g4f/Provider/base_provider.py:
--------------------------------------------------------------------------------
1 | from ..providers.base_provider import *
2 | from ..providers.types import FinishReason, Streaming
3 | from ..providers.conversation import BaseConversation
4 | from .helper import get_cookies, format_prompt
--------------------------------------------------------------------------------
/g4f/Provider/bing/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/Provider/bing/__init__.py
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Acytoo.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession
4 |
5 | from ...typing import AsyncResult, Messages
6 | from ..base_provider import AsyncGeneratorProvider
7 |
8 |
9 | class Acytoo(AsyncGeneratorProvider):
10 | url = 'https://chat.acytoo.com'
11 | working = False
12 | supports_message_history = True
13 | supports_gpt_35_turbo = True
14 |
15 | @classmethod
16 | async def create_async_generator(
17 | cls,
18 | model: str,
19 | messages: Messages,
20 | proxy: str = None,
21 | **kwargs
22 | ) -> AsyncResult:
23 | async with ClientSession(
24 | headers=_create_header()
25 | ) as session:
26 | async with session.post(
27 | f'{cls.url}/api/completions',
28 | proxy=proxy,
29 | json=_create_payload(messages, **kwargs)
30 | ) as response:
31 | response.raise_for_status()
32 | async for stream in response.content.iter_any():
33 | if stream:
34 | yield stream.decode()
35 |
36 |
37 | def _create_header():
38 | return {
39 | 'accept': '*/*',
40 | 'content-type': 'application/json',
41 | }
42 |
43 |
44 | def _create_payload(messages: Messages, temperature: float = 0.5, **kwargs):
45 | return {
46 | 'key' : '',
47 | 'model' : 'gpt-3.5-turbo',
48 | 'messages' : messages,
49 | 'temperature' : temperature,
50 | 'password' : ''
51 | }
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/AiAsk.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession
4 | from ...typing import AsyncResult, Messages
5 | from ..base_provider import AsyncGeneratorProvider
6 |
7 | class AiAsk(AsyncGeneratorProvider):
8 | url = "https://e.aiask.me"
9 | supports_message_history = True
10 | supports_gpt_35_turbo = True
11 | working = False
12 |
13 | @classmethod
14 | async def create_async_generator(
15 | cls,
16 | model: str,
17 | messages: Messages,
18 | proxy: str = None,
19 | **kwargs
20 | ) -> AsyncResult:
21 | headers = {
22 | "accept": "application/json, text/plain, */*",
23 | "origin": cls.url,
24 | "referer": f"{cls.url}/chat",
25 | }
26 | async with ClientSession(headers=headers) as session:
27 | data = {
28 | "continuous": True,
29 | "id": "fRMSQtuHl91A4De9cCvKD",
30 | "list": messages,
31 | "models": "0",
32 | "prompt": "",
33 | "temperature": kwargs.get("temperature", 0.5),
34 | "title": "",
35 | }
36 | buffer = ""
37 | rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
38 | async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
39 | response.raise_for_status()
40 | async for chunk in response.content.iter_any():
41 | buffer += chunk.decode()
42 | if not rate_limit.startswith(buffer):
43 | yield buffer
44 | buffer = ""
45 | elif buffer == rate_limit:
46 | raise RuntimeError("Rate limit reached")
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/AiChatOnline.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from aiohttp import ClientSession
5 |
6 | from ...typing import AsyncResult, Messages
7 | from ..base_provider import AsyncGeneratorProvider
8 | from ..helper import get_random_string
9 |
10 | class AiChatOnline(AsyncGeneratorProvider):
11 | url = "https://aichatonline.org"
12 | working = False
13 | supports_gpt_35_turbo = True
14 | supports_message_history = False
15 |
16 | @classmethod
17 | async def create_async_generator(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | proxy: str = None,
22 | **kwargs
23 | ) -> AsyncResult:
24 | headers = {
25 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
26 | "Accept": "text/event-stream",
27 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
28 | "Accept-Encoding": "gzip, deflate, br",
29 | "Referer": f"{cls.url}/chatgpt/chat/",
30 | "Content-Type": "application/json",
31 | "Origin": cls.url,
32 | "Alt-Used": "aichatonline.org",
33 | "Connection": "keep-alive",
34 | "Sec-Fetch-Dest": "empty",
35 | "Sec-Fetch-Mode": "cors",
36 | "Sec-Fetch-Site": "same-origin",
37 | "TE": "trailers"
38 | }
39 | async with ClientSession(headers=headers) as session:
40 | data = {
41 | "botId": "default",
42 | "customId": None,
43 | "session": get_random_string(16),
44 | "chatId": get_random_string(),
45 | "contextId": 7,
46 | "messages": messages,
47 | "newMessage": messages[-1]["content"],
48 | "newImageId": None,
49 | "stream": True
50 | }
51 | async with session.post(f"{cls.url}/chatgpt/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
52 | response.raise_for_status()
53 | async for chunk in response.content:
54 | if chunk.startswith(b"data: "):
55 | data = json.loads(chunk[6:])
56 | if data["type"] == "live":
57 | yield data["data"]
58 | elif data["type"] == "end":
59 | break
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/AiService.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import requests
4 |
5 | from ...typing import Any, CreateResult, Messages
6 | from ..base_provider import AbstractProvider
7 |
8 |
9 | class AiService(AbstractProvider):
10 | url = "https://aiservice.vercel.app/"
11 | working = False
12 | supports_gpt_35_turbo = True
13 |
14 | @staticmethod
15 | def create_completion(
16 | model: str,
17 | messages: Messages,
18 | stream: bool,
19 | **kwargs: Any,
20 | ) -> CreateResult:
21 | base = (
22 | "\n".join(
23 | f"{message['role']}: {message['content']}" for message in messages
24 | )
25 | + "\nassistant: "
26 | )
27 | headers = {
28 | "accept": "*/*",
29 | "content-type": "text/plain;charset=UTF-8",
30 | "sec-fetch-dest": "empty",
31 | "sec-fetch-mode": "cors",
32 | "sec-fetch-site": "same-origin",
33 | "Referer": "https://aiservice.vercel.app/chat",
34 | }
35 | data = {"input": base}
36 | url = "https://aiservice.vercel.app/api/chat/answer"
37 | response = requests.post(url, headers=headers, json=data)
38 | response.raise_for_status()
39 | yield response.json()["data"]
40 |
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Aibn.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import time
4 | import hashlib
5 |
6 | from ...typing import AsyncResult, Messages
7 | from ...requests import StreamSession
8 | from ..base_provider import AsyncGeneratorProvider
9 |
10 |
11 | class Aibn(AsyncGeneratorProvider):
12 | url = "https://aibn.cc"
13 | working = False
14 | supports_message_history = True
15 | supports_gpt_35_turbo = True
16 |
17 | @classmethod
18 | async def create_async_generator(
19 | cls,
20 | model: str,
21 | messages: Messages,
22 | proxy: str = None,
23 | timeout: int = 120,
24 | **kwargs
25 | ) -> AsyncResult:
26 | async with StreamSession(
27 | impersonate="chrome107",
28 | proxies={"https": proxy},
29 | timeout=timeout
30 | ) as session:
31 | timestamp = int(time.time())
32 | data = {
33 | "messages": messages,
34 | "pass": None,
35 | "sign": generate_signature(timestamp, messages[-1]["content"]),
36 | "time": timestamp
37 | }
38 | async with session.post(f"{cls.url}/api/generate", json=data) as response:
39 | response.raise_for_status()
40 | async for chunk in response.iter_content():
41 | yield chunk.decode()
42 |
43 |
44 | def generate_signature(timestamp: int, message: str, secret: str = "undefined") -> str:
45 | data = f"{timestamp}:{message}:{secret}"
46 | return hashlib.sha256(data.encode()).hexdigest()
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/ChatAnywhere.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession, ClientTimeout
4 |
5 | from ...typing import AsyncResult, Messages
6 | from ..base_provider import AsyncGeneratorProvider
7 |
8 |
9 | class ChatAnywhere(AsyncGeneratorProvider):
10 | url = "https://chatanywhere.cn"
11 | supports_gpt_35_turbo = True
12 | supports_message_history = True
13 | working = False
14 |
15 | @classmethod
16 | async def create_async_generator(
17 | cls,
18 | model: str,
19 | messages: Messages,
20 | proxy: str = None,
21 | timeout: int = 120,
22 | temperature: float = 0.5,
23 | **kwargs
24 | ) -> AsyncResult:
25 | headers = {
26 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
27 | "Accept": "application/json, text/plain, */*",
28 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
29 | "Accept-Encoding": "gzip, deflate, br",
30 | "Content-Type": "application/json",
31 | "Referer": f"{cls.url}/",
32 | "Origin": cls.url,
33 | "Sec-Fetch-Dest": "empty",
34 | "Sec-Fetch-Mode": "cors",
35 | "Sec-Fetch-Site": "same-origin",
36 | "Authorization": "",
37 | "Connection": "keep-alive",
38 | "TE": "trailers"
39 | }
40 | async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
41 | data = {
42 | "list": messages,
43 | "id": "s1_qYuOLXjI3rEpc7WHfQ",
44 | "title": messages[-1]["content"],
45 | "prompt": "",
46 | "temperature": temperature,
47 | "models": "61490748",
48 | "continuous": True
49 | }
50 | async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
51 | response.raise_for_status()
52 | async for chunk in response.content.iter_any():
53 | if chunk:
54 | yield chunk.decode()
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/ChatgptDuo.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ...typing import Messages
4 | from ...requests import StreamSession
5 | from ..base_provider import AsyncProvider, format_prompt
6 |
7 |
8 | class ChatgptDuo(AsyncProvider):
9 | url = "https://chatgptduo.com"
10 | supports_gpt_35_turbo = True
11 | working = False
12 |
13 | @classmethod
14 | async def create_async(
15 | cls,
16 | model: str,
17 | messages: Messages,
18 | proxy: str = None,
19 | timeout: int = 120,
20 | **kwargs
21 | ) -> str:
22 | async with StreamSession(
23 | impersonate="chrome107",
24 | proxies={"https": proxy},
25 | timeout=timeout
26 | ) as session:
27 | prompt = format_prompt(messages)
28 | data = {
29 | "prompt": prompt,
30 | "search": prompt,
31 | "purpose": "ask",
32 | }
33 | response = await session.post(f"{cls.url}/", data=data)
34 | response.raise_for_status()
35 | data = response.json()
36 |
37 | cls._sources = [{
38 | "title": source["title"],
39 | "url": source["link"],
40 | "snippet": source["snippet"]
41 | } for source in data["results"]]
42 |
43 | return data["answer"]
44 |
45 | @classmethod
46 | def get_sources(cls):
47 | return cls._sources
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/CodeLinkAva.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession
4 | import json
5 |
6 | from ...typing import AsyncGenerator
7 | from ..base_provider import AsyncGeneratorProvider
8 |
9 |
10 | class CodeLinkAva(AsyncGeneratorProvider):
11 | url = "https://ava-ai-ef611.web.app"
12 | supports_gpt_35_turbo = True
13 | working = False
14 |
15 | @classmethod
16 | async def create_async_generator(
17 | cls,
18 | model: str,
19 | messages: list[dict[str, str]],
20 | **kwargs
21 | ) -> AsyncGenerator:
22 | headers = {
23 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
24 | "Accept": "*/*",
25 | "Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
26 | "Origin": cls.url,
27 | "Referer": f"{cls.url}/",
28 | "Sec-Fetch-Dest": "empty",
29 | "Sec-Fetch-Mode": "cors",
30 | "Sec-Fetch-Site": "same-origin",
31 | }
32 | async with ClientSession(
33 | headers=headers
34 | ) as session:
35 | data = {
36 | "messages": messages,
37 | "temperature": 0.6,
38 | "stream": True,
39 | **kwargs
40 | }
41 | async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
42 | response.raise_for_status()
43 | async for line in response.content:
44 | line = line.decode()
45 | if line.startswith("data: "):
46 | if line.startswith("data: [DONE]"):
47 | break
48 | line = json.loads(line[6:-1])
49 |
50 | content = line["choices"][0]["delta"].get("content")
51 | if content:
52 | yield content
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Cromicle.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession
4 | from hashlib import sha256
5 | from ...typing import AsyncResult, Messages, Dict
6 |
7 | from ..base_provider import AsyncGeneratorProvider
8 | from ..helper import format_prompt
9 |
10 |
11 | class Cromicle(AsyncGeneratorProvider):
12 | url: str = 'https://cromicle.top'
13 | working: bool = False
14 | supports_gpt_35_turbo: bool = True
15 |
16 | @classmethod
17 | async def create_async_generator(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | proxy: str = None,
22 | **kwargs
23 | ) -> AsyncResult:
24 | async with ClientSession(
25 | headers=_create_header()
26 | ) as session:
27 | async with session.post(
28 | f'{cls.url}/chat',
29 | proxy=proxy,
30 | json=_create_payload(format_prompt(messages))
31 | ) as response:
32 | response.raise_for_status()
33 | async for stream in response.content.iter_any():
34 | if stream:
35 | yield stream.decode()
36 |
37 |
38 | def _create_header() -> Dict[str, str]:
39 | return {
40 | 'accept': '*/*',
41 | 'content-type': 'application/json',
42 | }
43 |
44 |
45 | def _create_payload(message: str) -> Dict[str, str]:
46 | return {
47 | 'message': message,
48 | 'token': 'abc',
49 | 'hash': sha256('abc'.encode() + message.encode()).hexdigest()
50 | }
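The request hash above, stand-alone: SHA-256 over the static token "abc" concatenated with the message bytes.

from hashlib import sha256

message = "Hello"
print(sha256("abc".encode() + message.encode()).hexdigest())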
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/DfeHub.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | import re
5 | import time
6 |
7 | import requests
8 |
9 | from ...typing import Any, CreateResult
10 | from ..base_provider import AbstractProvider
11 |
12 |
13 | class DfeHub(AbstractProvider):
14 | url = "https://chat.dfehub.com/"
15 | supports_stream = True
16 | supports_gpt_35_turbo = True
17 |
18 | @staticmethod
19 | def create_completion(
20 | model: str,
21 | messages: list[dict[str, str]],
22 | stream: bool, **kwargs: Any) -> CreateResult:
23 |
24 | headers = {
25 | "authority" : "chat.dfehub.com",
26 | "accept" : "*/*",
27 | "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
28 | "content-type" : "application/json",
29 | "origin" : "https://chat.dfehub.com",
30 | "referer" : "https://chat.dfehub.com/",
31 | "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
32 | "sec-ch-ua-mobile" : "?0",
33 | "sec-ch-ua-platform": '"macOS"',
34 | "sec-fetch-dest" : "empty",
35 | "sec-fetch-mode" : "cors",
36 | "sec-fetch-site" : "same-origin",
37 | "user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
38 | "x-requested-with" : "XMLHttpRequest",
39 | }
40 |
41 | json_data = {
42 | "messages" : messages,
43 | "model" : "gpt-3.5-turbo",
44 | "temperature" : kwargs.get("temperature", 0.5),
45 | "presence_penalty" : kwargs.get("presence_penalty", 0),
46 | "frequency_penalty" : kwargs.get("frequency_penalty", 0),
47 | "top_p" : kwargs.get("top_p", 1),
48 | "stream" : True
49 | }
50 |
51 | response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
52 | headers=headers, json=json_data, timeout=3, stream=True)
53 |
54 | for chunk in response.iter_lines():
55 | if b"detail" in chunk:
56 | delay = re.findall(r"\d+\.\d+", chunk.decode())
57 | delay = float(delay[-1])
58 | time.sleep(delay)
59 | yield from DfeHub.create_completion(model, messages, stream, **kwargs)
60 | if b"content" in chunk:
61 | data = json.loads(chunk.decode().split("data: ")[1])
62 | yield (data["choices"][0]["delta"]["content"])
63 |
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Forefront.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | import requests
6 |
7 | from ...typing import Any, CreateResult
8 | from ..base_provider import AbstractProvider
9 |
10 |
11 | class Forefront(AbstractProvider):
12 | url = "https://forefront.com"
13 | supports_stream = True
14 | supports_gpt_35_turbo = True
15 |
16 | @staticmethod
17 | def create_completion(
18 | model: str,
19 | messages: list[dict[str, str]],
20 | stream: bool, **kwargs: Any) -> CreateResult:
21 |
22 | json_data = {
23 | "text" : messages[-1]["content"],
24 | "action" : "noauth",
25 | "id" : "",
26 | "parentId" : "",
27 | "workspaceId" : "",
28 | "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
29 | "model" : "gpt-4",
30 | "messages" : messages[:-1] if len(messages) > 1 else [],
31 | "internetMode" : "auto",
32 | }
33 |
34 | response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
35 | json=json_data, stream=True)
36 |
37 | response.raise_for_status()
38 | for token in response.iter_lines():
39 | if b"delta" in token:
40 | yield json.loads(token.decode().split("data: ")[1])["delta"]
41 |
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Lockchat.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | import requests
6 |
7 | from ...typing import Any, CreateResult
8 | from ..base_provider import AbstractProvider
9 |
10 |
11 | class Lockchat(AbstractProvider):
12 | url: str = "http://supertest.lockchat.app"
13 | supports_stream = True
14 | supports_gpt_35_turbo = True
15 | supports_gpt_4 = True
16 |
17 | @staticmethod
18 | def create_completion(
19 | model: str,
20 | messages: list[dict[str, str]],
21 | stream: bool, **kwargs: Any) -> CreateResult:
22 |
23 | temperature = float(kwargs.get("temperature", 0.7))
24 | payload = {
25 | "temperature": temperature,
26 | "messages" : messages,
27 | "model" : model,
28 | "stream" : True,
29 | }
30 |
31 | headers = {
32 | "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
33 | }
34 | response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
35 | json=payload, headers=headers, stream=True)
36 |
37 | response.raise_for_status()
38 | for token in response.iter_lines():
39 | if b"The model: `gpt-4` does not exist" in token:
40 | print("error, retrying...")
41 |
42 | yield from Lockchat.create_completion(
43 | model = model,
44 | messages = messages,
45 | stream = stream,
46 | temperature = temperature,
47 | **kwargs)
48 |
49 | if b"content" in token:
50 | token = json.loads(token.decode("utf-8").split("data: ")[1])
51 | token = token["choices"][0]["delta"].get("content")
52 |
53 | if token:
54 | yield (token)
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Opchatgpts.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import random, string, json
4 | from aiohttp import ClientSession
5 |
6 | from ...typing import Messages, AsyncResult
7 | from ..base_provider import AsyncGeneratorProvider
8 | from ..helper import get_random_string
9 |
10 | class Opchatgpts(AsyncGeneratorProvider):
11 | url = "https://opchatgpts.net"
12 | working = False
13 | supports_message_history = True
14 | supports_gpt_35_turbo = True
15 |
16 | @classmethod
17 | async def create_async_generator(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | proxy: str = None, **kwargs) -> AsyncResult:
22 |
23 | headers = {
24 | "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
25 | "Accept" : "*/*",
26 | "Accept-Language" : "de,en-US;q=0.7,en;q=0.3",
27 | "Origin" : cls.url,
28 | "Alt-Used" : "opchatgpts.net",
29 | "Referer" : f"{cls.url}/chatgpt-free-use/",
30 | "Sec-Fetch-Dest" : "empty",
31 | "Sec-Fetch-Mode" : "cors",
32 | "Sec-Fetch-Site" : "same-origin",
33 | }
34 | async with ClientSession(
35 | headers=headers
36 | ) as session:
37 | data = {
38 | "botId": "default",
39 | "chatId": get_random_string(),
40 | "contextId": 28,
41 | "customId": None,
42 | "messages": messages,
43 | "newMessage": messages[-1]["content"],
44 | "session": "N/A",
45 | "stream": True
46 | }
47 | async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
48 | response.raise_for_status()
49 | async for line in response.content:
50 | if line.startswith(b"data: "):
51 | try:
52 | data = json.loads(line[6:])
53 | assert "type" in data
54 | except (json.JSONDecodeError, AssertionError):
55 | raise RuntimeError(f"Broken line: {line.decode()}")
56 | if data["type"] == "live":
57 | yield data["data"]
58 | elif data["type"] == "end":
59 | break
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/V50.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import uuid
4 |
5 | import requests
6 |
7 | from ...typing import Any, CreateResult
8 | from ..base_provider import AbstractProvider
9 |
10 |
11 | class V50(AbstractProvider):
12 | url = 'https://p5.v50.ltd'
13 | supports_gpt_35_turbo = True
14 | supports_stream = False
15 | needs_auth = False
16 | working = False
17 |
18 | @staticmethod
19 | def create_completion(
20 | model: str,
21 | messages: list[dict[str, str]],
22 | stream: bool, **kwargs: Any) -> CreateResult:
23 |
24 | conversation = (
25 | "\n".join(
26 | f"{message['role']}: {message['content']}" for message in messages
27 | )
28 | + "\nassistant: "
29 | )
30 | payload = {
31 | "prompt" : conversation,
32 | "options" : {},
33 | "systemMessage" : ".",
34 | "temperature" : kwargs.get("temperature", 0.4),
35 | "top_p" : kwargs.get("top_p", 0.4),
36 | "model" : model,
37 | "user" : str(uuid.uuid4())
38 | }
39 |
40 | headers = {
41 | 'authority' : 'p5.v50.ltd',
42 | 'accept' : 'application/json, text/plain, */*',
43 | 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
44 | 'content-type' : 'application/json',
45 | 'origin' : 'https://p5.v50.ltd',
46 | 'referer' : 'https://p5.v50.ltd/',
47 | 'sec-ch-ua-platform': '"Windows"',
48 | 'sec-fetch-dest' : 'empty',
49 | 'sec-fetch-mode' : 'cors',
50 | 'sec-fetch-site' : 'same-origin',
51 | 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
52 | }
53 | response = requests.post(
54 | "https://p5.v50.ltd/api/chat-process",
55 | json=payload,
56 | headers=headers,
57 | proxies=kwargs.get('proxy', {}),
58 | )
59 |
60 | if "https://fk1.v50.ltd" not in response.text:
61 | yield response.text
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Vitalentum.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from aiohttp import ClientSession
5 |
6 | from ..base_provider import AsyncGeneratorProvider
7 | from ...typing import AsyncResult, Messages
8 |
9 | class Vitalentum(AsyncGeneratorProvider):
10 | url = "https://app.vitalentum.io"
11 | supports_gpt_35_turbo = True
12 |
13 |
14 | @classmethod
15 | async def create_async_generator(
16 | cls,
17 | model: str,
18 | messages: Messages,
19 | proxy: str = None,
20 | **kwargs
21 | ) -> AsyncResult:
22 | headers = {
23 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
24 | "Accept": "text/event-stream",
25 | "Accept-language": "de,en-US;q=0.7,en;q=0.3",
26 | "Origin": cls.url,
27 | "Referer": f"{cls.url}/",
28 | "Sec-Fetch-Dest": "empty",
29 | "Sec-Fetch-Mode": "cors",
30 | "Sec-Fetch-Site": "same-origin",
31 | }
32 | conversation = json.dumps({"history": [{
33 | "speaker": "human" if message["role"] == "user" else "bot",
34 | "text": message["content"],
35 | } for message in messages]})
36 | data = {
37 | "conversation": conversation,
38 | "temperature": 0.7,
39 | **kwargs
40 | }
41 | async with ClientSession(
42 | headers=headers
43 | ) as session:
44 | async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
45 | response.raise_for_status()
46 | async for line in response.content:
47 | line = line.decode()
48 | if line.startswith("data: "):
49 | if line.startswith("data: [DONE]"):
50 | break
51 | line = json.loads(line[6:-1])
52 | content = line["choices"][0]["delta"].get("content")
53 |
54 | if content:
55 | yield content
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Wewordle.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import random, string, time
4 | from aiohttp import ClientSession
5 |
6 | from ..base_provider import AsyncProvider
7 |
8 |
9 | class Wewordle(AsyncProvider):
10 | url = "https://wewordle.org"
11 | working = False
12 | supports_gpt_35_turbo = True
13 |
14 | @classmethod
15 | async def create_async(
16 | cls,
17 | model: str,
18 | messages: list[dict[str, str]],
19 | proxy: str = None,
20 | **kwargs
21 | ) -> str:
22 |
23 | headers = {
24 | "accept" : "*/*",
25 | "pragma" : "no-cache",
26 | "Content-Type" : "application/json",
27 | "Connection" : "keep-alive"
28 | }
29 |
30 | _user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
31 | _app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
32 | _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
33 | data = {
34 | "user" : _user_id,
35 | "messages" : messages,
36 | "subscriber": {
37 | "originalPurchaseDate" : None,
38 | "originalApplicationVersion" : None,
39 | "allPurchaseDatesMillis" : {},
40 | "entitlements" : {"active": {}, "all": {}},
41 | "allPurchaseDates" : {},
42 | "allExpirationDatesMillis" : {},
43 | "allExpirationDates" : {},
44 | "originalAppUserId" : f"$RCAnonymousID:{_app_id}",
45 | "latestExpirationDate" : None,
46 | "requestDate" : _request_date,
47 | "latestExpirationDateMillis" : None,
48 | "nonSubscriptionTransactions" : [],
49 | "originalPurchaseDateMillis" : None,
50 | "managementURL" : None,
51 | "allPurchasedProductIdentifiers": [],
52 | "firstSeen" : _request_date,
53 | "activeSubscriptions" : [],
54 | }
55 | }
56 |
57 |
58 | async with ClientSession(
59 | headers=headers
60 | ) as session:
61 | async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response:
62 | response.raise_for_status()
63 | content = (await response.json())["message"]["content"]
64 | if content:
65 | return content
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Wuguokai.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import random
4 |
5 | import requests
6 |
7 | from ...typing import Any, CreateResult
8 | from ..base_provider import AbstractProvider, format_prompt
9 |
10 |
11 | class Wuguokai(AbstractProvider):
12 | url = 'https://chat.wuguokai.xyz'
13 | supports_gpt_35_turbo = True
14 | working = False
15 |
16 | @staticmethod
17 | def create_completion(
18 | model: str,
19 | messages: list[dict[str, str]],
20 | stream: bool,
21 | **kwargs: Any,
22 | ) -> CreateResult:
23 | headers = {
24 | 'authority': 'ai-api.wuguokai.xyz',
25 | 'accept': 'application/json, text/plain, */*',
26 | 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
27 | 'content-type': 'application/json',
28 | 'origin': 'https://chat.wuguokai.xyz',
29 | 'referer': 'https://chat.wuguokai.xyz/',
30 | 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
31 | 'sec-ch-ua-mobile': '?0',
32 | 'sec-ch-ua-platform': '"Windows"',
33 | 'sec-fetch-dest': 'empty',
34 | 'sec-fetch-mode': 'cors',
35 | 'sec-fetch-site': 'same-site',
36 | 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
37 | }
38 | data = {
39 | "prompt": format_prompt(messages),
40 | "options": {},
41 | "userId": f"#/chat/{random.randint(1,99999999)}",
42 | "usingContext": True
43 | }
44 | response = requests.post(
45 | "https://ai-api20.wuguokai.xyz/api/chat-process",
46 | headers=headers,
47 | timeout=3,
48 | json=data,
49 | proxies=kwargs.get('proxy', {}),
50 | )
51 | _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")  # marker: "if the answer fails, retry or refresh the page a few times"
52 | if response.status_code != 200:
53 | raise Exception(f"Error: {response.status_code} {response.reason}")
54 | if len(_split) > 1:
55 | yield _split[1].strip()
56 | else:
57 | yield _split[0].strip()
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Ylokh.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | from ...requests import StreamSession
6 | from ..base_provider import AsyncGeneratorProvider
7 | from ...typing import AsyncResult, Messages
8 |
9 | class Ylokh(AsyncGeneratorProvider):
10 | url = "https://chat.ylokh.xyz"
11 | working = False
12 | supports_message_history = True
13 | supports_gpt_35_turbo = True
14 |
15 |
16 | @classmethod
17 | async def create_async_generator(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | stream: bool = True,
22 | proxy: str = None,
23 | timeout: int = 120,
24 | **kwargs
25 | ) -> AsyncResult:
26 | model = model if model else "gpt-3.5-turbo"
27 | headers = {"Origin": cls.url, "Referer": f"{cls.url}/"}
28 | data = {
29 | "messages": messages,
30 | "model": model,
31 | "temperature": 1,
32 | "presence_penalty": 0,
33 | "top_p": 1,
34 | "frequency_penalty": 0,
35 | "allow_fallback": True,
36 | "stream": stream,
37 | **kwargs
38 | }
39 | async with StreamSession(
40 | headers=headers,
41 | proxies={"https": proxy},
42 | timeout=timeout
43 | ) as session:
44 | async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
45 | response.raise_for_status()
46 | if stream:
47 | async for line in response.iter_lines():
48 | line = line.decode()
49 | if line.startswith("data: "):
50 | if line.startswith("data: [DONE]"):
51 | break
52 | line = json.loads(line[6:])
53 | content = line["choices"][0]["delta"].get("content")
54 | if content:
55 | yield content
56 | else:
57 | chat = await response.json()
58 | yield chat["choices"][0]["message"].get("content")
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/Yqcloud.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import random
4 | from ...requests import StreamSession
5 |
6 | from ...typing import AsyncResult, Messages
7 | from ..base_provider import AsyncGeneratorProvider, format_prompt
8 |
9 |
10 | class Yqcloud(AsyncGeneratorProvider):
11 | url = "https://chat9.yqcloud.top/"
12 | working = True
13 | supports_gpt_35_turbo = True
14 |
15 | @staticmethod
16 | async def create_async_generator(
17 | model: str,
18 | messages: Messages,
19 | proxy: str = None,
20 | timeout: int = 120,
21 | **kwargs,
22 | ) -> AsyncResult:
23 | async with StreamSession(
24 | headers=_create_header(), proxies={"https": proxy}, timeout=timeout
25 | ) as session:
26 | payload = _create_payload(messages, **kwargs)
27 | async with session.post("https://api.aichatos.cloud/api/generateStream", json=payload) as response:
28 | response.raise_for_status()
29 | async for chunk in response.iter_content():
30 | if chunk:
31 | chunk = chunk.decode()
32 | if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
33 | raise RuntimeError("IP address is blocked by abuse detection.")
34 | yield chunk
35 |
36 |
37 | def _create_header():
38 | return {
39 | "accept" : "application/json, text/plain, */*",
40 | "content-type" : "application/json",
41 | "origin" : "https://chat9.yqcloud.top",
42 | "referer" : "https://chat9.yqcloud.top/"
43 | }
44 |
45 |
46 | def _create_payload(
47 | messages: Messages,
48 | system_message: str = "",
49 | user_id: int = None,
50 | **kwargs
51 | ):
52 | if not user_id:
53 | user_id = random.randint(1690000544336, 2093025544336)
54 | return {
55 | "prompt": format_prompt(messages),
56 | "network": True,
57 | "system": system_message,
58 | "withoutContext": False,
59 | "stream": True,
60 | "userId": f"#/chat/{user_id}"
61 | }
62 |
--------------------------------------------------------------------------------
/g4f/Provider/deprecated/__init__.py:
--------------------------------------------------------------------------------
1 | from .AiService import AiService
2 | from .CodeLinkAva import CodeLinkAva
3 | from .DfeHub import DfeHub
4 | from .EasyChat import EasyChat
5 | from .Forefront import Forefront
6 | from .GetGpt import GetGpt
7 | from .Lockchat import Lockchat
8 | from .Wewordle import Wewordle
9 | from .Equing import Equing
10 | from .Wuguokai import Wuguokai
11 | from .V50 import V50
12 | from .FastGpt import FastGpt
13 | from .Aivvm import Aivvm
14 | from .Vitalentum import Vitalentum
15 | from .H2o import H2o
16 | from .Myshell import Myshell
17 | from .Acytoo import Acytoo
18 | from .Aibn import Aibn
19 | from .Ails import Ails
20 | from .ChatgptDuo import ChatgptDuo
21 | from .Cromicle import Cromicle
22 | from .Opchatgpts import Opchatgpts
23 | from .Yqcloud import Yqcloud
24 | from .Aichat import Aichat
25 | from .Berlin import Berlin
26 | from .Phind import Phind
27 | from .AiAsk import AiAsk
28 | from .AiChatOnline import AiChatOnline
29 | from .ChatAnywhere import ChatAnywhere
30 | from .FakeGpt import FakeGpt
31 | from .GeekGpt import GeekGpt
32 | from .GPTalk import GPTalk
33 | from .Hashnode import Hashnode
34 | from .Ylokh import Ylokh
35 | from .OpenAssistant import OpenAssistant
--------------------------------------------------------------------------------
/g4f/Provider/gigachat_crt/russian_trusted_root_ca_pem.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIFwjCCA6qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcDELMAkGA1UEBhMCUlUx
3 | PzA9BgNVBAoMNlRoZSBNaW5pc3RyeSBvZiBEaWdpdGFsIERldmVsb3BtZW50IGFu
4 | ZCBDb21tdW5pY2F0aW9uczEgMB4GA1UEAwwXUnVzc2lhbiBUcnVzdGVkIFJvb3Qg
5 | Q0EwHhcNMjIwMzAxMjEwNDE1WhcNMzIwMjI3MjEwNDE1WjBwMQswCQYDVQQGEwJS
6 | VTE/MD0GA1UECgw2VGhlIE1pbmlzdHJ5IG9mIERpZ2l0YWwgRGV2ZWxvcG1lbnQg
7 | YW5kIENvbW11bmljYXRpb25zMSAwHgYDVQQDDBdSdXNzaWFuIFRydXN0ZWQgUm9v
8 | dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMfFOZ8pUAL3+r2n
9 | qqE0Zp52selXsKGFYoG0GM5bwz1bSFtCt+AZQMhkWQheI3poZAToYJu69pHLKS6Q
10 | XBiwBC1cvzYmUYKMYZC7jE5YhEU2bSL0mX7NaMxMDmH2/NwuOVRj8OImVa5s1F4U
11 | zn4Kv3PFlDBjjSjXKVY9kmjUBsXQrIHeaqmUIsPIlNWUnimXS0I0abExqkbdrXbX
12 | YwCOXhOO2pDUx3ckmJlCMUGacUTnylyQW2VsJIyIGA8V0xzdaeUXg0VZ6ZmNUr5Y
13 | Ber/EAOLPb8NYpsAhJe2mXjMB/J9HNsoFMBFJ0lLOT/+dQvjbdRZoOT8eqJpWnVD
14 | U+QL/qEZnz57N88OWM3rabJkRNdU/Z7x5SFIM9FrqtN8xewsiBWBI0K6XFuOBOTD
15 | 4V08o4TzJ8+Ccq5XlCUW2L48pZNCYuBDfBh7FxkB7qDgGDiaftEkZZfApRg2E+M9
16 | G8wkNKTPLDc4wH0FDTijhgxR3Y4PiS1HL2Zhw7bD3CbslmEGgfnnZojNkJtcLeBH
17 | BLa52/dSwNU4WWLubaYSiAmA9IUMX1/RpfpxOxd4Ykmhz97oFbUaDJFipIggx5sX
18 | ePAlkTdWnv+RWBxlJwMQ25oEHmRguNYf4Zr/Rxr9cS93Y+mdXIZaBEE0KS2iLRqa
19 | OiWBki9IMQU4phqPOBAaG7A+eP8PAgMBAAGjZjBkMB0GA1UdDgQWBBTh0YHlzlpf
20 | BKrS6badZrHF+qwshzAfBgNVHSMEGDAWgBTh0YHlzlpfBKrS6badZrHF+qwshzAS
21 | BgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF
22 | AAOCAgEAALIY1wkilt/urfEVM5vKzr6utOeDWCUczmWX/RX4ljpRdgF+5fAIS4vH
23 | tmXkqpSCOVeWUrJV9QvZn6L227ZwuE15cWi8DCDal3Ue90WgAJJZMfTshN4OI8cq
24 | W9E4EG9wglbEtMnObHlms8F3CHmrw3k6KmUkWGoa+/ENmcVl68u/cMRl1JbW2bM+
25 | /3A+SAg2c6iPDlehczKx2oa95QW0SkPPWGuNA/CE8CpyANIhu9XFrj3RQ3EqeRcS
26 | AQQod1RNuHpfETLU/A2gMmvn/w/sx7TB3W5BPs6rprOA37tutPq9u6FTZOcG1Oqj
27 | C/B7yTqgI7rbyvox7DEXoX7rIiEqyNNUguTk/u3SZ4VXE2kmxdmSh3TQvybfbnXV
28 | 4JbCZVaqiZraqc7oZMnRoWrXRG3ztbnbes/9qhRGI7PqXqeKJBztxRTEVj8ONs1d
29 | WN5szTwaPIvhkhO3CO5ErU2rVdUr89wKpNXbBODFKRtgxUT70YpmJ46VVaqdAhOZ
30 | D9EUUn4YaeLaS8AjSF/h7UkjOibNc4qVDiPP+rkehFWM66PVnP1Msh93tc+taIfC
31 | EYVMxjh8zNbFuoc7fzvvrFILLe7ifvEIUqSVIC/AzplM/Jxw7buXFeGP1qVCBEHq
32 | 391d/9RAfaZ12zkwFsl+IKwE/OZxW8AHa9i1p4GO0YSNuczzEm4=
33 | -----END CERTIFICATE-----
--------------------------------------------------------------------------------
/g4f/Provider/helper.py:
--------------------------------------------------------------------------------
1 | from ..providers.helper import *
2 | from ..cookies import get_cookies
3 | from ..requests.aiohttp import get_connector
--------------------------------------------------------------------------------
/g4f/Provider/needs_auth/Groq.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from .Openai import Openai
4 | from ...typing import AsyncResult, Messages
5 |
6 | class Groq(Openai):
7 | label = "Groq"
8 | url = "https://console.groq.com/playground"
9 | working = True
10 | default_model = "mixtral-8x7b-32768"
11 | models = ["mixtral-8x7b-32768", "llama2-70b-4096", "gemma-7b-it"]
12 | model_aliases = {"mixtral-8x7b": "mixtral-8x7b-32768", "llama2-70b": "llama2-70b-4096"}
13 |
14 | @classmethod
15 | def create_async_generator(
16 | cls,
17 | model: str,
18 | messages: Messages,
19 | api_base: str = "https://api.groq.com/openai/v1",
20 | **kwargs
21 | ) -> AsyncResult:
22 | return super().create_async_generator(
23 | model, messages, api_base=api_base, **kwargs
24 | )
--------------------------------------------------------------------------------
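These needs_auth wrappers only pin a label, a default api_base and a model list on top of the shared Openai provider; auth headers, payload building and streaming are all inherited. A minimal usage sketch, assuming the g4f Client accepts a provider argument and with "YOUR_GROQ_KEY" as a placeholder for a real key:

from g4f.client import Client
from g4f.Provider.needs_auth import Groq

client = Client(provider=Groq, api_key="YOUR_GROQ_KEY")  # placeholder key
response = client.chat.completions.create(
    # the alias "mixtral-8x7b" resolves to "mixtral-8x7b-32768" via model_aliases
    model="mixtral-8x7b",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)
--------------------------------------------------------------------------------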
/g4f/Provider/needs_auth/OpenRouter.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import requests
4 |
5 | from .Openai import Openai
6 | from ...typing import AsyncResult, Messages
7 |
8 | class OpenRouter(Openai):
9 | label = "OpenRouter"
10 | url = "https://openrouter.ai"
11 | working = True
12 | default_model = "mistralai/mistral-7b-instruct:free"
13 |
14 | @classmethod
15 | def get_models(cls):
16 | if not cls.models:
17 | url = 'https://openrouter.ai/api/v1/models'
18 | models = requests.get(url).json()["data"]
19 | cls.models = [model['id'] for model in models]
20 | return cls.models
21 |
22 | @classmethod
23 | def create_async_generator(
24 | cls,
25 | model: str,
26 | messages: Messages,
27 | api_base: str = "https://openrouter.ai/api/v1",
28 | **kwargs
29 | ) -> AsyncResult:
30 | return super().create_async_generator(
31 | model, messages, api_base=api_base, **kwargs
32 | )
--------------------------------------------------------------------------------
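get_models here is lazily cached: the first call fetches the live model list from the OpenRouter API and stores it on the class, so later calls cost nothing. A short sketch (network access and an initially empty inherited models list assumed):

from g4f.Provider.needs_auth import OpenRouter

models = OpenRouter.get_models()        # GET https://openrouter.ai/api/v1/models
models_again = OpenRouter.get_models()  # served from cls.models, no request
print(len(models), models[:3])
--------------------------------------------------------------------------------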
/g4f/Provider/needs_auth/OpenaiAccount.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from .OpenaiChat import OpenaiChat
4 |
5 | class OpenaiAccount(OpenaiChat):
6 | needs_auth = True
7 | parent = "OpenaiChat"
8 | image_models = ["dall-e"]
--------------------------------------------------------------------------------
/g4f/Provider/needs_auth/PerplexityApi.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from .Openai import Openai
4 | from ...typing import AsyncResult, Messages
5 |
6 | class PerplexityApi(Openai):
7 | label = "Perplexity API"
8 | url = "https://www.perplexity.ai"
9 | working = True
10 | default_model = "llama-3-sonar-large-32k-online"
11 | models = [
12 | "llama-3-sonar-small-32k-chat",
13 | "llama-3-sonar-small-32k-online",
14 | "llama-3-sonar-large-32k-chat",
15 | "llama-3-sonar-large-32k-online",
16 | "llama-3-8b-instruct",
17 | "llama-3-70b-instruct",
18 | "mixtral-8x7b-instruct"
19 | ]
20 |
21 | @classmethod
22 | def create_async_generator(
23 | cls,
24 | model: str,
25 | messages: Messages,
26 | api_base: str = "https://api.perplexity.ai",
27 | **kwargs
28 | ) -> AsyncResult:
29 | return super().create_async_generator(
30 | model, messages, api_base=api_base, **kwargs
31 | )
--------------------------------------------------------------------------------
/g4f/Provider/needs_auth/Raycast.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | import requests
6 |
7 | from ...typing import CreateResult, Messages
8 | from ..base_provider import AbstractProvider
9 |
10 |
11 | class Raycast(AbstractProvider):
12 | url = "https://raycast.com"
13 | supports_gpt_35_turbo = True
14 | supports_gpt_4 = True
15 | supports_stream = True
16 | needs_auth = True
17 | working = True
18 |
19 | @staticmethod
20 | def create_completion(
21 | model: str,
22 | messages: Messages,
23 | stream: bool,
24 | proxy: str = None,
25 | **kwargs,
26 | ) -> CreateResult:
27 | auth = kwargs.get('auth')
28 | headers = {
29 | 'Accept': 'application/json',
30 | 'Accept-Language': 'en-US,en;q=0.9',
31 | 'Authorization': f'Bearer {auth}',
32 | 'Content-Type': 'application/json',
33 | 'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
34 | }
35 | parsed_messages = [
36 | {'author': message['role'], 'content': {'text': message['content']}}
37 | for message in messages
38 | ]
39 | data = {
40 | "debug": False,
41 | "locale": "en-CN",
42 | "messages": parsed_messages,
43 | "model": model,
44 | "provider": "openai",
45 | "source": "ai_chat",
46 | "system_instruction": "markdown",
47 | "temperature": 0.5
48 | }
49 | response = requests.post(
50 | "https://backend.raycast.com/api/v1/ai/chat_completions",
51 | headers=headers,
52 | json=data,
53 | stream=True,
54 | proxies={"https": proxy}
55 | )
56 | for token in response.iter_lines():
57 | if b'data: ' not in token:
58 | continue
59 | completion_chunk = json.loads(token.decode().replace('data: ', ''))
60 | token = completion_chunk['text']
61 | if token is not None:
62 | yield token
63 |
--------------------------------------------------------------------------------
/g4f/Provider/needs_auth/ThebApi.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ...typing import AsyncResult, Messages
4 | from .Openai import Openai
5 |
6 | models = {
7 | "theb-ai": "TheB.AI",
8 | "gpt-3.5-turbo": "GPT-3.5",
9 | "gpt-3.5-turbo-16k": "GPT-3.5-16K",
10 | "gpt-4-turbo": "GPT-4 Turbo",
11 | "gpt-4": "GPT-4",
12 | "gpt-4-32k": "GPT-4 32K",
13 | "claude-2": "Claude 2",
14 | "claude-1": "Claude",
15 | "claude-1-100k": "Claude 100K",
16 | "claude-instant-1": "Claude Instant",
17 | "claude-instant-1-100k": "Claude Instant 100K",
18 | "palm-2": "PaLM 2",
19 | "palm-2-codey": "Codey",
20 | "vicuna-13b-v1.5": "Vicuna v1.5 13B",
21 | "llama-2-7b-chat": "Llama 2 7B",
22 | "llama-2-13b-chat": "Llama 2 13B",
23 | "llama-2-70b-chat": "Llama 2 70B",
24 | "code-llama-7b": "Code Llama 7B",
25 | "code-llama-13b": "Code Llama 13B",
26 | "code-llama-34b": "Code Llama 34B",
27 | "qwen-7b-chat": "Qwen 7B"
28 | }
29 |
30 | class ThebApi(Openai):
31 | label = "TheB.AI API"
32 | url = "https://theb.ai"
33 | working = True
34 | needs_auth = True
35 | default_model = "gpt-3.5-turbo"
36 | models = list(models)
37 |
38 | @classmethod
39 | def create_async_generator(
40 | cls,
41 | model: str,
42 | messages: Messages,
43 | api_base: str = "https://api.theb.ai/v1",
44 | temperature: float = 1,
45 | top_p: float = 1,
46 | **kwargs
47 | ) -> AsyncResult:
48 | if "auth" in kwargs:
49 | kwargs["api_key"] = kwargs["auth"]
50 | system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
51 | if not system_message:
52 | system_message = "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture."
53 | messages = [message for message in messages if message["role"] != "system"]
54 | data = {
55 | "model_params": {
56 | "system_prompt": system_message,
57 | "temperature": temperature,
58 | "top_p": top_p,
59 | }
60 | }
61 | return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
--------------------------------------------------------------------------------
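ThebApi does not forward system turns as chat messages; it folds them into TheB.AI's model_params.system_prompt field and strips them from the message list. A worked example of that split:

messages = [
    {"role": "system", "content": "Answer tersely."},
    {"role": "user", "content": "Hi"},
]
system_message = "\n".join(m["content"] for m in messages if m["role"] == "system")
messages = [m for m in messages if m["role"] != "system"]
# system_message == "Answer tersely."
# messages == [{"role": "user", "content": "Hi"}]
--------------------------------------------------------------------------------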
/g4f/Provider/needs_auth/__init__.py:
--------------------------------------------------------------------------------
1 | from .Gemini import Gemini
2 | from .Raycast import Raycast
3 | from .Theb import Theb
4 | from .ThebApi import ThebApi
5 | from .OpenaiChat import OpenaiChat
6 | from .Poe import Poe
7 | from .Openai import Openai
8 | from .Groq import Groq
9 | from .OpenRouter import OpenRouter
10 | from .OpenaiAccount import OpenaiAccount
11 | from .PerplexityApi import PerplexityApi
--------------------------------------------------------------------------------
/g4f/Provider/not_working/Bestim.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ...typing import Messages
4 | from ..base_provider import BaseProvider, CreateResult
5 | from ...requests import get_session_from_browser
6 | from uuid import uuid4
7 |
8 | class Bestim(BaseProvider):
9 | url = "https://chatgpt.bestim.org"
10 | working = False
11 | supports_gpt_35_turbo = True
12 | supports_message_history = True
13 | supports_stream = True
14 |
15 | @classmethod
16 | def create_completion(
17 | cls,
18 | model: str,
19 | messages: Messages,
20 | stream: bool,
21 | proxy: str = None,
22 | **kwargs
23 | ) -> CreateResult:
24 | session = get_session_from_browser(cls.url, proxy=proxy)
25 | headers = {
26 | 'Accept': 'application/json, text/event-stream',
27 | }
28 | data = {
29 | "messagesHistory": [{
30 | "id": str(uuid4()),
31 | "content": m["content"],
32 | "from": "you" if m["role"] == "user" else "bot"
33 | } for m in messages],
34 | "type": "chat",
35 | }
36 | response = session.post(
37 | url="https://chatgpt.bestim.org/chat/send2/",
38 | json=data,
39 | headers=headers,
40 | stream=True
41 | )
42 | response.raise_for_status()
43 | for line in response.iter_lines():
44 | if not line.startswith(b"event: trylimit"):
45 | yield line.decode().removeprefix("data: ")
46 |
--------------------------------------------------------------------------------
/g4f/Provider/not_working/ChatgptDemoAi.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from aiohttp import ClientSession
5 |
6 | from ...typing import AsyncResult, Messages
7 | from ..base_provider import AsyncGeneratorProvider
8 | from ..helper import get_random_string
9 |
10 | class ChatgptDemoAi(AsyncGeneratorProvider):
11 | url = "https://chat.chatgptdemo.ai"
12 | working = False
13 | supports_gpt_35_turbo = True
14 | supports_message_history = True
15 |
16 | @classmethod
17 | async def create_async_generator(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | proxy: str = None,
22 | **kwargs
23 | ) -> AsyncResult:
24 | headers = {
25 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
26 | "Accept": "*/*",
27 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
28 | "Accept-Encoding": "gzip, deflate, br",
29 | "Referer": f"{cls.url}/",
30 | "Content-Type": "application/json",
31 | "Origin": cls.url,
32 | "Connection": "keep-alive",
33 | "Sec-Fetch-Dest": "empty",
34 | "Sec-Fetch-Mode": "cors",
35 | "Sec-Fetch-Site": "same-origin",
36 | "TE": "trailers"
37 | }
38 | async with ClientSession(headers=headers) as session:
39 | data = {
40 | "botId": "default",
41 | "customId": "8824fe9bdb323a5d585a3223aaa0cb6e",
42 | "session": "N/A",
43 | "chatId": get_random_string(12),
44 | "contextId": 2,
45 | "messages": messages,
46 | "newMessage": messages[-1]["content"],
47 | "stream": True
48 | }
49 | async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
50 | response.raise_for_status()
51 | async for chunk in response.content:
52 | if chunk.startswith(b"data: "):
53 | data = json.loads(chunk[6:])
54 | if data["type"] == "live":
55 | yield data["data"]
--------------------------------------------------------------------------------
/g4f/Provider/not_working/Chatxyz.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from aiohttp import ClientSession
5 |
6 | from ...typing import AsyncResult, Messages
7 | from ..base_provider import AsyncGeneratorProvider
8 |
9 | class Chatxyz(AsyncGeneratorProvider):
10 | url = "https://chat.3211000.xyz"
11 | working = False
12 | supports_gpt_35_turbo = True
13 | supports_message_history = True
14 |
15 | @classmethod
16 | async def create_async_generator(
17 | cls,
18 | model: str,
19 | messages: Messages,
20 | proxy: str = None,
21 | **kwargs
22 | ) -> AsyncResult:
23 | headers = {
24 | 'Accept': 'text/event-stream',
25 | 'Accept-Encoding': 'gzip, deflate, br',
26 | 'Accept-Language': 'en-US,en;q=0.5',
27 | 'Alt-Used': 'chat.3211000.xyz',
28 | 'Content-Type': 'application/json',
29 | 'Host': 'chat.3211000.xyz',
30 | 'Origin': 'https://chat.3211000.xyz',
31 | 'Referer': 'https://chat.3211000.xyz/',
32 | 'Sec-Fetch-Dest': 'empty',
33 | 'Sec-Fetch-Mode': 'cors',
34 | 'Sec-Fetch-Site': 'same-origin',
35 | 'TE': 'trailers',
36 | 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:121.0) Gecko/20100101 Firefox/121.0',
37 | 'x-requested-with': 'XMLHttpRequest'
38 | }
39 | async with ClientSession(headers=headers) as session:
40 | data = {
41 | "messages": messages,
42 | "stream": True,
43 | "model": "gpt-3.5-turbo",
44 | "temperature": 0.5,
45 | "presence_penalty": 0,
46 | "frequency_penalty": 0,
47 | "top_p": 1,
48 | **kwargs
49 | }
50 | async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response:
51 | response.raise_for_status()
52 | async for chunk in response.content:
53 | line = chunk.decode()
54 | if line.startswith("data: [DONE]"):
55 | break
56 | elif line.startswith("data: "):
57 | line = json.loads(line[6:])
58 | chunk = line["choices"][0]["delta"].get("content")
59 | if chunk:
60 | yield chunk
--------------------------------------------------------------------------------
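Chatxyz reads an OpenAI-style server-sent-event stream: each payload line is prefixed with "data: ", carries a JSON chunk whose choices[0].delta.content is the next token, and "data: [DONE]" terminates the stream. The same parsing on canned input:

import json

lines = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo"}}]}',
    'data: [DONE]',
]
for line in lines:
    if line.startswith("data: [DONE]"):
        break
    if line.startswith("data: "):
        delta = json.loads(line[6:])["choices"][0]["delta"].get("content")
        if delta:
            print(delta, end="")  # prints "Hello"
--------------------------------------------------------------------------------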
/g4f/Provider/not_working/Gpt6.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from aiohttp import ClientSession
5 |
6 | from ...typing import AsyncResult, Messages
7 | from ..base_provider import AsyncGeneratorProvider
8 |
9 | class Gpt6(AsyncGeneratorProvider):
10 | url = "https://gpt6.ai"
11 | working = False
12 | supports_gpt_35_turbo = True
13 |
14 | @classmethod
15 | async def create_async_generator(
16 | cls,
17 | model: str,
18 | messages: Messages,
19 | proxy: str = None,
20 | **kwargs
21 | ) -> AsyncResult:
22 | headers = {
23 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
24 | "Accept": "*/*",
25 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
26 | "Accept-Encoding": "gzip, deflate, br",
27 | "Content-Type": "application/json",
28 | "Origin": "https://gpt6.ai",
29 | "Connection": "keep-alive",
30 | "Referer": "https://gpt6.ai/",
31 | "Sec-Fetch-Dest": "empty",
32 | "Sec-Fetch-Mode": "cors",
33 | "Sec-Fetch-Site": "cross-site",
34 | "TE": "trailers",
35 | }
36 | async with ClientSession(headers=headers) as session:
37 | data = {
38 | "prompts":messages,
39 | "geoInfo":{"ip":"100.90.100.222","hostname":"ip-100-090-100-222.um36.pools.vodafone-ip.de","city":"Muenchen","region":"North Rhine-Westphalia","country":"DE","loc":"44.0910,5.5827","org":"AS3209 Vodafone GmbH","postal":"41507","timezone":"Europe/Berlin"},
40 | "paid":False,
41 | "character":{"textContent":"","id":"52690ad6-22e4-4674-93d4-1784721e9944","name":"GPT6","htmlContent":""}
42 | }
43 | async with session.post(f"https://seahorse-app-d29hu.ondigitalocean.app/api/v1/query", json=data, proxy=proxy) as response:
44 | response.raise_for_status()
45 | async for line in response.content:
46 | if line.startswith(b"data: [DONE]"):
47 | break
48 | elif line.startswith(b"data: "):
49 | line = json.loads(line[6:-1])
50 | chunk = line["choices"][0]["delta"].get("content")
51 | if chunk:
52 | yield chunk
--------------------------------------------------------------------------------
/g4f/Provider/not_working/GptChatly.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ...requests import Session, get_session_from_browser
4 | from ...typing import Messages
5 | from ..base_provider import AsyncProvider
6 |
7 |
8 | class GptChatly(AsyncProvider):
9 | url = "https://gptchatly.com"
10 | working = False
11 | supports_message_history = True
12 | supports_gpt_35_turbo = True
13 |
14 | @classmethod
15 | async def create_async(
16 | cls,
17 | model: str,
18 | messages: Messages,
19 | proxy: str = None,
20 | timeout: int = 120,
21 | session: Session = None,
22 | **kwargs
23 | ) -> str:
24 | if not session:
25 | session = get_session_from_browser(cls.url, proxy=proxy, timeout=timeout)
26 | if model.startswith("gpt-4"):
27 | chat_url = f"{cls.url}/fetch-gpt4-response"
28 | else:
29 | chat_url = f"{cls.url}/felch-response"
30 | data = {
31 | "past_conversations": messages
32 | }
33 | response = session.post(chat_url, json=data)
34 | response.raise_for_status()
35 | return response.json()["chatGPTResponse"]
--------------------------------------------------------------------------------
/g4f/Provider/not_working/GptGod.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import secrets
4 | import json
5 | from aiohttp import ClientSession
6 |
7 | from ...typing import AsyncResult, Messages
8 | from ..base_provider import AsyncGeneratorProvider
9 | from ..helper import format_prompt
10 |
11 | class GptGod(AsyncGeneratorProvider):
12 | url = "https://gptgod.site"
13 | working = False
14 | supports_gpt_35_turbo = True
15 |
16 | @classmethod
17 | async def create_async_generator(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | proxy: str = None,
22 | **kwargs
23 | ) -> AsyncResult:
24 |
25 | headers = {
26 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
27 | "Accept": "text/event-stream",
28 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
29 | "Accept-Encoding": "gzip, deflate, br",
30 | "Alt-Used": "gptgod.site",
31 | "Connection": "keep-alive",
32 | "Referer": f"{cls.url}/",
33 | "Sec-Fetch-Dest": "empty",
34 | "Sec-Fetch-Mode": "cors",
35 | "Sec-Fetch-Site": "same-origin",
36 | "Pragma": "no-cache",
37 | "Cache-Control": "no-cache",
38 | }
39 |
40 | async with ClientSession(headers=headers) as session:
41 | prompt = format_prompt(messages)
42 | data = {
43 | "content": prompt,
44 | "id": secrets.token_hex(16).zfill(32)
45 | }
46 | async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data, proxy=proxy) as response:
47 | response.raise_for_status()
48 | event = None
49 | async for line in response.content:
50 | if line.startswith(b'event: '):
51 | event = line[7:-1]
52 | elif event == b"data" and line.startswith(b"data: "):
53 | data = json.loads(line[6:-1])
54 | if data:
55 | yield data
56 | elif event == b"done":
57 | break
--------------------------------------------------------------------------------
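Unlike the plain "data: " streams above, gptgod.site interleaves "event: " and "data: " lines, so the reader keeps the most recent event name as state and only decodes payloads that follow an "event: data" line. A minimal state-machine sketch on canned bytes (each line ends with a newline, hence the [7:-1] and [6:-1] slices):

import json

event = None
for line in [b'event: data\n', b'data: "chunk one"\n', b'event: done\n']:
    if line.startswith(b'event: '):
        event = line[7:-1]
    elif event == b"data" and line.startswith(b"data: "):
        print(json.loads(line[6:-1]))  # -> chunk one
    elif event == b"done":
        break
--------------------------------------------------------------------------------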
/g4f/Provider/not_working/OnlineGpt.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from aiohttp import ClientSession
5 |
6 | from ...typing import AsyncResult, Messages
7 | from ..base_provider import AsyncGeneratorProvider
8 | from ..helper import get_random_string
9 |
10 | class OnlineGpt(AsyncGeneratorProvider):
11 | url = "https://onlinegpt.org"
12 | working = False
13 | supports_gpt_35_turbo = True
14 | supports_message_history = False
15 |
16 | @classmethod
17 | async def create_async_generator(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | proxy: str = None,
22 | **kwargs
23 | ) -> AsyncResult:
24 | headers = {
25 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
26 | "Accept": "text/event-stream",
27 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
28 | "Accept-Encoding": "gzip, deflate, br",
29 | "Referer": f"{cls.url}/chat/",
30 | "Content-Type": "application/json",
31 | "Origin": cls.url,
32 | "Alt-Used": "onlinegpt.org",
33 | "Connection": "keep-alive",
34 | "Sec-Fetch-Dest": "empty",
35 | "Sec-Fetch-Mode": "cors",
36 | "Sec-Fetch-Site": "same-origin",
37 | "TE": "trailers"
38 | }
39 | async with ClientSession(headers=headers) as session:
40 | data = {
41 | "botId": "default",
42 | "customId": None,
43 | "session": get_random_string(12),
44 | "chatId": get_random_string(),
45 | "contextId": 9,
46 | "messages": messages,
47 | "newMessage": messages[-1]["content"],
48 | "newImageId": None,
49 | "stream": True
50 | }
51 | async with session.post(f"{cls.url}/chatgpt/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
52 | response.raise_for_status()
53 | async for chunk in response.content:
54 | if chunk.startswith(b"data: "):
55 | data = json.loads(chunk[6:])
56 | if data["type"] == "live":
57 | yield data["data"]
--------------------------------------------------------------------------------
/g4f/Provider/not_working/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from .AItianhu import AItianhu
3 | from .Bestim import Bestim
4 | from .ChatBase import ChatBase
5 | from .ChatgptDemo import ChatgptDemo
6 | from .ChatgptDemoAi import ChatgptDemoAi
7 | from .ChatgptLogin import ChatgptLogin
8 | from .Chatxyz import Chatxyz
9 | from .Gpt6 import Gpt6
10 | from .GptChatly import GptChatly
11 | from .GptForLove import GptForLove
12 | from .GptGo import GptGo
13 | from .GptGod import GptGod
14 | from .OnlineGpt import OnlineGpt
--------------------------------------------------------------------------------
/g4f/Provider/npm/node_modules/.package-lock.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "npm",
3 | "lockfileVersion": 2,
4 | "requires": true,
5 | "packages": {
6 | "node_modules/crypto-js": {
7 | "version": "4.2.0",
8 | "resolved": "https://registry.npmjs.org/crypto-js/-/crypto-js-4.2.0.tgz",
9 | "integrity": "sha512-KALDyEYgpY+Rlob/iriUtjV6d5Eq+Y191A5g4UqLAi8CyGP9N1+FdVbkc1SxKc2r4YAYqG8JzO2KGL+AizD70Q=="
10 | }
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/g4f/Provider/npm/package-lock.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "npm",
3 | "lockfileVersion": 2,
4 | "requires": true,
5 | "packages": {
6 | "": {
7 | "dependencies": {
8 | "crypto-js": "^4.2.0"
9 | }
10 | },
11 | "node_modules/crypto-js": {
12 | "version": "4.2.0",
13 | "resolved": "https://registry.npmjs.org/crypto-js/-/crypto-js-4.2.0.tgz",
14 | "integrity": "sha512-KALDyEYgpY+Rlob/iriUtjV6d5Eq+Y191A5g4UqLAi8CyGP9N1+FdVbkc1SxKc2r4YAYqG8JzO2KGL+AizD70Q=="
15 | }
16 | },
17 | "dependencies": {
18 | "crypto-js": {
19 | "version": "4.2.0",
20 | "resolved": "https://registry.npmjs.org/crypto-js/-/crypto-js-4.2.0.tgz",
21 | "integrity": "sha512-KALDyEYgpY+Rlob/iriUtjV6d5Eq+Y191A5g4UqLAi8CyGP9N1+FdVbkc1SxKc2r4YAYqG8JzO2KGL+AizD70Q=="
22 | }
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/g4f/Provider/npm/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "dependencies": {
3 | "crypto-js": "^4.2.0"
4 | }
5 | }
6 |
--------------------------------------------------------------------------------
/g4f/Provider/openai/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/Provider/openai/__init__.py
--------------------------------------------------------------------------------
/g4f/Provider/openai/crypt.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | import base64
5 | import hashlib
6 | import random
7 | from Crypto.Cipher import AES
8 |
9 | def pad(data: str) -> bytes:
10 | # Convert the string to bytes and calculate the number of bytes to pad
11 | data_bytes = data.encode()
12 | padding = 16 - (len(data_bytes) % 16)
13 | # Append the padding bytes with their value
14 | return data_bytes + bytes([padding] * padding)
15 |
16 | def encrypt(data, key):
17 | salt = ""
18 | salted = ""
19 | dx = bytes()
20 |
21 | # Generate salt, as 8 random lowercase letters
22 | salt = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(8))
23 |
24 | # Our final key and IV come from the key and salt being repeatedly hashed
25 | for x in range(3):
26 | dx = hashlib.md5(dx + key.encode() + salt.encode()).digest()
27 | salted += dx.hex()
28 |
29 | # Pad the data before encryption
30 | data = pad(data)
31 |
32 | aes = AES.new(
33 | bytes.fromhex(salted[:64]), AES.MODE_CBC, bytes.fromhex(salted[64:96])
34 | )
35 |
36 | return json.dumps(
37 | {
38 | "ct": base64.b64encode(aes.encrypt(data)).decode(),
39 | "iv": salted[64:96],
40 | "s": salt.encode().hex(),
41 | }
42 | )
43 |
44 | def unpad(data: bytes) -> bytes:
45 | # Extract the padding value from the last byte and remove padding
46 | padding_value = data[-1]
47 | return data[:-padding_value]
48 |
49 | def decrypt(data: str, key: str):
50 | # Parse JSON data
51 | parsed_data = json.loads(base64.b64decode(data))
52 | ct = base64.b64decode(parsed_data["ct"])
53 | iv = bytes.fromhex(parsed_data["iv"])
54 | salt = bytes.fromhex(parsed_data["s"])
55 |
56 | salted = ''
57 | dx = b''
58 | for x in range(3):
59 | dx = hashlib.md5(dx + key.encode() + salt).digest()
60 | salted += dx.hex()
61 |
62 | aes = AES.new(
63 | bytes.fromhex(salted[:64]), AES.MODE_CBC, iv
64 | )
65 |
66 | data = aes.decrypt(ct)
67 | if data.startswith(b'[{"key":'):
68 | return unpad(data).decode()
--------------------------------------------------------------------------------
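The hashing loop in encrypt and decrypt mirrors OpenSSL's legacy EVP_BytesToKey derivation: three chained MD5 digests of key + salt yield 48 bytes (96 hex characters), split into a 32-byte AES-256 key (salted[:64]) and a 16-byte CBC IV (salted[64:96]). Because decrypt only returns plaintext that starts with [{"key":, a roundtrip sketch has to use such a payload, and the whole JSON envelope must be base64-encoded before decryption:

import base64

from g4f.Provider.openai.crypt import decrypt, encrypt

payload = '[{"key": "value"}]'
token = encrypt(payload, "secret")  # JSON string with ct / iv / s fields
restored = decrypt(base64.b64encode(token.encode()).decode(), "secret")
assert restored == payload
--------------------------------------------------------------------------------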
/g4f/Provider/openai/proofofwork.py:
--------------------------------------------------------------------------------
1 | import random
2 | import hashlib
3 | import json
4 | import base64
5 | from datetime import datetime, timezone
6 |
7 |
8 | def generate_proof_token(required: bool, seed: str = "", difficulty: str = "", user_agent: str = None, proofTokens: list = None):
9 | if not required:
10 | return
11 |
12 | if proofTokens:
13 | config = proofTokens[-1]
14 | else:
15 | screen = random.choice([3008, 4010, 6000]) * random.choice([1, 2, 4])
16 | # Get current UTC time
17 | now_utc = datetime.now(timezone.utc)
18 | parse_time = now_utc.strftime('%a, %d %b %Y %H:%M:%S GMT')
19 | config = [
20 | screen, parse_time,
21 | None, 0, user_agent,
22 | "https://tcr9i.chat.openai.com/v2/35536E1E-65B4-4D96-9D97-6ADB7EFF8147/api.js",
23 | "dpl=1440a687921de39ff5ee56b92807faaadce73f13","en","en-US",
24 | None,
25 | "plugins−[object PluginArray]",
26 | random.choice(["_reactListeningcfilawjnerp", "_reactListening9ne2dfo1i47", "_reactListening410nzwhan2a"]),
27 | random.choice(["alert", "ontransitionend", "onprogress"])
28 | ]
29 |
30 | diff_len = len(difficulty)
31 | for i in range(100000):
32 | config[3] = i
33 | json_data = json.dumps(config)
34 | base = base64.b64encode(json_data.encode()).decode()
35 | hash_value = hashlib.sha3_512((seed + base).encode()).digest()
36 |
37 | if hash_value.hex()[:diff_len] <= difficulty:
38 | return "gAAAAAB" + base
39 |
40 | fallback_base = base64.b64encode(f'"{seed}"'.encode()).decode()
41 | return "gAAAAABwQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + fallback_base
42 |
--------------------------------------------------------------------------------
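The search loop brute-forces the nonce stored in config[3]: each candidate config is JSON-encoded, base64-encoded, hashed with SHA3-512 together with the server-supplied seed, and accepted once the digest's hex prefix compares lexicographically less than or equal to the difficulty string, so a difficulty of "0fff" demands a leading zero nibble (roughly a 1-in-16 chance per attempt). A self-contained toy version of the same search:

import base64
import hashlib
import json

seed, difficulty = "0.42", "0fff"  # made-up values for illustration
config = ["toy-config", 0]         # config[1] plays the role of the nonce
for nonce in range(100000):
    config[1] = nonce
    base = base64.b64encode(json.dumps(config).encode()).decode()
    digest = hashlib.sha3_512((seed + base).encode()).hexdigest()
    if digest[:len(difficulty)] <= difficulty:
        print(f"nonce={nonce} digest prefix={digest[:8]}")
        break
--------------------------------------------------------------------------------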
/g4f/Provider/selenium/MyShell.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import time, json
4 |
5 | from ...typing import CreateResult, Messages
6 | from ..base_provider import AbstractProvider
7 | from ..helper import format_prompt
8 | from ...webdriver import WebDriver, WebDriverSession, bypass_cloudflare
9 |
10 | class MyShell(AbstractProvider):
11 | url = "https://app.myshell.ai/chat"
12 | working = True
13 | supports_gpt_35_turbo = True
14 | supports_stream = True
15 |
16 | @classmethod
17 | def create_completion(
18 | cls,
19 | model: str,
20 | messages: Messages,
21 | stream: bool,
22 | proxy: str = None,
23 | timeout: int = 120,
24 | webdriver: WebDriver = None,
25 | **kwargs
26 | ) -> CreateResult:
27 | with WebDriverSession(webdriver, "", proxy=proxy) as driver:
28 | bypass_cloudflare(driver, cls.url, timeout)
29 |
30 | # Send request with message
31 | data = {
32 | "botId": "4738",
33 | "conversation_scenario": 3,
34 | "message": format_prompt(messages),
35 | "messageType": 1
36 | }
37 | script = """
38 | response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", {
39 | "headers": {
40 | "accept": "application/json",
41 | "content-type": "application/json",
42 | "myshell-service-name": "organics-api",
43 | "visitor-id": localStorage.getItem("mix_visitorId")
44 | },
45 | "body": '{body}',
46 | "method": "POST"
47 | })
48 | window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
49 | """
50 | driver.execute_script(script.replace("{body}", json.dumps(data)))
51 | script = """
52 | chunk = await window._reader.read();
53 | if (chunk.done) {
54 | return null;
55 | }
56 | content = '';
57 | chunk.value.split('\\n').forEach((line, index) => {
58 | if (line.startsWith('data: ')) {
59 | try {
60 | const data = JSON.parse(line.substring('data: '.length));
61 | if ('content' in data) {
62 | content += data['content'];
63 | }
64 | } catch(e) {}
65 | }
66 | });
67 | return content;
68 | """
69 | while True:
70 | chunk = driver.execute_script(script)
71 | if chunk:
72 | yield chunk
73 | elif chunk != "":
74 | break
75 | else:
76 | time.sleep(0.1)
--------------------------------------------------------------------------------
/g4f/Provider/selenium/__init__.py:
--------------------------------------------------------------------------------
1 | from .AItianhuSpace import AItianhuSpace
2 | from .MyShell import MyShell
3 | from .PerplexityAi import PerplexityAi
4 | from .Phind import Phind
5 | from .TalkAi import TalkAi
6 | from .Bard import Bard
--------------------------------------------------------------------------------
/g4f/Provider/unfinished/AiChatting.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from urllib.parse import unquote
4 |
5 | from ...typing import CreateResult, Messages
6 | from ..base_provider import AbstractProvider
7 | from ...webdriver import WebDriver
8 | from ...requests import Session, get_session_from_browser
9 |
10 | class AiChatting(AbstractProvider):
11 | url = "https://www.aichatting.net"
12 | supports_gpt_35_turbo = True
13 | _session: Session = None
14 |
15 | @classmethod
16 | def create_completion(
17 | cls,
18 | model: str,
19 | messages: Messages,
20 | stream: bool,
21 | proxy: str = None,
22 | timeout: int = 120,
23 | webdriver: WebDriver = None,
24 | **kwargs
25 | ) -> CreateResult:
26 | if not cls._session:
27 | cls._session = get_session_from_browser(cls.url, webdriver, proxy, timeout)
28 | visitorId = unquote(cls._session.cookies.get("aichatting.website.visitorId"))
29 |
30 | headers = {
31 | "accept": "application/json, text/plain, */*",
32 | "lang": "en",
33 | "source": "web"
34 | }
35 | data = {
36 | "roleId": 0,
37 | }
38 | try:
39 | response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/record/conversation/create", json=data, headers=headers)
40 | response.raise_for_status()
41 | conversation_id = response.json()["data"]["conversationId"]
42 | except Exception as e:
43 | cls.reset()
44 | raise e
45 | headers = {
46 | "authority": "aga-api.aichatting.net",
47 | "accept": "text/event-stream,application/json, text/event-stream",
48 | "lang": "en",
49 | "source": "web",
50 | "vtoken": visitorId,
51 | }
52 | data = {
53 | "spaceHandle": True,
54 | "roleId": 0,
55 | "messages": messages,
56 | "conversationId": conversation_id,
57 | }
58 | response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/v2/stream", json=data, headers=headers, stream=True)
59 | response.raise_for_status()
60 | for chunk in response.iter_lines():
61 | if chunk.startswith(b"data:"):
62 | yield chunk[5:].decode().replace("-=- --", " ").replace("-=-n--", "\n").replace("--@DONE@--", "")
63 |
64 | @classmethod
65 | def reset(cls):
66 | cls._session = None
--------------------------------------------------------------------------------
/g4f/Provider/unfinished/ChatAiGpt.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import re
4 | from aiohttp import ClientSession
5 |
6 | from ...typing import AsyncResult, Messages
7 | from ..base_provider import AsyncGeneratorProvider
8 | from ..helper import format_prompt
9 |
10 |
11 | class ChatAiGpt(AsyncGeneratorProvider):
12 | url = "https://chataigpt.org"
13 | supports_gpt_35_turbo = True
14 | _nonce = None
15 | _post_id = None
16 |
17 | @classmethod
18 | async def create_async_generator(
19 | cls,
20 | model: str,
21 | messages: Messages,
22 | proxy: str = None,
23 | **kwargs
24 | ) -> AsyncResult:
25 | headers = {
26 | "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
27 | "Accept": "*/*",
28 | "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
29 | "Accept-Encoding": "gzip, deflate, br",
30 | "Origin": cls.url,
31 | "Alt-Used": cls.url,
32 | "Connection": "keep-alive",
33 | "Referer": cls.url,
34 | "Pragma": "no-cache",
35 | "Cache-Control": "no-cache",
36 | "TE": "trailers",
37 | "Sec-Fetch-Dest": "empty",
38 | "Sec-Fetch-Mode": "cors",
39 | "Sec-Fetch-Site": "same-origin",
40 | }
41 | async with ClientSession(headers=headers) as session:
42 | if not cls._nonce:
43 | async with session.get(f"{cls.url}/", proxy=proxy) as response:
44 | response.raise_for_status()
45 | response = await response.text()
46 |
47 | result = re.search(
48 | r'data-nonce=(.*?) data-post-id=([0-9]+)', response
49 | )
50 |
51 | if result:
52 | cls._nonce, cls._post_id = result.group(1), result.group(2)
53 | else:
54 | raise RuntimeError("No nonce found")
55 | prompt = format_prompt(messages)
56 | data = {
57 | "_wpnonce": cls._nonce,
58 | "post_id": cls._post_id,
59 | "url": cls.url,
60 | "action": "wpaicg_chat_shortcode_message",
61 | "message": prompt,
62 | "bot_id": 0
63 | }
64 | async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
65 | response.raise_for_status()
66 | async for chunk in response.content:
67 | if chunk:
68 | yield chunk.decode()
--------------------------------------------------------------------------------
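ChatAiGpt talks to a stock WordPress AI-chat endpoint (the wpaicg_chat_shortcode_message action), so it first scrapes the _wpnonce and post id out of the page markup with a regex. The same match on a fabricated HTML fragment:

import re

html = '<div data-nonce=3f9a data-post-id=1234>'  # made-up sample markup
match = re.search(r'data-nonce=(.*?) data-post-id=([0-9]+)', html)
nonce, post_id = match.group(1), match.group(2)
# nonce == '3f9a', post_id == '1234'
--------------------------------------------------------------------------------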
/g4f/Provider/unfinished/Komo.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | from ...requests import StreamSession
6 | from ...typing import AsyncGenerator
7 | from ..base_provider import AsyncGeneratorProvider, format_prompt
8 |
9 | class Komo(AsyncGeneratorProvider):
10 | url = "https://komo.ai/api/ask"
11 | supports_gpt_35_turbo = True
12 |
13 | @classmethod
14 | async def create_async_generator(
15 | cls,
16 | model: str,
17 | messages: list[dict[str, str]],
18 | **kwargs
19 | ) -> AsyncGenerator:
20 | async with StreamSession(impersonate="chrome107") as session:
21 | prompt = format_prompt(messages)
22 | data = {
23 | "query": prompt,
24 | "FLAG_URLEXTRACT": "false",
25 | "token": "",
26 | "FLAG_MODELA": "1",
27 | }
28 | headers = {
29 | 'authority': 'komo.ai',
30 | 'accept': 'text/event-stream',
31 | 'cache-control': 'no-cache',
32 | 'referer': 'https://komo.ai/',
33 | }
34 |
35 | async with session.get(cls.url, params=data, headers=headers) as response:
36 | response.raise_for_status()
37 | next_line = False
38 | async for line in response.iter_lines():
39 | if line == b"event: line":
40 | next_line = True
41 | elif next_line and line.startswith(b"data: "):
42 | yield json.loads(line[6:])
43 | next_line = False
--------------------------------------------------------------------------------
/g4f/Provider/unfinished/__init__.py:
--------------------------------------------------------------------------------
1 | from .MikuChat import MikuChat
2 | from .Komo import Komo
3 | from .ChatAiGpt import ChatAiGpt
4 | from .AiChatting import AiChatting
--------------------------------------------------------------------------------
/g4f/Provider/you/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/Provider/you/__init__.py
--------------------------------------------------------------------------------
/g4f/api/_logging.py:
--------------------------------------------------------------------------------
1 | import sys, logging
2 |
3 | from loguru import logger
4 |
5 | def __exception_handle(e_type, e_value, e_traceback):
6 | if issubclass(e_type, KeyboardInterrupt):
7 | print('\nBye...')
8 | sys.exit(0)
9 |
10 | sys.__excepthook__(e_type, e_value, e_traceback)
11 |
12 | class __InterceptHandler(logging.Handler):
13 | def emit(self, record):
14 | try:
15 | level = logger.level(record.levelname).name
16 | except ValueError:
17 | level = record.levelno
18 |
19 | frame, depth = logging.currentframe(), 2
20 | while frame.f_code.co_filename == logging.__file__:
21 | frame = frame.f_back
22 | depth += 1
23 |
24 | logger.opt(depth=depth, exception=record.exc_info).log(
25 | level, record.getMessage()
26 | )
27 |
28 | def hook_except_handle():
29 | sys.excepthook = __exception_handle
30 |
31 | def hook_logging(**kwargs):
32 | logging.basicConfig(handlers=[__InterceptHandler()], **kwargs)
33 |
--------------------------------------------------------------------------------
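hook_logging swaps the root handlers for the loguru intercept above, and hook_except_handle installs the Ctrl+C-friendly excepthook. A typical setup sketch:

import logging

from g4f.api._logging import hook_except_handle, hook_logging

hook_except_handle()               # print "Bye..." and exit on KeyboardInterrupt
hook_logging(level=logging.INFO)   # route stdlib logging records through loguru
logging.getLogger("uvicorn").info("now rendered by loguru")
--------------------------------------------------------------------------------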
/g4f/api/_tokenizer.py:
--------------------------------------------------------------------------------
1 | # import tiktoken
2 | # from typing import Union
3 |
4 | # def tokenize(text: str, model: str = 'gpt-3.5-turbo') -> Union[int, str]:
5 | # encoding = tiktoken.encoding_for_model(model)
6 | # encoded = encoding.encode(text)
7 | # num_tokens = len(encoded)
8 |
9 | # return num_tokens, encoded
--------------------------------------------------------------------------------
/g4f/api/run.py:
--------------------------------------------------------------------------------
1 | import g4f.api
2 |
3 | if __name__ == "__main__":
4 | g4f.api.run_api(debug=True)
5 |
--------------------------------------------------------------------------------
/g4f/client/__init__.py:
--------------------------------------------------------------------------------
1 | from .stubs import ChatCompletion, ChatCompletionChunk, ImagesResponse
2 | from .client import Client
3 | from .async_client import AsyncClient
--------------------------------------------------------------------------------
/g4f/client/helper.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import re
4 | from typing import Iterable, AsyncIterator
5 |
6 | def filter_json(text: str) -> str:
7 | """
8 | Extracts the body of a JSON code block from a string.
9 |
10 | Args:
11 | text (str): A string that may contain a fenced JSON code block.
12 |
13 | Returns:
14 | str: The content of the code block, or the original text if no block is found.
15 | """
16 | match = re.search(r"```(json|)\n(?P<code>[\S\s]+?)\n```", text)
17 | if match:
18 | return match.group("code")
19 | return text
20 |
21 | def find_stop(stop, content: str, chunk: str = None):
22 | first = -1
23 | word = None
24 | if stop is not None:
25 | for word in list(stop):
26 | first = content.find(word)
27 | if first != -1:
28 | content = content[:first]
29 | break
30 | if chunk is not None and first != -1:
31 | first = chunk.find(word)
32 | if first != -1:
33 | chunk = chunk[:first]
34 | else:
35 | first = 0
36 | return first, content, chunk
37 |
38 | def filter_none(**kwargs) -> dict:
39 | return {
40 | key: value
41 | for key, value in kwargs.items()
42 | if value is not None
43 | }
44 |
45 | async def cast_iter_async(iterable: Iterable) -> AsyncIterator:
46 | for chunk in iterable:
47 | yield chunk
--------------------------------------------------------------------------------
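filter_json strips a fenced code block down to its body, and find_stop truncates both the accumulated content and the current chunk at the first stop word, returning where the stop word was found in the chunk. Two quick worked calls:

from g4f.client.helper import filter_json, find_stop

text = 'Here you go:\n```json\n{"a": 1}\n```'
filter_json(text)  # -> '{"a": 1}'

first, content, chunk = find_stop(["END"], "Hello END world", chunk="o END w")
# content == "Hello ", chunk == "o ", first == 2 (offset of "END" in the chunk)
--------------------------------------------------------------------------------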
/g4f/client/image_models.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from .types import Client, ImageProvider
4 |
5 | from ..Provider.BingCreateImages import BingCreateImages
6 | from ..Provider.needs_auth import Gemini, OpenaiChat
7 | from ..Provider.You import You
8 |
9 | class ImageModels():
10 | gemini = Gemini
11 | openai = OpenaiChat
12 | you = You
13 |
14 | def __init__(self, client: Client) -> None:
15 | self.client = client
16 | self.default = BingCreateImages(proxy=self.client.get_proxy())
17 |
18 | def get(self, name: str, default: ImageProvider = None) -> ImageProvider:
19 | return getattr(self, name) if hasattr(self, name) else default or self.default
20 |
--------------------------------------------------------------------------------
/g4f/client/types.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 |
5 | from .stubs import ChatCompletion, ChatCompletionChunk
6 | from ..providers.types import BaseProvider, ProviderType, FinishReason
7 | from typing import Union, Iterator, AsyncIterator
8 |
9 | ImageProvider = Union[BaseProvider, object]
10 | Proxies = Union[dict, str]
11 | IterResponse = Iterator[Union[ChatCompletion, ChatCompletionChunk]]
12 | AsyncIterResponse = AsyncIterator[Union[ChatCompletion, ChatCompletionChunk]]
13 |
14 | class ClientProxyMixin():
15 | def get_proxy(self) -> Union[str, None]:
16 | if isinstance(self.proxies, str):
17 | return self.proxies
18 | elif self.proxies is None:
19 | return os.environ.get("G4F_PROXY")
20 | elif "all" in self.proxies:
21 | return self.proxies["all"]
22 | elif "https" in self.proxies:
23 | return self.proxies["https"]
24 |
25 | class Client(ClientProxyMixin):
26 | def __init__(
27 | self,
28 | api_key: str = None,
29 | proxies: Proxies = None,
30 | **kwargs
31 | ) -> None:
32 | self.api_key: str = api_key
33 | self.proxies: Proxies = proxies
--------------------------------------------------------------------------------
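get_proxy resolves the client proxy in a fixed order: a bare proxy string wins, then the G4F_PROXY environment variable when proxies is unset, then the "all" entry, then "https". A short sketch against the Client defined above:

from g4f.client.types import Client

Client(proxies="http://127.0.0.1:8080").get_proxy()        # "http://127.0.0.1:8080"
Client(proxies={"all": "socks5://host:1080"}).get_proxy()  # "socks5://host:1080"
Client(proxies={"https": "http://proxy:3128"}).get_proxy() # "http://proxy:3128"
Client().get_proxy()  # os.environ.get("G4F_PROXY"), usually None
--------------------------------------------------------------------------------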
/g4f/debug.py:
--------------------------------------------------------------------------------
1 | from .providers.types import ProviderType
2 |
3 | logging: bool = False
4 | version_check: bool = True
5 | last_provider: ProviderType = None
6 | last_model: str = None
7 | version: str = None
--------------------------------------------------------------------------------
/g4f/errors.py:
--------------------------------------------------------------------------------
1 | class ProviderNotFoundError(Exception):
2 | ...
3 |
4 | class ProviderNotWorkingError(Exception):
5 | ...
6 |
7 | class StreamNotSupportedError(Exception):
8 | ...
9 |
10 | class ModelNotFoundError(Exception):
11 | ...
12 |
13 | class ModelNotAllowedError(Exception):
14 | ...
15 |
16 | class RetryProviderError(Exception):
17 | ...
18 |
19 | class RetryNoProviderError(Exception):
20 | ...
21 |
22 | class VersionNotFoundError(Exception):
23 | ...
24 |
25 | class ModelNotSupportedError(Exception):
26 | ...
27 |
28 | class MissingRequirementsError(Exception):
29 | ...
30 |
31 | class NestAsyncioError(MissingRequirementsError):
32 | ...
33 |
34 | class MissingAuthError(Exception):
35 | ...
36 |
37 | class NoImageResponseError(Exception):
38 | ...
39 |
40 | class RateLimitError(Exception):
41 | ...
42 |
43 | class ResponseError(Exception):
44 | ...
45 |
46 | class ResponseStatusError(Exception):
47 | ...
--------------------------------------------------------------------------------
/g4f/gui/__init__.py:
--------------------------------------------------------------------------------
1 | from ..errors import MissingRequirementsError
2 |
3 | try:
4 | from .server.app import app
5 | from .server.website import Website
6 | from .server.backend import Backend_Api
7 | import_error = None
8 | except ImportError as e:
9 | import_error = e
10 |
11 | def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> None:
12 | if import_error is not None:
13 | raise MissingRequirementsError(f'Install "gui" requirements | pip install -U g4f[gui]\n{import_error}')
14 |
15 | config = {
16 | 'host' : host,
17 | 'port' : port,
18 | 'debug': debug
19 | }
20 |
21 | site = Website(app)
22 | for route in site.routes:
23 | app.add_url_rule(
24 | route,
25 | view_func = site.routes[route]['function'],
26 | methods = site.routes[route]['methods'],
27 | )
28 |
29 | backend_api = Backend_Api(app)
30 | for route in backend_api.routes:
31 | app.add_url_rule(
32 | route,
33 | view_func = backend_api.routes[route]['function'],
34 | methods = backend_api.routes[route]['methods'],
35 | )
36 |
37 | print(f"Running on port {config['port']}")
38 | app.run(**config)
39 | print(f"Closing port {config['port']}")
40 |
--------------------------------------------------------------------------------
/g4f/gui/client/static/css/dracula.min.css:
--------------------------------------------------------------------------------
1 | /*!
2 | Theme: Dracula
3 | Author: Mike Barkmin (http://github.com/mikebarkmin) based on Dracula Theme (http://github.com/dracula)
4 | License: ~ MIT (or more permissive) [via base16-schemes-source]
5 | Maintainer: @highlightjs/core-team
6 | Version: 2021.09.0
7 | */pre code.hljs{display:block;overflow-x:auto;padding:1em}code.hljs{padding:3px 5px}.hljs{color:#e9e9f4;background:#282936}.hljs ::selection,.hljs::selection{background-color:#4d4f68;color:#e9e9f4}.hljs-comment{color:#626483}.hljs-tag{color:#62d6e8}.hljs-operator,.hljs-punctuation,.hljs-subst{color:#e9e9f4}.hljs-operator{opacity:.7}.hljs-bullet,.hljs-deletion,.hljs-name,.hljs-selector-tag,.hljs-template-variable,.hljs-variable{color:#ea51b2}.hljs-attr,.hljs-link,.hljs-literal,.hljs-number,.hljs-symbol,.hljs-variable.constant_{color:#b45bcf}.hljs-class .hljs-title,.hljs-title,.hljs-title.class_{color:#00f769}.hljs-strong{font-weight:700;color:#00f769}.hljs-addition,.hljs-code,.hljs-string,.hljs-title.class_.inherited__{color:#ebff87}.hljs-built_in,.hljs-doctag,.hljs-keyword.hljs-atrule,.hljs-quote,.hljs-regexp{color:#a1efe4}.hljs-attribute,.hljs-function .hljs-title,.hljs-section,.hljs-title.function_,.ruby .hljs-property{color:#62d6e8}.diff .hljs-meta,.hljs-keyword,.hljs-template-tag,.hljs-type{color:#b45bcf}.hljs-emphasis{color:#b45bcf;font-style:italic}.hljs-meta,.hljs-meta .hljs-keyword,.hljs-meta .hljs-string{color:#00f769}.hljs-meta .hljs-keyword,.hljs-meta-keyword{font-weight:700}
--------------------------------------------------------------------------------
/g4f/gui/client/static/img/android-chrome-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/gui/client/static/img/android-chrome-192x192.png
--------------------------------------------------------------------------------
/g4f/gui/client/static/img/android-chrome-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/gui/client/static/img/android-chrome-512x512.png
--------------------------------------------------------------------------------
/g4f/gui/client/static/img/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/gui/client/static/img/apple-touch-icon.png
--------------------------------------------------------------------------------
/g4f/gui/client/static/img/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/gui/client/static/img/favicon-16x16.png
--------------------------------------------------------------------------------
/g4f/gui/client/static/img/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/gui/client/static/img/favicon-32x32.png
--------------------------------------------------------------------------------
/g4f/gui/client/static/img/gpt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/gui/client/static/img/gpt.png
--------------------------------------------------------------------------------
/g4f/gui/client/static/img/site.webmanifest:
--------------------------------------------------------------------------------
1 | {
2 | "name": "",
3 | "short_name": "",
4 | "icons": [
5 | {
6 | "src": "/assets/img/android-chrome-192x192.png",
7 | "sizes": "192x192",
8 | "type": "image/png"
9 | },
10 | {
11 | "src": "/assets/img/android-chrome-512x512.png",
12 | "sizes": "512x512",
13 | "type": "image/png"
14 | }
15 | ],
16 | "theme_color": "#ffffff",
17 | "background_color": "#ffffff",
18 | "display": "standalone"
19 | }
--------------------------------------------------------------------------------
/g4f/gui/client/static/img/user.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/gui/client/static/img/user.png
--------------------------------------------------------------------------------
/g4f/gui/client/static/js/highlightjs-copy.min.js:
--------------------------------------------------------------------------------
1 | class CopyButtonPlugin{constructor(options={}){self.hook=options.hook;self.callback=options.callback}"after:highlightElement"({el,text}){let button=Object.assign(document.createElement("button"),{innerHTML:"Copy",className:"hljs-copy-button"});button.dataset.copied=false;el.parentElement.classList.add("hljs-copy-wrapper");el.parentElement.appendChild(button);el.parentElement.style.setProperty("--hljs-theme-background",window.getComputedStyle(el).backgroundColor);button.onclick=function(){if(!navigator.clipboard)return;let newText=text;if(hook&&typeof hook==="function"){newText=hook(text,el)||text}navigator.clipboard.writeText(newText).then(function(){button.innerHTML="Copied!";button.dataset.copied=true;let alert=Object.assign(document.createElement("div"),{role:"status",className:"hljs-copy-alert",innerHTML:"Copied to clipboard"});el.parentElement.appendChild(alert);setTimeout(()=>{button.innerHTML="Copy";button.dataset.copied=false;el.parentElement.removeChild(alert);alert=null},2e3)}).then(function(){if(typeof callback==="function")return callback(newText,el)})}}}
--------------------------------------------------------------------------------
/g4f/gui/client/static/js/text_to_speech/index.js:
--------------------------------------------------------------------------------
1 | (()=>{"use strict";var e={m:{},u:e=>e+".index.js"};e.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),e.o=(e,c)=>Object.prototype.hasOwnProperty.call(e,c),(()=>{var c;e.g.importScripts&&(c=e.g.location+"");var t=e.g.document;if(!c&&t&&(t.currentScript&&(c=t.currentScript.src),!c)){var a=t.getElementsByTagName("script");if(a.length)for(var r=a.length-1;r>-1&&(!c||!/^http(s?):/.test(c));)c=a[r--].src}if(!c)throw new Error("Automatic publicPath is not supported in this browser");c=c.replace(/#.*$/,"").replace(/\?.*$/,"").replace(/\/[^\/]+$/,"/"),e.p=c})(),e.b=document.baseURI||self.location.href;const c={};c.current||(c.current=new Worker(new URL(e.p+e.u(630),e.b),{type:void 0})),window.doSpeech=!1,c.current.addEventListener("message",(e=>{switch(e.data.status){case"error":window.onSpeechResponse(null),window.doSpeech=!1;break;case"complete":const c=URL.createObjectURL(e.data.output);window.onSpeechResponse(c),window.doSpeech=!1}})),window.SPEAKERS={"US female 1":"cmu_us_slt_arctic-wav-arctic_a0001","US female 2":"cmu_us_clb_arctic-wav-arctic_a0001","US male 1":"cmu_us_bdl_arctic-wav-arctic_a0003","US male 2":"cmu_us_rms_arctic-wav-arctic_a0003","Canadian male":"cmu_us_jmk_arctic-wav-arctic_a0002","Scottish male":"cmu_us_awb_arctic-wav-arctic_b0002","Indian male":"cmu_us_ksp_arctic-wav-arctic_a0007"},window.handleGenerateSpeech=(e,t="cmu_us_slt_arctic-wav-arctic_a0001")=>{window.doSpeech=!0,c.current.postMessage({text:e,speaker_id:t})},window.onSpeechResponse=e=>console.log(e)})();
--------------------------------------------------------------------------------
/g4f/gui/gui_parser.py:
--------------------------------------------------------------------------------
1 | from argparse import ArgumentParser
2 |
3 | def gui_parser():
4 | parser = ArgumentParser(description="Run the GUI")
5 | parser.add_argument("-host", type=str, default="0.0.0.0", help="hostname")
6 | parser.add_argument("-port", type=int, default=8080, help="port")
7 | parser.add_argument("-debug", action="store_true", help="debug mode")
8 | parser.add_argument("--ignore-cookie-files", action="store_true", help="Don't read .har and cookie files.")
9 | return parser
--------------------------------------------------------------------------------
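gui_parser builds a small argparse CLI; note the single-dash long options (-host, -port, -debug), which run.py below feeds straight into run_gui_args. Parsing a sample argv:

from g4f.gui.gui_parser import gui_parser

parser = gui_parser()
args = parser.parse_args(["-port", "1337", "-debug"])
# args.host == "0.0.0.0", args.port == 1337, args.debug is True
# args.ignore_cookie_files is False, so run_gui_args would call read_cookie_files()
--------------------------------------------------------------------------------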
/g4f/gui/run.py:
--------------------------------------------------------------------------------
1 | from .gui_parser import gui_parser
2 | from ..cookies import read_cookie_files
3 | import g4f.debug
4 |
5 | def run_gui_args(args):
6 | if args.debug:
7 | g4f.debug.logging = True
8 | if not args.ignore_cookie_files:
9 | read_cookie_files()
10 | from g4f.gui import run_gui
11 | host = args.host
12 | port = args.port
13 | debug = args.debug
14 | run_gui(host, port, debug)
15 |
16 | if __name__ == "__main__":
17 | parser = gui_parser()
18 | args = parser.parse_args()
19 | run_gui_args(args)
--------------------------------------------------------------------------------
/g4f/gui/server/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/gui/server/__init__.py
--------------------------------------------------------------------------------
/g4f/gui/server/android_gallery.py:
--------------------------------------------------------------------------------
1 | from kivy.logger import Logger
2 | from kivy.clock import Clock
3 |
4 | from jnius import autoclass
5 | from jnius import cast
6 | from android import activity
7 |
8 | PythonActivity = autoclass('org.kivy.android.PythonActivity')
9 | Intent = autoclass('android.content.Intent')
10 | Uri = autoclass('android.net.Uri')
11 |
12 | MEDIA_DATA = "_data"
13 | RESULT_LOAD_IMAGE = 1
14 |
15 | Activity = autoclass('android.app.Activity')
16 |
17 | def user_select_image(on_selection):
18 | """Open Gallery Activity and call callback with absolute image filepath of image user selected.
19 | None if user canceled.
20 | """
21 |
22 | currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
23 |
24 | # Forum discussion: https://groups.google.com/forum/#!msg/kivy-users/bjsG2j9bptI/-Oe_aGo0newJ
25 | def on_activity_result(request_code, result_code, intent):
26 | if request_code != RESULT_LOAD_IMAGE:
27 | Logger.warning('user_select_image: ignoring activity result that was not RESULT_LOAD_IMAGE')
28 | return
29 |
30 | if result_code == Activity.RESULT_CANCELED:
31 | Clock.schedule_once(lambda dt: on_selection(None), 0)
32 | return
33 |
34 | if result_code != Activity.RESULT_OK:
35 | # This may just go into the void...
36 | raise NotImplementedError('Unknown result_code "{}"'.format(result_code))
37 |
38 | selectedImage = intent.getData()  # Uri
39 | filePathColumn = [MEDIA_DATA]  # String[]
40 | # Cursor
41 | cursor = currentActivity.getContentResolver().query(selectedImage,
42 | filePathColumn, None, None, None)
43 | cursor.moveToFirst()
44 |
45 | # int
46 | columnIndex = cursor.getColumnIndex(filePathColumn[0])
47 | # String
48 | picturePath = cursor.getString(columnIndex)
49 | cursor.close()
50 | Logger.info('android_ui: user_select_image() selected %s', picturePath)
51 |
52 | # This is possibly in a different thread?
53 | Clock.schedule_once(lambda dt: on_selection(picturePath), 0)
54 |
55 | # See: http://pyjnius.readthedocs.org/en/latest/android.html
56 | activity.bind(on_activity_result=on_activity_result)
57 |
58 | intent = Intent()
59 |
60 | # http://programmerguru.com/android-tutorial/how-to-pick-image-from-gallery/
61 | # http://stackoverflow.com/questions/18416122/open-gallery-app-in-android
62 | intent.setAction(Intent.ACTION_PICK)
63 | # TODO internal vs external?
64 | intent.setData(Uri.parse('content://media/internal/images/media'))
65 | # TODO setType(Image)?
66 |
67 | currentActivity.startActivityForResult(intent, RESULT_LOAD_IMAGE)
--------------------------------------------------------------------------------
/g4f/gui/server/app.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | from flask import Flask
3 |
4 | if getattr(sys, 'frozen', False):
5 | template_folder = os.path.join(sys._MEIPASS, "client")
6 | else:
7 | template_folder = "../client"
8 |
9 | app = Flask(__name__, template_folder=template_folder, static_folder=f"{template_folder}/static")
--------------------------------------------------------------------------------
/g4f/gui/server/website.py:
--------------------------------------------------------------------------------
1 | import uuid
2 | from flask import render_template, redirect
3 |
4 | class Website:
5 | def __init__(self, app) -> None:
6 | self.app = app
7 | def redirect_home():
8 | return redirect('/chat')
9 | self.routes = {
10 | '/': {
11 | 'function': redirect_home,
12 | 'methods': ['GET', 'POST']
13 | },
14 | '/chat/': {
15 | 'function': self._index,
16 | 'methods': ['GET', 'POST']
17 | },
18 |             '/chat/<conversation_id>': {
19 | 'function': self._chat,
20 | 'methods': ['GET', 'POST']
21 | },
22 | '/menu/': {
23 | 'function': redirect_home,
24 | 'methods': ['GET', 'POST']
25 | },
26 | '/settings/': {
27 | 'function': redirect_home,
28 | 'methods': ['GET', 'POST']
29 | },
30 | }
31 |
32 | def _chat(self, conversation_id):
33 | if '-' not in conversation_id:
34 | return redirect('/chat')
35 | return render_template('index.html', chat_id=conversation_id)
36 |
37 | def _index(self):
38 | return render_template('index.html', chat_id=str(uuid.uuid4()))
--------------------------------------------------------------------------------
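A minimal sketch (not the project's actual wiring, which lives in the GUI backend) of how the `Website.routes` mapping could be bound to the Flask app defined in app.py:

from g4f.gui.server.app import app
from g4f.gui.server.website import Website

site = Website(app)
for url, spec in site.routes.items():
    # Use the URL as the endpoint name so handlers shared by several
    # routes (like redirect_home) don't collide.
    app.add_url_rule(url, endpoint=url, view_func=spec['function'], methods=spec['methods'])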
/g4f/gui/webview.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import sys
4 | import os.path
5 | import webview
6 | try:
7 | from platformdirs import user_config_dir
8 | has_platformdirs = True
9 | except ImportError:
10 | has_platformdirs = False
11 |
12 | from g4f.gui.gui_parser import gui_parser
13 | from g4f.gui.server.js_api import JsApi
14 | import g4f.version
15 | import g4f.debug
16 |
17 | def run_webview(
18 | debug: bool = False,
19 | http_port: int = None,
20 | ssl: bool = True,
21 | storage_path: str = None,
22 | gui: str = None
23 | ):
24 | if getattr(sys, 'frozen', False):
25 | dirname = sys._MEIPASS
26 | else:
27 | dirname = os.path.dirname(__file__)
28 | webview.settings['OPEN_EXTERNAL_LINKS_IN_BROWSER'] = True
29 | webview.settings['ALLOW_DOWNLOADS'] = True
30 | webview.create_window(
31 | f"g4f - {g4f.version.utils.current_version}",
32 | os.path.join(dirname, "client/index.html"),
33 | text_select=True,
34 | js_api=JsApi(),
35 | )
36 | if has_platformdirs and storage_path is None:
37 | storage_path = user_config_dir("g4f-webview")
38 | webview.start(
39 | private_mode=False,
40 | storage_path=storage_path,
41 | debug=debug,
42 | http_port=http_port,
43 | ssl=ssl,
44 | gui=gui
45 | )
46 |
47 | if __name__ == "__main__":
48 | parser = gui_parser()
49 | args = parser.parse_args()
50 | if args.debug:
51 | g4f.debug.logging = True
52 | run_webview(args.debug, args.port)
--------------------------------------------------------------------------------
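Example launch, assuming `pywebview` is installed; the port and flags are illustrative:

from g4f.gui.webview import run_webview

# Opens the bundled client/index.html in a native window and serves it
# over plain HTTP on port 8080.
run_webview(debug=True, http_port=8080, ssl=False)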
/g4f/local/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ..typing import Union, Messages
4 | from ..locals.provider import LocalProvider
5 | from ..locals.models import get_models
6 | from ..client.client import iter_response, filter_none
7 | from ..client.types import IterResponse
8 |
9 | class LocalClient():
10 | def __init__(self, **kwargs) -> None:
11 | self.chat: Chat = Chat(self)
12 |
13 | @staticmethod
14 | def list_models():
15 | return list(get_models())
16 |
17 | class Completions():
18 | def __init__(self, client: LocalClient):
19 | self.client: LocalClient = client
20 |
21 | def create(
22 | self,
23 | messages: Messages,
24 | model: str,
25 | stream: bool = False,
26 | response_format: dict = None,
27 | max_tokens: int = None,
28 | stop: Union[list[str], str] = None,
29 | **kwargs
30 | ) -> IterResponse:
31 | stop = [stop] if isinstance(stop, str) else stop
32 | response = LocalProvider.create_completion(
33 | model, messages, stream,
34 | **filter_none(
35 | max_tokens=max_tokens,
36 | stop=stop,
37 | ),
38 | **kwargs
39 | )
40 | response = iter_response(response, stream, response_format, max_tokens, stop)
41 | return response if stream else next(response)
42 |
43 | class Chat():
44 | completions: Completions
45 |
46 | def __init__(self, client: LocalClient):
47 | self.completions = Completions(client)
--------------------------------------------------------------------------------
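Usage sketch, assuming a GPT4All model file is already available locally (see g4f/locals/models.py below); the model name is illustrative — pick one from `list_models()`:

from g4f.local import LocalClient

client = LocalClient()
print(LocalClient.list_models())

response = client.chat.completions.create(
    messages=[{"role": "user", "content": "Hello"}],
    model="mistral-7b-instruct",  # hypothetical local model name
)
# OpenAI-style response object for the non-streaming path
print(response.choices[0].message.content)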
/g4f/locals/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/locals/__init__.py
--------------------------------------------------------------------------------
/g4f/locals/models.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 | import requests
5 | import json
6 |
7 | from ..requests.raise_for_status import raise_for_status
8 |
9 | def load_models():
10 | response = requests.get("https://gpt4all.io/models/models3.json")
11 | raise_for_status(response)
12 | return format_models(response.json())
13 |
14 | def get_model_name(filename: str) -> str:
15 | name = filename.split(".", 1)[0]
16 | for replace in ["-v1_5", "-v1", "-q4_0", "_v01", "-v0", "-f16", "-gguf2", "-newbpe"]:
17 | name = name.replace(replace, "")
18 | return name
19 |
20 | def format_models(models: list) -> dict:
21 | return {get_model_name(model["filename"]): {
22 | "path": model["filename"],
23 | "ram": model["ramrequired"],
24 | "prompt": model["promptTemplate"] if "promptTemplate" in model else None,
25 | "system": model["systemPrompt"] if "systemPrompt" in model else None,
26 | } for model in models}
27 |
28 | def read_models(file_path: str):
29 | with open(file_path, "rb") as f:
30 | return json.load(f)
31 |
32 | def save_models(file_path: str, data):
33 | with open(file_path, 'w') as f:
34 | json.dump(data, f, indent=4)
35 |
36 | def get_model_dir() -> str:
37 | local_dir = os.path.dirname(os.path.abspath(__file__))
38 | project_dir = os.path.dirname(os.path.dirname(local_dir))
39 | model_dir = os.path.join(project_dir, "models")
40 | if not os.path.exists(model_dir):
41 | os.mkdir(model_dir)
42 | return model_dir
43 |
44 |
45 | def get_models() -> dict[str, dict]:
46 | model_dir = get_model_dir()
47 | file_path = os.path.join(model_dir, "models.json")
48 | if os.path.isfile(file_path):
49 | return read_models(file_path)
50 | else:
51 | models = load_models()
52 | save_models(file_path, models)
53 | return models
54 |
--------------------------------------------------------------------------------
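Behavior sketch: the first call to `get_models()` downloads models3.json from gpt4all.io and caches it under the project's models/ directory; subsequent calls read the cached copy.

from g4f.locals.models import get_models

models = get_models()
for name, info in list(models.items())[:3]:
    # "ram" comes from gpt4all's "ramrequired" field (in GB)
    print(name, "->", info["path"], f"needs ~{info['ram']} GB RAM")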
/g4f/providers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/g4f/providers/__init__.py
--------------------------------------------------------------------------------
/g4f/providers/conversation.py:
--------------------------------------------------------------------------------
1 | class BaseConversation:
2 | ...
--------------------------------------------------------------------------------
/g4f/providers/helper.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import random
4 | import string
5 |
6 | from ..typing import Messages, Cookies
7 |
8 | def format_prompt(messages: Messages, add_special_tokens=False) -> str:
9 | """
10 | Format a series of messages into a single string, optionally adding special tokens.
11 |
12 | Args:
13 | messages (Messages): A list of message dictionaries, each containing 'role' and 'content'.
14 | add_special_tokens (bool): Whether to add special formatting tokens.
15 |
16 | Returns:
17 | str: A formatted string containing all messages.
18 | """
19 | if not add_special_tokens and len(messages) <= 1:
20 | return messages[0]["content"]
21 | formatted = "\n".join([
22 | f'{message["role"].capitalize()}: {message["content"]}'
23 | for message in messages
24 | ])
25 | return f"{formatted}\nAssistant:"
26 |
27 | def get_random_string(length: int = 10) -> str:
28 | """
29 | Generate a random string of specified length, containing lowercase letters and digits.
30 |
31 | Args:
32 | length (int, optional): Length of the random string to generate. Defaults to 10.
33 |
34 | Returns:
35 | str: A random string of the specified length.
36 | """
37 | return ''.join(
38 | random.choice(string.ascii_lowercase + string.digits)
39 | for _ in range(length)
40 | )
41 |
42 | def get_random_hex(length: int = 32) -> str:
43 | """
44 |     Generate a random hexadecimal string of the specified length.
45 | 
46 |     Returns:
47 |         str: A random hexadecimal string of `length` characters.
48 | """
49 | return ''.join(
50 | random.choice("abcdef" + string.digits)
51 | for _ in range(length)
52 | )
53 |
54 | def filter_none(**kwargs) -> dict:
55 | return {
56 | key: value
57 | for key, value in kwargs.items()
58 | if value is not None
59 | }
60 |
61 | def format_cookies(cookies: Cookies) -> str:
62 | return "; ".join([f"{k}={v}" for k, v in cookies.items()])
--------------------------------------------------------------------------------
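Example of `format_prompt`'s output shape:

from g4f.providers.helper import format_prompt

messages = [
    {"role": "system", "content": "Be brief."},
    {"role": "user", "content": "Hi!"},
]
print(format_prompt(messages))
# System: Be brief.
# User: Hi!
# Assistant: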
/g4f/requests/aiohttp.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from aiohttp import ClientSession, ClientResponse, ClientTimeout, BaseConnector, FormData
4 | from typing import AsyncIterator, Any, Optional
5 |
6 | from .defaults import DEFAULT_HEADERS
7 | from ..errors import MissingRequirementsError
8 |
9 | class StreamResponse(ClientResponse):
10 | async def iter_lines(self) -> AsyncIterator[bytes]:
11 | async for line in self.content:
12 | yield line.rstrip(b"\r\n")
13 |
14 | async def iter_content(self) -> AsyncIterator[bytes]:
15 | async for chunk in self.content.iter_any():
16 | yield chunk
17 |
18 | async def json(self, content_type: str = None) -> Any:
19 | return await super().json(content_type=content_type)
20 |
21 | class StreamSession(ClientSession):
22 | def __init__(
23 | self,
24 | headers: dict = {},
25 | timeout: int = None,
26 | connector: BaseConnector = None,
27 | proxy: str = None,
28 | proxies: dict = {},
29 | impersonate = None,
30 | **kwargs
31 | ):
32 | if impersonate:
33 | headers = {
34 | **DEFAULT_HEADERS,
35 | **headers
36 | }
37 | connect = None
38 | if isinstance(timeout, tuple):
39 |             connect, timeout = timeout
40 | if timeout is not None:
41 | timeout = ClientTimeout(timeout, connect)
42 | if proxy is None:
43 | proxy = proxies.get("all", proxies.get("https"))
44 | super().__init__(
45 | **kwargs,
46 | timeout=timeout,
47 | response_class=StreamResponse,
48 | connector=get_connector(connector, proxy),
49 | headers=headers
50 | )
51 |
52 | def get_connector(connector: BaseConnector = None, proxy: str = None, rdns: bool = False) -> Optional[BaseConnector]:
53 | if proxy and not connector:
54 | try:
55 | from aiohttp_socks import ProxyConnector
56 | if proxy.startswith("socks5h://"):
57 | proxy = proxy.replace("socks5h://", "socks5://")
58 | rdns = True
59 | connector = ProxyConnector.from_url(proxy, rdns=rdns)
60 | except ImportError:
61 | raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support')
62 | return connector
--------------------------------------------------------------------------------
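Minimal async usage sketch for `StreamSession` (the URL is a placeholder):

import asyncio
from g4f.requests.aiohttp import StreamSession

async def main():
    async with StreamSession(timeout=30) as session:
        async with session.get("https://example.com") as response:
            # iter_lines() is provided by the StreamResponse response class.
            async for line in response.iter_lines():
                print(line.decode(errors="replace"))

asyncio.run(main())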
/g4f/requests/defaults.py:
--------------------------------------------------------------------------------
1 | try:
2 | import brotli
3 | has_brotli = True
4 | except ImportError:
5 | has_brotli = False
6 |
7 | DEFAULT_HEADERS = {
8 | "accept": "*/*",
9 | "accept-encoding": "gzip, deflate" + (", br" if has_brotli else ""),
10 | "accept-language": "en-US",
11 | "referer": "",
12 | "sec-ch-ua": "\"Google Chrome\";v=\"123\", \"Not:A-Brand\";v=\"8\", \"Chromium\";v=\"123\"",
13 | "sec-ch-ua-arch": "\"x86\"",
14 | "sec-ch-ua-bitness": "\"64\"",
15 | "sec-ch-ua-full-version": "\"123.0.6312.122\"",
16 | "sec-ch-ua-full-version-list": "\"Google Chrome\";v=\"123.0.6312.122\", \"Not:A-Brand\";v=\"8.0.0.0\", \"Chromium\";v=\"123.0.6312.122\"",
17 | "sec-ch-ua-mobile": "?0",
18 | "sec-ch-ua-model": "\"\"",
19 | "sec-ch-ua-platform": "\"Windows\"",
20 | "sec-ch-ua-platform-version": '"15.0.0"',
21 | "sec-fetch-dest": "empty",
22 | "sec-fetch-mode": "cors",
23 | "sec-fetch-site": "same-origin",
24 | "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36",
25 | }
26 | WEBVIEW_HAEDERS = {
27 | "Accept": "*/*",
28 | "Accept-Encoding": "gzip, deflate, br",
29 | "Accept-Language": "",
30 | "Referer": "",
31 | "Sec-Fetch-Dest": "empty",
32 | "Sec-Fetch-Mode": "cors",
33 | "Sec-Fetch-Site": "same-origin",
34 | "User-Agent": "",
35 | }
--------------------------------------------------------------------------------
/g4f/requests/raise_for_status.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import Union
4 | from aiohttp import ClientResponse
5 | from requests import Response as RequestsResponse
6 |
7 | from ..errors import ResponseStatusError, RateLimitError
8 | from . import Response, StreamResponse
9 |
10 | class CloudflareError(ResponseStatusError):
11 | ...
12 |
13 | def is_cloudflare(text: str) -> bool:
14 |     return '<div id="cf-please-wait">' in text or "<title>Just a moment...</title>" in text
15 | 
16 | def is_openai(text: str) -> bool:
17 |     return "<p>Unable to load site</p>" in text
18 |
19 | async def raise_for_status_async(response: Union[StreamResponse, ClientResponse], message: str = None):
20 | if response.status in (429, 402):
21 | raise RateLimitError(f"Response {response.status}: Rate limit reached")
22 | message = await response.text() if not response.ok and message is None else message
23 | if response.status == 403 and is_cloudflare(message):
24 | raise CloudflareError(f"Response {response.status}: Cloudflare detected")
25 | elif response.status == 403 and is_openai(message):
26 |         raise ResponseStatusError(f"Response {response.status}: Bot detected")
27 | elif not response.ok:
28 | raise ResponseStatusError(f"Response {response.status}: {message}")
29 |
30 | def raise_for_status(response: Union[Response, StreamResponse, ClientResponse, RequestsResponse], message: str = None):
31 | if hasattr(response, "status"):
32 | return raise_for_status_async(response, message)
33 |
34 | if response.status_code in (429, 402):
35 | raise RateLimitError(f"Response {response.status_code}: Rate limit reached")
36 | elif response.status_code == 403 and is_cloudflare(response.text):
37 | raise CloudflareError(f"Response {response.status_code}: Cloudflare detected")
38 | elif not response.ok:
39 | raise ResponseStatusError(f"Response {response.status_code}: {response.text if message is None else message}")
--------------------------------------------------------------------------------
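Dispatch sketch: `raise_for_status` inspects the response object — aiohttp-style responses expose `.status`, so the call returns a coroutine that must be awaited, while `requests` responses are checked synchronously:

import requests
from g4f.requests.raise_for_status import raise_for_status

response = requests.get("https://example.com")
raise_for_status(response)  # raises RateLimitError / CloudflareError / ResponseStatusError on failure

# For an aiohttp response the same call must be awaited:
#     await raise_for_status(aiohttp_response)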
/g4f/typing.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from typing import Any, AsyncGenerator, Generator, AsyncIterator, Iterator, NewType, Tuple, Union, List, Dict, Type, IO, Optional
3 |
4 | try:
5 | from PIL.Image import Image
6 | except ImportError:
7 | from typing import Type as Image
8 |
9 | if sys.version_info >= (3, 8):
10 | from typing import TypedDict
11 | else:
12 | from typing_extensions import TypedDict
13 |
14 | SHA256 = NewType('sha_256_hash', str)
15 | CreateResult = Iterator[str]
16 | AsyncResult = AsyncIterator[str]
17 | Messages = List[Dict[str, Union[str,List[Dict[str,Union[str,Dict[str,str]]]]]]]
18 | Cookies = Dict[str, str]
19 | ImageType = Union[str, bytes, IO, Image, None]
20 |
21 | __all__ = [
22 | 'Any',
23 | 'AsyncGenerator',
24 | 'Generator',
25 | 'AsyncIterator',
26 |     'Iterator',
27 | 'Tuple',
28 | 'Union',
29 | 'List',
30 | 'Dict',
31 | 'Type',
32 | 'IO',
33 | 'Optional',
34 | 'TypedDict',
35 | 'SHA256',
36 | 'CreateResult',
37 | 'AsyncResult',
38 | 'Messages',
39 | 'Cookies',
40 | 'Image',
41 | 'ImageType'
42 | ]
43 |
--------------------------------------------------------------------------------
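Illustration of the nested `Messages` type: `content` is either a plain string or a list of typed parts (the OpenAI-style part names here are illustrative, not mandated by the type):

from g4f.typing import Messages

messages: Messages = [
    {"role": "user", "content": "Describe this image."},
    {"role": "user", "content": [
        {"type": "text", "text": "What is shown here?"},
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.webp"}},
    ]},
]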
/generated_images/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/generated_images/.gitkeep
--------------------------------------------------------------------------------
/har_and_cookies/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/har_and_cookies/.gitkeep
--------------------------------------------------------------------------------
/models/.local-model-here:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/models/.local-model-here
--------------------------------------------------------------------------------
/projects/text_to_speech/README.md:
--------------------------------------------------------------------------------
1 | Fork from:
2 |
3 | https://github.com/xenova/transformers.js/tree/main/examples/text-to-speech-client
--------------------------------------------------------------------------------
/projects/text_to_speech/constants.js:
--------------------------------------------------------------------------------
1 | export const SPEAKERS = {
2 | "US female 1": "cmu_us_slt_arctic-wav-arctic_a0001",
3 | "US female 2": "cmu_us_clb_arctic-wav-arctic_a0001",
4 | "US male 1": "cmu_us_bdl_arctic-wav-arctic_a0003",
5 | "US male 2": "cmu_us_rms_arctic-wav-arctic_a0003",
6 | "Canadian male": "cmu_us_jmk_arctic-wav-arctic_a0002",
7 | "Scottish male": "cmu_us_awb_arctic-wav-arctic_b0002",
8 | "Indian male": "cmu_us_ksp_arctic-wav-arctic_a0007",
9 | }
10 |
11 | export const DEFAULT_SPEAKER = "cmu_us_slt_arctic-wav-arctic_a0001";
--------------------------------------------------------------------------------
/projects/text_to_speech/index.js:
--------------------------------------------------------------------------------
1 | import { DEFAULT_SPEAKER, SPEAKERS } from './constants';
2 | 
3 | const worker = {};
4 | if (!worker.current) {
5 |     // Create the worker if it does not yet exist.
6 |     worker.current = new Worker(new URL('./worker.js', import.meta.url), {
7 |         type: 'module'
8 |     });
9 | }
10 | 
11 | window.doSpeech = false;
12 | 
13 | const onMessageReceived = (e) => {
14 |     switch (e.data.status) {
15 |         case 'error':
16 |             window.onSpeechResponse(null);
17 |             window.doSpeech = false;
18 |             break;
19 |         case 'complete': {
20 |             const blobUrl = URL.createObjectURL(e.data.output);
21 |             window.onSpeechResponse(blobUrl);
22 |             window.doSpeech = false;
23 |             break;
24 |         }
25 |     }
26 | };
27 | worker.current.addEventListener('message', onMessageReceived);
28 | 
29 | const handleGenerateSpeech = (text, speaker_id = DEFAULT_SPEAKER) => {
30 |     window.doSpeech = true;
31 |     worker.current.postMessage({
32 |         text,
33 |         speaker_id: speaker_id,
34 |     });
35 | };
36 | 
37 | window.SPEAKERS = SPEAKERS;
38 | window.handleGenerateSpeech = handleGenerateSpeech;
39 | window.onSpeechResponse = (url) => console.log(url);
--------------------------------------------------------------------------------
/projects/text_to_speech/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "main": "index.js",
3 | "dependencies": {
4 | "@xenova/transformers": "^2.16.1",
5 | "webpack": "^5.91.0",
6 | "webpack-node-externals": "^3.0.0"
7 | },
8 | "bundledDependencies": [
9 | "@xenova/transformers"
10 | ],
11 | "devDependencies": {
12 | "pack": "^2.2.0",
13 | "web": "^0.0.2",
14 | "webpack-cli": "^5.1.4"
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/projects/text_to_speech/utils.js:
--------------------------------------------------------------------------------
1 | // Adapted from https://www.npmjs.com/package/audiobuffer-to-wav
2 |
3 | export function encodeWAV(samples) {
4 | let offset = 44;
5 | const buffer = new ArrayBuffer(offset + samples.length * 4);
6 | const view = new DataView(buffer);
7 | const sampleRate = 16000;
8 |
9 | /* RIFF identifier */
10 | writeString(view, 0, 'RIFF')
11 | /* RIFF chunk length */
12 | view.setUint32(4, 36 + samples.length * 4, true)
13 | /* RIFF type */
14 | writeString(view, 8, 'WAVE')
15 | /* format chunk identifier */
16 | writeString(view, 12, 'fmt ')
17 | /* format chunk length */
18 | view.setUint32(16, 16, true)
19 | /* sample format (raw) */
20 | view.setUint16(20, 3, true)
21 | /* channel count */
22 | view.setUint16(22, 1, true)
23 | /* sample rate */
24 | view.setUint32(24, sampleRate, true)
25 | /* byte rate (sample rate * block align) */
26 | view.setUint32(28, sampleRate * 4, true)
27 | /* block align (channel count * bytes per sample) */
28 | view.setUint16(32, 4, true)
29 | /* bits per sample */
30 | view.setUint16(34, 32, true)
31 | /* data chunk identifier */
32 | writeString(view, 36, 'data')
33 | /* data chunk length */
34 | view.setUint32(40, samples.length * 4, true)
35 |
36 | for (let i = 0; i < samples.length; ++i, offset += 4) {
37 | view.setFloat32(offset, samples[i], true)
38 | }
39 |
40 | return buffer
41 | }
42 |
43 | function writeString(view, offset, string) {
44 | for (let i = 0; i < string.length; ++i) {
45 | view.setUint8(offset + i, string.charCodeAt(i))
46 | }
47 | }
--------------------------------------------------------------------------------
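For reference, a Python sketch (not part of the project) of the same 44-byte WAV header that encodeWAV emits — 32-bit IEEE-float samples, mono, 16 kHz:

import struct

def wav_header(num_samples: int, sample_rate: int = 16000) -> bytes:
    data_len = num_samples * 4              # 4 bytes per float32 sample
    return (
        b"RIFF" + struct.pack("<I", 36 + data_len) + b"WAVE"
        + b"fmt " + struct.pack(
            "<IHHIIHH",
            16,               # format chunk length
            3,                # sample format: 3 = IEEE float
            1,                # channel count (mono)
            sample_rate,      # sample rate
            sample_rate * 4,  # byte rate
            4,                # block align
            32,               # bits per sample
        )
        + b"data" + struct.pack("<I", data_len)
    )

assert len(wav_header(0)) == 44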
/projects/text_to_speech/webpack.config.js:
--------------------------------------------------------------------------------
1 | const path = require('path');
2 | const webpack = require('webpack');
3 |
4 | module.exports = {
5 | mode: 'production',
6 | entry: {
7 | server: './index.js',
8 | },
9 | output: {
10 | path: path.join(__dirname, 'build'),
11 | filename: 'index.js'
12 | },
13 | module: {
14 | rules: [
15 | {
16 | exclude: /node_modules/
17 | }
18 | ]
19 | }
20 | };
--------------------------------------------------------------------------------
/projects/windows/copy.sh:
--------------------------------------------------------------------------------
1 | cp -r * /var/win/shared/
2 | cp -r projects/windows/* /var/win/shared/
3 | cp setup.py /var/win/shared/
4 | cp README.md /var/win/shared/
5 | #git clone https://github.com/pyinstaller/pyinstaller/ /var/win/shared/pyinstaller
--------------------------------------------------------------------------------
/projects/windows/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | windows:
4 | image: dockurr/windows
5 | container_name: windows
6 | environment:
7 | VERSION: "win11"
8 | devices:
9 | - /dev/kvm
10 | cap_add:
11 | - NET_ADMIN
12 | ports:
13 | - 8006:8006
14 | - 3389:3389/tcp
15 | - 3389:3389/udp
16 | stop_grace_period: 2m
17 | restart: on-failure
18 | volumes:
19 | - /var/win:/storage
--------------------------------------------------------------------------------
/projects/windows/icon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meowmurmur/gpt4free/fea9e7a198fd21aa22c9ced90977c941c4ceffb6/projects/windows/icon.ico
--------------------------------------------------------------------------------
/projects/windows/main.py:
--------------------------------------------------------------------------------
1 | import ssl
2 | import certifi
3 | from functools import partial
4 |
5 | ssl.default_ca_certs = certifi.where()
6 | ssl.create_default_context = partial(
7 | ssl.create_default_context,
8 | cafile=certifi.where()
9 | )
10 |
11 | from g4f.gui.run import run_gui_args, gui_parser
12 | import g4f.debug
13 | g4f.debug.version_check = False
14 | g4f.debug.version = "0.3.1.7"
15 |
16 | if __name__ == "__main__":
17 | parser = gui_parser()
18 | args = parser.parse_args()
19 | run_gui_args(args)
--------------------------------------------------------------------------------
/projects/windows/main.spec:
--------------------------------------------------------------------------------
1 | # -*- mode: python ; coding: utf-8 -*-
2 |
3 |
4 | a = Analysis(
5 | ['main.py'],
6 | pathex=[],
7 | binaries=[],
8 | datas=[],
9 | hiddenimports=[],
10 | hookspath=[],
11 | hooksconfig={},
12 | runtime_hooks=[],
13 | excludes=[],
14 | noarchive=False,
15 | )
16 | pyz = PYZ(a.pure)
17 |
18 | exe = EXE(
19 | pyz,
20 | a.scripts,
21 | [],
22 | exclude_binaries=True,
23 | name='g4f',
24 | debug=False,
25 | bootloader_ignore_signals=False,
26 | strip=False,
27 | upx=True,
28 | console=True,
29 | disable_windowed_traceback=False,
30 | argv_emulation=False,
31 | target_arch=None,
32 | codesign_identity=None,
33 | entitlements_file=None,
34 | icon='icon.ico',
35 | )
36 | coll = COLLECT(
37 | exe,
38 | a.binaries,
39 | Tree('//host.lan/Data/g4f/gui/client', prefix='client'),
40 | a.datas,
41 | strip=False,
42 | upx=True,
43 | upx_exclude=[],
44 | name='g4f',
45 | )
46 |
--------------------------------------------------------------------------------
/requirements-min.txt:
--------------------------------------------------------------------------------
1 | requests
2 | aiohttp
3 | brotli
4 | pycryptodome
5 | curl_cffi>=0.6.2
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
2 | pycryptodome
3 | curl_cffi>=0.6.2
4 | aiohttp
5 | certifi
6 | browser_cookie3
7 | PyExecJS
8 | duckduckgo-search>=5.0
9 | nest_asyncio
10 | werkzeug
11 | loguru
12 | pillow
13 | platformdirs
14 | fastapi
15 | uvicorn
16 | flask
17 | brotli
18 | beautifulsoup4
19 | aiohttp_socks
20 | pywebview
21 | plyer
22 | cryptography
23 | nodriver
24 |
--------------------------------------------------------------------------------