├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── empty-issue-template.md │ └── feature_request.md └── workflows │ ├── binaries-minimal-publish.yml │ ├── binaries-publish.yml │ ├── close-inactive-issue.yml │ ├── debian-minimal-publish.yml │ ├── debian-publish.yml │ ├── python-package.yml │ └── python-publish.yml ├── .gitignore ├── LICENSE ├── Makefile ├── assets ├── Figure_1.png ├── deb │ ├── DEBIAN │ │ ├── control │ │ ├── postinst │ │ ├── postrm │ │ ├── preinst │ │ └── prerm │ └── usr │ │ └── share │ │ └── applications │ │ ├── icons │ │ └── pytgpt.png │ │ └── pytgpt.desktop ├── demo-1.gif ├── logo.png └── py-tgpt.png ├── docs ├── CHANGELOG.md └── README.md ├── main.py ├── requirements.txt ├── setup.py ├── src └── pytgpt │ ├── __init__.py │ ├── __main__.py │ ├── ai4chat │ ├── __init__.py │ └── main.py │ ├── api │ ├── __init__.py │ ├── __main__.py │ ├── utils.py │ └── v1.py │ ├── async_providers.py │ ├── auto │ ├── __init__.py │ ├── errors.py │ └── main.py │ ├── base.py │ ├── console.py │ ├── deepseek │ ├── __init__.py │ └── main.py │ ├── exceptions.py │ ├── gpt4all │ ├── __init__.py │ └── main.py │ ├── gpt4free │ ├── __init__.py │ ├── main.py │ └── utils.py │ ├── groq │ ├── __init__.py │ └── main.py │ ├── imager │ ├── __init__.py │ └── imager.py │ ├── koboldai │ ├── __init__.py │ └── main.py │ ├── novita │ ├── __init__.py │ └── main.py │ ├── openai │ ├── __init__.py │ └── main.py │ ├── perplexity │ ├── __init__.py │ └── main.py │ ├── phind │ ├── __init__.py │ └── main.py │ ├── poe │ ├── __init__.py │ └── main.py │ ├── providers.py │ └── utils.py └── tests ├── __init__.py ├── base.py ├── test_ai4chat_tgpt.py ├── test_api.py ├── test_auto_tgpt.py ├── test_deepseek.py ├── test_gpt4all.py ├── test_gpt4free.py ├── test_groq.py ├── test_imager_tgpt.py ├── test_koboldai.py ├── test_novita.py ├── test_openai.py ├── test_perplexity.py ├── test_phind_tgpt.py ├── test_poe.py ├── test_utils.py └── test_webchatgpt.py /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]: Your unexpected behavior with python-tgpt!" 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots or Code snippets** 24 | If applicable, add screenshots or code snippets to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS [e.g. Windows, Mac OS, Linux]: 28 | - Python-tgpt version [v0.5.3]: 29 | - Binary config [x86, x64, x32]: 30 | - Python version [If running from source]: 31 | 32 | **Additional context** 33 | Add any other context about the problem here. 34 | 35 | **Workaround** 36 | Is there a way of achieving the desired action without encountering the error? Describe. 37 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/empty-issue-template.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Empty issue template 3 | about: Describe this issue your facing as subject. 4 | title: Describe this issue your facing as subject. 
5 | labels: question 6 | assignees: '' 7 | 8 | --- 9 | 10 | 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "[Feature]: Your expected missing feature in python-tgpt. " 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/binaries-minimal-publish.yml: -------------------------------------------------------------------------------- 1 | 2 | name: Make minimal executables 3 | 4 | on: 5 | push: 6 | tags: 7 | - '*' 8 | 9 | jobs: 10 | publish: 11 | name: Minimal executable for ${{ matrix.os }} 12 | runs-on: ${{ matrix.os }} 13 | strategy: 14 | matrix: 15 | include: 16 | - os: ubuntu-latest 17 | artifact_name: Linux 18 | artifact_upload_name: Linux 19 | asset_name: pytgpt-linux-amd64-minimal 20 | - os: macos-latest 21 | artifact_name: MacOS 22 | artifact_upload_name: MacOS 23 | asset_name: pytgpt-macos-amd64-minimal 24 | - os: windows-latest 25 | artifact_name: Windows 26 | artifact_upload_name: Windows.exe 27 | asset_name: pytgpt-windows-amd64-minimal.exe 28 | 29 | steps: 30 | - uses: actions/checkout@v3 31 | - name: Set up Python 32 | uses: actions/setup-python@v3 33 | with: 34 | python-version: '3.13' 35 | - name: Install dependencies and python-tgpt 36 | run: | 37 | python -m pip install --upgrade pip pyinstaller pillow 38 | python -m pip install -r requirements.txt 39 | python -m pip install . 40 | python -m pip install g4f==0.2.6.1 41 | - name: Build executable 42 | run: | 43 | pyinstaller main.py --onefile --exclude pandas --distpath dist1 --workpath build1 --log-level INFO --exclude numpy --exclude matplotlib --exclude PyQt5 --exclude PyQt6 --exclude share --contents-directory . 
--icon assets/logo.png --noconfirm --name ${{ matrix.artifact_name }} 44 | - name: Upload binaries to release 45 | uses: svenstaro/upload-release-action@v2 46 | with: 47 | repo_token: ${{ secrets.GITHUB_TOKEN }} 48 | file: dist1/${{ matrix.artifact_upload_name }} 49 | asset_name: ${{ matrix.asset_name }} 50 | tag: ${{ github.ref }} 51 | overwrite: true -------------------------------------------------------------------------------- /.github/workflows/binaries-publish.yml: -------------------------------------------------------------------------------- 1 | 2 | name: Make executables 3 | 4 | on: 5 | push: 6 | tags: 7 | - '*' 8 | 9 | jobs: 10 | publish: 11 | name: Executables for ${{ matrix.os }} 12 | runs-on: ${{ matrix.os }} 13 | strategy: 14 | matrix: 15 | include: 16 | - os: ubuntu-latest 17 | artifact_name: Linux 18 | artifact_upload_name: Linux 19 | asset_name: pytgpt-linux-amd64 20 | - os: macos-latest 21 | artifact_name: MacOS 22 | artifact_upload_name: MacOS 23 | asset_name: pytgpt-macos-amd64 24 | - os: windows-latest 25 | artifact_name: Windows 26 | artifact_upload_name: Windows.exe 27 | asset_name: pytgpt-windows-amd64.exe 28 | 29 | steps: 30 | - uses: actions/checkout@v3 31 | - name: Set up Python 32 | uses: actions/setup-python@v3 33 | with: 34 | python-version: '3.13' 35 | - name: Install dependencies and python-tgpt 36 | run: | 37 | python -m pip install --upgrade pip pyinstaller pillow 38 | python -m pip install -r requirements.txt 39 | python -m pip install . 40 | python -m pip install g4f[all]==0.2.6.1 41 | - name: Build executable 42 | run: | 43 | pyinstaller main.py --onefile --exclude pandas --distpath dist --workpath build --log-level INFO --exclude numpy --exclude matplotlib --exclude PyQt5 --exclude PyQt6 --exclude share --contents-directory . --icon assets/logo.png --noconfirm --name ${{ matrix.artifact_name }} 44 | - name: Upload binaries to release 45 | uses: svenstaro/upload-release-action@v2 46 | with: 47 | repo_token: ${{ secrets.GITHUB_TOKEN }} 48 | file: dist/${{ matrix.artifact_upload_name }} 49 | asset_name: ${{ matrix.asset_name }} 50 | tag: ${{ github.ref }} 51 | overwrite: true -------------------------------------------------------------------------------- /.github/workflows/close-inactive-issue.yml: -------------------------------------------------------------------------------- 1 | name: Close inactive issues 2 | 3 | on: 4 | schedule: 5 | - cron: "5 0 * * *" 6 | 7 | jobs: 8 | close-issues: 9 | runs-on: ubuntu-latest 10 | permissions: 11 | issues: write 12 | pull-requests: write 13 | steps: 14 | - uses: actions/stale@v5 15 | with: 16 | days-before-issue-stale: 14 17 | days-before-issue-close: 14 18 | 19 | days-before-pr-stale: 14 20 | days-before-pr-close: 14 21 | 22 | stale-issue-label: "stale" 23 | stale-pr-label: "stale" 24 | 25 | stale-issue-message: "Bumping this issue because it has been open for 14 days with no activity. Closing automatically in 14 days unless it becomes active again." 26 | close-issue-message: "Closing due to inactivity." 27 | 28 | stale-pr-message: "Bumping this pull request because it has been open for 14 days with no activity. Closing automatically in 14 days unless it becomes active again." 29 | close-pr-message: "Closing due to inactivity." 
30 | 31 | repo-token: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/debian-minimal-publish.yml: -------------------------------------------------------------------------------- 1 | 2 | name: Debian Minimal Release 3 | 4 | on: 5 | push: 6 | tags: 7 | - '*' 8 | 9 | jobs: 10 | publish: 11 | name: Minimal executable for ${{ matrix.os }} 12 | runs-on: ${{ matrix.os }} 13 | strategy: 14 | matrix: 15 | python-version: ["3.13"] 16 | include: 17 | - os: ubuntu-latest 18 | artifact_name: pytgpt.deb 19 | asset_name: pytgpt-linux-amd64-minimal.deb 20 | 21 | steps: 22 | - uses: actions/checkout@v3 23 | - name: Set up Python 24 | uses: actions/setup-python@v3 25 | with: 26 | python-version: '3.13' 27 | - name: Install pip and pyinstaller 28 | run: python -m pip install --upgrade pip pyinstaller 29 | - name: Install python-tgpt 30 | run: python -m pip install . 31 | - name: Build .deb file 32 | run: | 33 | make build-minimal-deb 34 | - name: Upload .deb to release 35 | uses: svenstaro/upload-release-action@v2 36 | with: 37 | repo_token: ${{ secrets.GITHUB_TOKEN }} 38 | file: ${{ matrix.artifact_name }} 39 | asset_name: ${{ matrix.asset_name }} 40 | tag: ${{ github.ref }} 41 | overwrite: true -------------------------------------------------------------------------------- /.github/workflows/debian-publish.yml: -------------------------------------------------------------------------------- 1 | 2 | name: Debian Release 3 | 4 | on: 5 | push: 6 | tags: 7 | - '*' 8 | 9 | jobs: 10 | publish: 11 | name: Executables for ${{ matrix.os }} 12 | runs-on: ${{ matrix.os }} 13 | strategy: 14 | matrix: 15 | python-version: ["3.13"] 16 | include: 17 | - os: ubuntu-latest 18 | artifact_name: pytgpt.deb 19 | asset_name: pytgpt-linux-amd64.deb 20 | 21 | steps: 22 | - uses: actions/checkout@v3 23 | - name: Set up Python 24 | uses: actions/setup-python@v3 25 | with: 26 | python-version: '3.13' 27 | - name: Install pip and pyinstaller 28 | run: python -m pip install --upgrade pip pyinstaller 29 | - name: Install python-tgpt 30 | run: python -m pip install . 
31 | - name: Build .deb file 32 | run: | 33 | make build-deb 34 | - name: Upload .deb to release 35 | uses: svenstaro/upload-release-action@v2 36 | with: 37 | repo_token: ${{ secrets.GITHUB_TOKEN }} 38 | file: ${{ matrix.artifact_name }} 39 | asset_name: ${{ matrix.asset_name }} 40 | tag: ${{ github.ref }} 41 | overwrite: true -------------------------------------------------------------------------------- /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python 3 | 4 | name: Python package 5 | 6 | on: 7 | push: 8 | branches: [ "main", "master" ] 9 | pull_request: 10 | branches: [ "main", "master" ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | python-version: [ "3.13", "3.12", "3.11" ] 20 | 21 | steps: 22 | - uses: actions/checkout@v3 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v3 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | - name: Install dependencies 28 | run: | 29 | make install 30 | python -m pip install flake8 31 | - name: Lint with flake8 32 | run: | 33 | # stop the build if there are Python syntax errors or undefined names 34 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 35 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 36 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 37 | - name: Test with Unittest 38 | run: | 39 | make test-utils test-tgpt test-api 40 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 
8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | permissions: 16 | contents: read 17 | 18 | jobs: 19 | deploy: 20 | 21 | runs-on: ubuntu-latest 22 | 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python 26 | uses: actions/setup-python@v3 27 | with: 28 | python-version: '3.13' 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | python -m pip install -r requirements.txt 33 | pip install build 34 | - name: Build package 35 | run: python -m build 36 | - name: Publish package 37 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 38 | with: 39 | user: __token__ 40 | password: ${{ secrets.PYPI_API_TOKEN }} 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 
109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | *.vscode 162 | *.env 163 | *test.py 164 | *recons 165 | README.md 166 | *.txt 167 | test-*.py 168 | make-*.py 169 | *.deb 170 | *.spec 171 | assets/deb/DEBIAN/control 172 | assets/deb/usr/share/applications/pytgpt.desktop 173 | *.jpeg 174 | *.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Smartwa 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Define targets 2 | .PHONY: install install-minimal test test-tgpt test-g4f test-api test-utils build build-deb build-minimal-deb clean 3 | 4 | # Define variables 5 | PYTHON := python3 6 | PI := $(PYTHON) -m pip 7 | PYINSTALLER := $(PYTHON) -m PyInstaller 8 | DEB := $(shell pwd)/assets/deb 9 | DEBLIB := $(DEB)/usr/lib 10 | 11 | 12 | # Default target 13 | default: install test build 14 | 15 | # Target to install dependencies 16 | install: clean 17 | $(PI) install -U pip 18 | $(PI) install -r requirements.txt 19 | $(PI) install -e . 
20 | $(PI) install --upgrade g4f[all] 21 | 22 | # Target to install minimal dependencies 23 | install-minimal: clean 24 | $(PI) install -U pip 25 | $(PI) install -r requirements.txt 26 | $(PI) install -e . 27 | 28 | # Target to run tests 29 | test: 30 | $(PYTHON) -m unittest discover -s tests -p 'test_*.py' -f -v 31 | 32 | # Target to run tgpt providers test 33 | test-tgpt: 34 | $(PYTHON) -m unittest discover -s tests -p 'test_*_tgpt.py' -f -v 35 | 36 | # Target to run REST-api test 37 | test-api: 38 | $(PYTHON) -m unittest discover -s tests -p 'test_api.py' -f -v 39 | 40 | # Target to run gpt4free test 41 | test-g4f: 42 | $(PYTHON) -m unittest discover -s tests -p 'test_gpt4free.py' -f -v 43 | 44 | # Target to run pytgpt utils test 45 | test-utils: 46 | $(PYTHON) -m unittest discover -s tests -p 'test_utils.py' -f -v 47 | 48 | # Target to create an executable using PyInstaller 49 | build: install 50 | $(PI) install --upgrade pyinstaller 51 | $(PI) uninstall gpt4all -y 52 | $(PYINSTALLER) main.py \ 53 | --onefile \ 54 | --exclude pandas \ 55 | --paths $(shell pwd) \ 56 | --distpath dist/$(shell uname) \ 57 | --workpath build/$(shell uname) \ 58 | --log-level INFO \ 59 | --exclude numpy \ 60 | --exclude matplotlib \ 61 | --exclude PyQt5 \ 62 | --exclude PyQt6 \ 63 | --exclude share \ 64 | --icon assets/logo.png \ 65 | --noconfirm 66 | 67 | # Target to create .deb file 68 | build-deb: install 69 | $(PI) install --upgrade pyinstaller 70 | $(PI) uninstall gpt4all -y 71 | $(PYINSTALLER) main.py \ 72 | --onedir \ 73 | --exclude pandas \ 74 | --paths $(shell pwd) \ 75 | --distpath $(DEBLIB) \ 76 | --workpath build/$(shell uname) \ 77 | --log-level INFO \ 78 | --exclude numpy \ 79 | --exclude matplotlib \ 80 | --exclude PyQt5 \ 81 | --exclude PyQt6 \ 82 | --exclude share \ 83 | --name pytgpt \ 84 | --contents-directory . \ 85 | --noconfirm 86 | 87 | echo "Version: $(shell pytgpt --version | grep -oP 'version \K[\d.]+')" >> $(DEB)/DEBIAN/control 88 | echo "Version=$(shell pytgpt --version | grep -oP 'version \K[\d.]+')" >> $(DEB)/usr/share/applications/pytgpt.desktop 89 | 90 | echo "/usr/lib/pytgpt\n"\ 91 | "/usr/bin/pytgpt\n"\ 92 | "/usr/share/applications/icons/pytgpt.png\n"\ 93 | "/usr/share/applications/pytgpt.desktop" > $(DEBLIB)/pytgpt/entries.txt 94 | 95 | echo "Installed-Size: $(shell du -sh -B KB $(DEB) | awk '{print $$1}')" >> $(DEB)/DEBIAN/control 96 | 97 | dpkg-deb --build -Zxz $(DEB) pytgpt.deb 98 | 99 | # Target to build minimal deb 100 | build-minimal-deb: install-minimal 101 | $(PI) install --upgrade pyinstaller 102 | $(PYINSTALLER) main.py \ 103 | --onedir \ 104 | --exclude pandas \ 105 | --paths $(shell pwd) \ 106 | --distpath $(DEBLIB) \ 107 | --workpath build/$(shell uname) \ 108 | --log-level INFO \ 109 | --exclude numpy \ 110 | --exclude matplotlib \ 111 | --exclude PyQt5 \ 112 | --exclude PyQt6 \ 113 | --exclude share \ 114 | --name pytgpt \ 115 | --contents-directory . 
\ 116 | --noconfirm 117 | 118 | echo "Version: $(shell pytgpt --version | grep -oP 'version \K[\d.]+')" >> $(DEB)/DEBIAN/control 119 | echo "Version=$(shell pytgpt --version | grep -oP 'version \K[\d.]+')" >> $(DEB)/usr/share/applications/pytgpt.desktop 120 | 121 | echo "/usr/lib/pytgpt\n"\ 122 | "/usr/bin/pytgpt\n"\ 123 | "/usr/share/applications/icons/pytgpt.png\n"\ 124 | "/usr/share/applications/pytgpt.desktop" > $(DEBLIB)/pytgpt/entries.txt 125 | 126 | echo "Installed-Size: $(shell du -sh -B KB $(DEB) | awk '{print $$1}')" >> $(DEB)/DEBIAN/control 127 | 128 | dpkg-deb --build -Zxz $(DEB) pytgpt.deb 129 | 130 | # Target to clean up build artifacts 131 | clean: 132 | rm -rf build/ dist/ *.spec *.deb 133 | -------------------------------------------------------------------------------- /assets/Figure_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Simatwa/python-tgpt/6a49c156d99eed6c5f7b3045823b60087a870d97/assets/Figure_1.png -------------------------------------------------------------------------------- /assets/deb/DEBIAN/control: -------------------------------------------------------------------------------- 1 | Package: pytgpt 2 | Homepage: http://github.com/Simatwa/python-tgpt 3 | Section: utils 4 | Priority: optional 5 | Architecture: amd64 6 | Maintainer: Smartwa Caleb 7 | Description: An application that uses free LLM providers to generate text. 8 | Depends: libc6 (>= 2.17) 9 | -------------------------------------------------------------------------------- /assets/deb/DEBIAN/postinst: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ln -s /usr/lib/pytgpt/pytgpt /usr/bin/pytgpt 4 | 5 | echo "To get the most out of this package, install it from pypi " 6 | echo "Submit any bug at https://github.com/Simatwa/python-tgpt/issues/new/choose" -------------------------------------------------------------------------------- /assets/deb/DEBIAN/postrm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm /usr/bin/pytgpt 4 | echo "You might want to run pytgpt from source - " 5 | echo "Submit any bug at " -------------------------------------------------------------------------------- /assets/deb/DEBIAN/preinst: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FILE_LIST=/usr/lib/pytgpt/entries.txt 4 | 5 | if [ -f "$FILE_LIST" ]; then 6 | while read -r ENTRY; do 7 | if [ -d "$ENTRY" ]; then 8 | rm -rf "$ENTRY" 9 | elif [ -f "$ENTRY" ]; then 10 | rm -f "$ENTRY" 11 | else 12 | echo "" 13 | fi 14 | done < "$FILE_LIST" 15 | else 16 | echo "The file list $FILE_LIST does not exist. Nothing to do." 17 | fi -------------------------------------------------------------------------------- /assets/deb/DEBIAN/prerm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FILE_LIST=/usr/lib/pytgpt/entries.txt 4 | 5 | if [ -f "$FILE_LIST" ]; then 6 | while read -r ENTRY; do 7 | if [ -d "$ENTRY" ]; then 8 | rm -rf "$ENTRY" 9 | elif [ -f "$ENTRY" ]; then 10 | rm -f "$ENTRY" 11 | else 12 | echo "" 13 | fi 14 | done < "$FILE_LIST" 15 | else 16 | echo "The file list $FILE_LIST does not exist. Nothing to do." 
17 | fi -------------------------------------------------------------------------------- /assets/deb/usr/share/applications/icons/pytgpt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Simatwa/python-tgpt/6a49c156d99eed6c5f7b3045823b60087a870d97/assets/deb/usr/share/applications/icons/pytgpt.png -------------------------------------------------------------------------------- /assets/deb/usr/share/applications/pytgpt.desktop: -------------------------------------------------------------------------------- 1 | [Desktop Entry] 2 | Name=pytgpt 3 | GenericName=pytgpt 4 | Type=Application 5 | Comment=Interact with free LLMs. 6 | Exec=/usr/bin/pytgpt 7 | Terminal=true 8 | Icon=/usr/share/applications/icons/pytgpt.png 9 | -------------------------------------------------------------------------------- /assets/demo-1.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Simatwa/python-tgpt/6a49c156d99eed6c5f7b3045823b60087a870d97/assets/demo-1.gif -------------------------------------------------------------------------------- /assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Simatwa/python-tgpt/6a49c156d99eed6c5f7b3045823b60087a870d97/assets/logo.png -------------------------------------------------------------------------------- /assets/py-tgpt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Simatwa/python-tgpt/6a49c156d99eed6c5f7b3045823b60087a870d97/assets/py-tgpt.png -------------------------------------------------------------------------------- /docs/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## v0.0.1 2 | 3 | **What's new?** 4 | 5 | - Initial release. 6 | 7 | ## v0.0.2 8 | 9 | **What's new?** 10 | 11 | - Enhanced response generation 12 | - Prompt optimizers added - *[code, shell_command]* 13 | - Prompt optimizers added - *Console* 14 | - Clear console - *Console* 15 | 16 | ## v0.0.3 17 | 18 | **What's new?** 19 | 20 | - Busy bar index bug **fixed** - *(Console)* 21 | - Other minor fixed. 22 | 23 | ## v0.0.4 24 | 25 | **What's new?** 26 | 27 | - Minor fixes 28 | 29 | ## v0.0.5 30 | 31 | **What's new?** 32 | 33 | - Multiple variables renamed. 34 | - First release under `python-tgpt` 35 | 36 | ## v0.0.6 37 | 38 | **What's new?** 39 | 40 | - `generate` is the default command. Thanks to @sameedzahoor 41 | - Control response framing `--quiet` 42 | - `generate` based prompt optimizaion - `--code` & `--shell` 43 | 44 | ## v0.0.7 45 | 46 | **What's new?** 47 | 48 | - Chat conversationally - *(Experimental)* 49 | - Maintain chat history *.txt* 50 | - Load chat history from file 51 | - Chain request through *proxies* 52 | 53 | ## v0.0.8 54 | 55 | **What's new?** 56 | 57 | - Reading piped input as prompt - *(Console)*. Thanks to @sameedzahoor 58 | - Reset conversation - *(Console)* 59 | - View conversation history - *(Console)* 60 | - Other minor fixes 61 | 62 | ## v0.0.9 63 | 64 | **What's new?** 65 | 66 | - Chatting conversationally - **Stable** 67 | 68 | ## v0.1.0 69 | 70 | **What's new?** 71 | 72 | - Chatting conversationally - **Default Mode** 73 | 74 | ## v0.1.1 75 | 76 | **What's new?** 77 | 78 | - Bug fixed - *file not found error* 79 | 80 | ## v0.1.2 81 | 82 | **What's new?** 83 | 84 | - Check version - `--version` 85 | - Minor fixes. 
86 | 87 | ## v0.1.3 88 | 89 | **What's new?** 90 | 91 | - Invalid response - **fixed** 92 | 93 | ## v0.1.4 94 | 95 | **What's new?** 96 | 97 | - Incomplete response - **fixed** 98 | 99 | ## v0.2.0 100 | 101 | **What's new?** 102 | 103 | - Multiple LLM providers 104 | 105 | ## v0.2.1 106 | 107 | **What's new?** 108 | 109 | - Fakeopen rendering issue fixed - [#7](https://github.com/Simatwa/python-tgpt/issues/7) 110 | 111 | ## v0.2.2 112 | 113 | **What's new?** 114 | 115 | - Package renamed to **pytgpt** - [#7](https://github.com/Simatwa/python-tgpt/issues/7) 116 | - Visible vertical overflow - [#7](https://github.com/Simatwa/python-tgpt/issues/7) 117 | 118 | ## v0.2.3 119 | 120 | **What's new?** 121 | 122 | - Tabulated help info - `h` 123 | - Control response vertical overflow behaviour. 124 | 125 | ## v0.2.4 126 | 127 | **What's new?** 128 | 129 | - [WebChatGPT](https://github.com/Simatwa/WebChatGPT/) added as provider 130 | - Awesome-prompts manipulation commands = **CRUD** 131 | - Other minor fixes. 132 | 133 | ## v0.2.5 134 | 135 | **What's new?** 136 | 137 | - New provider : [Bard](https://github.com/acheong08/bard) by **Google**. 138 | - Check whole last response 139 | - Other minor fixes. 140 | 141 | ## v0.2.6 142 | 143 | **What's new?** 144 | 145 | - Bug fixed - *reset conversation* - **bard** 146 | - Bug fixed - *low `httpx` logging level*. - **bard** 147 | 148 | ## v0.2.7 149 | 150 | **What's new?** 151 | 152 | - Busy bar disabled when `--quiet` issued in *generate* mode. #12 Thanks to @johnd0e 153 | - `interactive` takes action on `$ pytgpt`, otherwise one has to explicitly declare the action. #11 154 | 155 | ## v0.2.8 156 | 157 | **What's new?** 158 | 159 | - Auto-quiet on output redirection. Thanks to @johnd0e 160 | - Dropped support for sourcing prompt from `stdin stream` in Windows. #12 161 | - Colorized command prompt - *interactive* 162 | 163 | ## v0.2.9 164 | 165 | **What's new?** 166 | 167 | - Improved command prompt - *icon & color blending* 168 | - Bug fixed - *multiline prompt in `interactive`* 169 | 170 | ## v0.3.0 171 | 172 | **What's new?** 173 | 174 |
175 | 176 | 177 | 178 | - Improved introductory prompt 179 | 180 | 181 | 182 | *You're a Large Language Model for chatting with people. 183 | Assume role of the LLM and give your response.* 184 | 185 |
186 | 187 | - Combine both piped and explicitly issued prompts. #13 188 | - Support piping input in Windows. #13 189 | - Placeholder for piped input `{{stream}}` and copied text `{{copied}}`. 190 | 191 | ## v0.3.1 192 | 193 | **What's new?** 194 | 195 |
196 | 197 | 198 | 199 | 41 New models. Thanks to [gpt4free](https://github.com/xtekky/gpt4free). 200 | 201 | 202 | 203 | - AiChatOnline 204 | - Aura 205 | - Bard 206 | - Bing 207 | - ChatBase 208 | - ChatForAi 209 | - Chatgpt4Online 210 | - ChatgptAi 211 | - ChatgptDemo 212 | - ChatgptNext 213 | - Chatxyz 214 | - DeepInfra 215 | - FakeGpt 216 | - FreeChatgpt 217 | - GPTalk 218 | - GeekGpt 219 | - GeminiProChat 220 | - Gpt6 221 | - GptChatly 222 | - GptForLove 223 | - GptGo 224 | - GptTalkRu 225 | - Hashnode 226 | - HuggingChat 227 | - Koala 228 | - Liaobots 229 | - Llama2 230 | - MyShell 231 | - OnlineGpt 232 | - OpenaiChat 233 | - PerplexityAi 234 | - Phind 235 | - Pi 236 | - Poe 237 | - Raycast 238 | - TalkAi 239 | - Theb 240 | - ThebApi 241 | - You 242 | - Yqcloud 243 | 244 |
245 | 246 | - **Aura** is the default provider 247 | - Other minor fixes. 248 | 249 | ## v0.3.2 250 | 251 | **What's new?** 252 | 253 | - Added utility commands: `utils`, `gpt4free`, `update`, etc. 254 | - Support g4f-based non-working providers. 255 | - Drop restriction to specific `g4f` dependency version (https://github.com/Simatwa/python-tgpt/issues/14#issuecomment-1899468911) 256 | - Pass auth value to g4f-based providers. `-k` 257 | - Support proxying in g4f providers 258 | 259 | ## v0.3.3 260 | 261 | **What's new?** 262 | 263 | - **gpt4free** gui interface - `web` 264 | - `FakeGpt` - default provider. 265 | - *Other minor updates.* 266 | 267 | ## v0.3.4 268 | 269 | **What's new?** 270 | 271 | - Auto-detect placeholders (#17) 272 | - `Aura` - default provider 273 | 274 | ## v0.3.5 275 | 276 | **What's new?** 277 | 278 | - *Binaries for all systems.* 279 | 280 | ## v0.3.6 281 | 282 | **What's new?** 283 | 284 | - New provider - *Phind*. Resolves #18 285 | - New util command `latest`. 286 | - `phind` is the default provider - *console* 287 | - Fixed : `opengpt` - (#19) 288 | 289 | ## v0.3.7 290 | 291 | **What's new?** 292 | 293 | - *Binary dependencies fixed.* 294 | 295 | ## v0.3.8 296 | 297 | **What's new?** 298 | 299 | - Phind fixed. #21 300 | - Bard fixed. #23 #15 301 | - Full and minimal executables. 302 | 303 | ## v0.3.9 304 | 305 | **What's new?** 306 | 307 | - Phind fixed. https://github.com/Simatwa/python-tgpt/issues/21#issuecomment-1925326331 308 | 309 | ## v0.4.0 310 | 311 | **What's new?** 312 | 313 | - New provider - [Llama2](https://www.llama2.ai) 314 | - New provider - [Blackboxai](https://www.blackbox.ai) 315 | - Disable g4f version check. 316 | - Partial or full installation options. 317 | 318 | ## v0.4.1 319 | 320 | **What's new?** 321 | 322 | - Rawdog: generate and execute python code on your system, driven by your prompts. 323 | 324 |
325 | 326 | 327 | For instance: 328 | 329 | 330 | 331 | ```sh 332 | $ pytgpt generate -n -q "Visualize the disk usage using pie chart" --rawdog 333 | ``` 334 | 335 |

336 | 337 |

338 | 339 |
340 | 341 | 342 | ## v0.4.2 343 | 344 | **What's new?** 345 | 346 | 1. RawDog: 347 | - Give consent to script execution 348 | - Execute script internally or externally 349 | - Choose python interpreter name 350 | 351 | ## v0.4.3 352 | 353 | **What's new?** 354 | 355 | - Minor bug fix. RawDog : *generate* 356 | 357 | ## v0.4.4 358 | 359 | **What's new?** 360 | 361 | - Execute python code in responses *(interactive)* - `exec` 362 | - Execute python code using the system-installed python interpreter - *default* 363 | - Other minor fixes. 364 | 365 | ## v0.4.5 366 | 367 | **What's new?** 368 | 369 | - New model : **GPT4ALL** - Supports offline LLMs. 370 | 371 | ## v0.4.6 372 | 373 | **What's new?** 374 | 375 | - Revamped provider `webchatgpt`. 376 | - Dynamic provider `g4fauto`. #29 377 | - Test and save working g4f providers. #29 378 | ```sh 379 | pytgpt gpt4free test -y 380 | ``` 381 | - Order providers in ascending order. #31 382 | 383 | ## v0.4.7 384 | 385 | **What's new?** 386 | 387 | - `g4fauto` fixed. 388 | 389 | ## v0.4.8 390 | 391 | **What's new?** 392 | 393 | - Execute scripts with the native interpreter - *rawdog* 394 | - Typos fixed. 395 | - Other minor fixes. 396 | 397 | ## v0.4.9 398 | 399 | **What's new?** 400 | 401 | - `webchatgpt` supports intro prompt. 402 | - Fix: placeholder consistency - `{{stream}}|{{copied}}` 403 | - Other minor fixes. 404 | 405 | ## v0.5.0 406 | 407 | **What's new?** 408 | 409 | - added: Provider [poe](https://poe.com). Supports multiple models. Thanks to [snowby666/poe-api-wrapper](https://github.com/snowby666/poe-api-wrapper). 410 | 411 | ## v0.5.1 412 | 413 | **What's new?** 414 | 415 | - added: Load variables from `.env` file. 416 | - patch: Exclude selenium-dependent providers from tests. [#33](https://github.com/Simatwa/python-tgpt/issues/33) 417 | - patch: Show more gpt4free models. 418 | - added: Test logging flag. 419 | 420 | ## v0.5.2 421 | 422 | **What's new?** 423 | 424 | - fix: llama2. #34 425 | 426 | ## v0.5.3 427 | 428 | **What's new?** 429 | 430 | - added: Support for image generation. 431 | 432 | ## v0.5.4 433 | 434 | **What's new?** 435 | 436 | - fix: Gemini (Bard) provider. 437 | - added: awesome prompts info. 438 | 439 | ## v0.5.5 440 | 441 | **What's new?** 442 | 443 | - patch: `FreeGpt` default g4f-based provider. 444 | - added: `g4f` made a required dependency. 445 | - fix: `openai` response repetition. [#39](https://github.com/Simatwa/python-tgpt/issues/39) 446 | - Other minor fixes. 447 | 448 | ## v0.5.6 449 | 450 | **What's new?** 451 | 452 | - added: Provider [Groq](https://console.groq.com/) 453 | 454 | 455 | ## v0.5.7 456 | 457 | **What's new?** 458 | 459 | - fix: Provider `Opengpt`. 460 | 461 | ## v0.5.8 462 | 463 | **What's new?** 464 | 465 | - added: Provider `perplexity`. Thanks to [HelpingAI/Helpingai_T2](https://github.com/HelpingAI/Helpingai_T2) 466 | 467 | ## v0.6.0 468 | 469 | **What's new?** 470 | 471 | - added : [FastAPI](https://python-tgpt.onrender.com/docs). 472 | - New extra - `api` 473 | 474 | ## v0.6.1 475 | 476 | **What's new?** 477 | 478 | - added : FastAPI - Image generation 479 | - added : FastAPI - Providers endpoint. 480 | - patch : FastAPI - existing endpoints 481 | 482 | ## v0.6.2 483 | 484 | **What's new?** 485 | 486 | - patch : FastAPI - `/images` amount > 10 causing HTTP status code 500. 487 | - added : Hosted API health-status monitor. 488 | 489 | ## v0.6.3 490 | 491 | **What's new?** 492 | 493 | - added : API static contents - `clear` command.
494 | * `$ pytgpt api clear images` 495 | - patch : Streaming redirected outputs. 496 | * `pytgpt generate --raw "Write a short story" > shortstory.txt` #43 497 | - Other minor updates 498 | 499 | ## v0.6.4 500 | 501 | **What's new?** 502 | 503 | - feat : Dynamic provider - `auto` : *Working provider overall*. 504 | - Other minor feats. 505 | 506 | ## v0.6.5 507 | 508 | **What's new?** 509 | 510 | - feat: New text provider - [YepChat](https://yep.com) 511 | - feat: New image provider - [Prodia](https://prodia.com) 512 | - feat: Speech-synthesise responses. `--talk-to-me`, shortform `-ttm` 513 | - feat: Speech synthesis - **FastAPI** - `/audio` 514 | 515 | ## v0.6.6 516 | 517 | **What's new?** 518 | 519 | - feat: Minor mods 520 | - addon: Telegram-bot - [pytgpt-bot](https://github.com/Simatwa/pytgpt-bot) 521 | 522 | ## v0.6.7 523 | 524 | **What's new?** 525 | 526 | - feat: Override chat intro. 527 | - Other minor fixes. 528 | 529 | ## v0.6.8 530 | 531 | **What's new?** 532 | 533 | - fix: Failure to include intro in chat history - from `v0.6.6` 534 | 535 | ## v0.6.9 536 | 537 | **What's new?** 538 | 539 | - fix: Improper incomplete chat history generation. 540 | - feat: Load intro from chat history file. *(first line)* 541 | - feat: Set new `intro` on the fly. *(console)* 542 | - feat: `en-US-Wavenet-C` - default voice for speech synthesis. 543 | - fix: Perplexity AI raising `json.decoder.JSONDecodeError` 544 | 545 | ## v0.7.0 546 | 547 | **What's new?** 548 | 549 | > [!NOTE] 550 | > This is a remarkable milestone. We have to do it in style. 551 | 552 | - feat: **Asynchronous** implementation to all providers except a few. 553 | - feat: **Asynchronous** implementation to all `FastAPI` endpoints. 554 | 555 | 556 | ## v0.7.1 557 | 558 | **What's new?** 559 | 560 | - fix: Failure to support g4f providers - `FastApi` 561 | - feat: Rename `/audio` endpoints to `/voice` - `FastAPI` 562 | 563 | ## v0.7.2 564 | 565 | **What's new?** 566 | 567 | - feat: Extra installation requirement for Termux - `pip install python-tgpt[termux]` 568 | - fix: Limit g4f requirement for binaries to **v0.2.6.1** 569 | 570 | ## v0.7.3 571 | 572 | **What's new?** 573 | 574 | - feat: Termux extra installation options : `termux-cli`, `termux-api` and `termux-all` 575 | 576 | ## v0.7.4 577 | 578 | **What's new?** 579 | 580 | - fix : Image generation failure.
581 | 582 | ## v0.7.5 583 | 584 | **What's new?** 585 | 586 | - feat: New provider - [Novita](https://novita.ai) 587 | 588 | ## v0.7.8 589 | 590 | **What's new?** 591 | 592 | - feat: Added prompt completions in interactive mode `cli` 593 | 594 | ## v0.7.9 595 | 596 | **What's new?** 597 | 598 | - fix: Remove providers: yepchat, opengpt, leo etc 599 | 600 | 601 | ## v0.8.1 602 | 603 | **What's new?** 604 | 605 | - feat: New provider **Ai4chat** -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | # Create executable 2 | # pyinstaller main.py 3 | from pytgpt.console import main 4 | 5 | main() 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests[socks]>=2.32.3 2 | click==8.1.3 3 | rich==13.3.4 4 | clipman==3.1.0 5 | pyperclip==1.8.2 6 | appdirs==1.4.4 7 | colorama==0.4.6 8 | g4f>=0.3.3.3 9 | pyyaml==6.0.1 10 | matplotlib 11 | gpt4all==2.2.0 12 | poe-api-wrapper==1.7.0 13 | python-dotenv==1.0.0 14 | brotli==1.1.0 15 | Helpingai-T2==0.5 16 | fastapi[standard]==0.115.4 17 | python-vlc>=3.0.20 18 | httpx>=0.27.2 19 | prompt-toolkit==3.0.48 -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from setuptools import setup 4 | 5 | from setuptools import find_packages 6 | 7 | 8 | INSTALL_REQUIRE = [ 9 | "requests[socks]>=2.32.3", 10 | "appdirs==1.4.4", 11 | "pyyaml==6.0.1", 12 | "poe-api-wrapper==1.7.0", 13 | "brotli==1.1.0", 14 | "g4f>=0.3.3.3", 15 | "Helpingai-T2==0.5", 16 | "python-vlc>=3.0.20", 17 | "httpx==0.27.2", 18 | "click==8.1.3", 19 | ] 20 | 21 | cli_reqs = [ 22 | "rich==13.3.4", 23 | "clipman==3.1.0", 24 | "pyperclip==1.8.2", 25 | "colorama==0.4.6", 26 | "python-dotenv==1.0.0", 27 | "prompt-toolkit==3.0.48", 28 | ] 29 | 30 | api = [ 31 | "fastapi[standard]==0.115.4", 32 | ] 33 | 34 | termux = [ 35 | "g4f==0.2.6.1", 36 | ] 37 | 38 | EXTRA_REQUIRE = { 39 | "termux": termux, 40 | "termux-cli": termux + cli_reqs, 41 | "termux-api": termux + api, 42 | "termux-all": termux + cli_reqs + api, 43 | "cli": cli_reqs, 44 | "api": api, 45 | "all": ["g4f[all]>=0.3.3.3", "matplotlib", "gpt4all==2.2.0"] + cli_reqs + api, 46 | } 47 | 48 | DOCS_PATH = Path(__file__).parents[0] / "docs/README.md" 49 | PATH = Path("README.md") 50 | if not PATH.exists(): 51 | with Path.open(DOCS_PATH, encoding="utf-8") as f1: 52 | with Path.open(PATH, "w+", encoding="utf-8") as f2: 53 | f2.write(f1.read()) 54 | 55 | setup( 56 | name="python-tgpt", 57 | version="0.8.3", 58 | license="MIT", 59 | author="Smartwa", 60 | maintainer="Smartwa", 61 | author_email="simatwacaleb@proton.me", 62 | description="Interact with AI without API key", 63 | packages=find_packages("src"), 64 | package_dir={"": "src"}, 65 | url="https://github.com/Simatwa/python-tgpt", 66 | project_urls={ 67 | "Bug Report": "https://github.com/Simatwa/python-tgpt/issues/new", 68 | "Homepage": "https://github.com/Simatwa/python-tgpt", 69 | "Source Code": "https://github.com/Simatwa/python-tgpt", 70 | "Issue Tracker": "https://github.com/Simatwa/python-tgpt/issues", 71 | "Download": "https://github.com/Simatwa/python-tgpt/releases", 72 | "Documentation": "https://github.com/Simatwa/python-tgpt/blob/main/docs", 73 | }, 74 | entry_points={ 75 | 
"console_scripts": [ 76 | "pytgpt = pytgpt.console:main", 77 | ], 78 | }, 79 | install_requires=INSTALL_REQUIRE, 80 | extras_require=EXTRA_REQUIRE, 81 | python_requires=">=3.10", 82 | keywords=[ 83 | "chatgpt", 84 | "gpt", 85 | "tgpt", 86 | "pytgpt", 87 | "chatgpt-cli", 88 | "chatgpt-sdk", 89 | "chatgpt-api", 90 | "llama-api", 91 | "koboldai", 92 | "openai", 93 | "gpt4free", 94 | "gpt4all-cli", 95 | "gptcli", 96 | "poe-api", 97 | "perplexity", 98 | "novita", 99 | "gpt4free", 100 | "ai4chat", 101 | ], 102 | long_description=Path.open("README.md", encoding="utf-8").read(), 103 | long_description_content_type="text/markdown", 104 | classifiers=[ 105 | "License :: OSI Approved :: MIT License", 106 | "Intended Audience :: Developers", 107 | "Natural Language :: English", 108 | "License :: Free For Home Use", 109 | "Intended Audience :: Customer Service", 110 | "Programming Language :: Python", 111 | "Topic :: Software Development :: Libraries :: Python Modules", 112 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 113 | "Programming Language :: Python :: 3 :: Only", 114 | "Programming Language :: Python :: 3.10", 115 | "Programming Language :: Python :: 3.11", 116 | "Programming Language :: Python :: 3.12", 117 | "Programming Language :: Python :: 3.13", 118 | ], 119 | ) 120 | -------------------------------------------------------------------------------- /src/pytgpt/__init__.py: -------------------------------------------------------------------------------- 1 | import g4f 2 | from importlib import metadata 3 | import logging 4 | 5 | try: 6 | __version__ = metadata.version("python-tgpt") 7 | except metadata.PackageNotFoundError: 8 | __version__ = "0.0.0" 9 | 10 | __author__ = "Smartwa" 11 | __repo__ = "https://github.com/Simatwa/python-tgpt" 12 | 13 | tgpt_providers = [ 14 | "auto", 15 | "openai", 16 | "koboldai", 17 | "phind", 18 | "gpt4all", 19 | "g4fauto", 20 | "poe", 21 | "groq", 22 | "perplexity", 23 | "novita", 24 | "ai4chat", 25 | "deepseek", 26 | ] 27 | 28 | gpt4free_providers = [ 29 | provider.__name__ for provider in g4f.Provider.__providers__ # if provider.working 30 | ] 31 | 32 | available_providers = tgpt_providers + gpt4free_providers 33 | 34 | __all__ = [ 35 | "appdir", 36 | "imager", 37 | ] + available_providers 38 | 39 | logging.getLogger("httpx").setLevel(logging.ERROR) 40 | logging.getLogger("websocket").setLevel(logging.ERROR) 41 | -------------------------------------------------------------------------------- /src/pytgpt/__main__.py: -------------------------------------------------------------------------------- 1 | from pytgpt.console import main 2 | 3 | main() 4 | -------------------------------------------------------------------------------- /src/pytgpt/ai4chat/__init__.py: -------------------------------------------------------------------------------- 1 | from .main import AI4CHAT 2 | from .main import AsyncAI4CHAT 3 | from .main import session 4 | 5 | 6 | __info__ = "Interact with AI4Chat's model." 
7 | 8 | __all__ = ["AI4CHAT", "AsyncAI4CHAT", "session"] 9 | -------------------------------------------------------------------------------- /src/pytgpt/ai4chat/main.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import httpx 4 | import html 5 | import re 6 | import pytgpt.exceptions as exceptions 7 | from pytgpt.utils import Optimizers 8 | from pytgpt.utils import Conversation 9 | from pytgpt.utils import AwesomePrompts 10 | from pytgpt.utils import sanitize_stream 11 | from pytgpt.base import Provider, AsyncProvider 12 | from typing import AsyncGenerator 13 | 14 | session = requests.Session() 15 | 16 | model = "gpt-4" 17 | 18 | 19 | class AI4CHAT(Provider): 20 | def __init__( 21 | self, 22 | is_conversation: bool = True, 23 | max_tokens: int = 600, 24 | temperature: float = 1, 25 | presence_penalty: int = 0, 26 | frequency_penalty: int = 0, 27 | top_p: float = 1, 28 | model: str = model, 29 | timeout: int = 30, 30 | intro: str = None, 31 | filepath: str = None, 32 | update_file: bool = True, 33 | proxies: dict = {}, 34 | history_offset: int = 10250, 35 | act: str = None, 36 | ): 37 | """Instantiates AI4CHAT 38 | 39 | Args: 40 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True. 41 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. 42 | temperature (float, optional): Charge of the generated text's randomness. Defaults to 1. 43 | presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0. 44 | frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0. 45 | top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999. 46 | model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo". 47 | timeout (int, optional): Http request timeout. Defaults to 30. 48 | intro (str, optional): Conversation introductory prompt. Defaults to None. 49 | filepath (str, optional): Path to file containing conversation history. Defaults to None. 50 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. 51 | proxies (dict, optional): Http request proxies. Defaults to {}. 52 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. 53 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. 
54 | """ 55 | self.is_conversation = is_conversation 56 | self.max_tokens_to_sample = max_tokens 57 | self.model = model 58 | self.temperature = temperature 59 | self.presence_penalty = presence_penalty 60 | self.frequency_penalty = frequency_penalty 61 | self.top_p = top_p 62 | self.chat_endpoint = "https://www.ai4chat.co/generate-response" 63 | self.stream_chunk_size = 64 64 | self.timeout = timeout 65 | self.last_response = {} 66 | self.headers = { 67 | "accept": "*/*", 68 | "accept-language": "en-US,en;q=0.9", 69 | "content-type": "application/json", 70 | "referer": "https://www.ai4chat.co/gpt/talkdirtytome", 71 | "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", 72 | } 73 | 74 | self.__available_optimizers = ( 75 | method 76 | for method in dir(Optimizers) 77 | if callable(getattr(Optimizers, method)) and not method.startswith("__") 78 | ) 79 | session.headers.update(self.headers) 80 | Conversation.intro = ( 81 | AwesomePrompts().get_act( 82 | act, raise_not_found=True, default=None, case_insensitive=True 83 | ) 84 | if act 85 | else intro or Conversation.intro 86 | ) 87 | self.conversation = Conversation( 88 | is_conversation, self.max_tokens_to_sample, filepath, update_file 89 | ) 90 | self.conversation.history_offset = history_offset 91 | session.proxies = proxies 92 | 93 | def ask( 94 | self, 95 | prompt: str, 96 | stream: bool = False, 97 | raw: bool = False, 98 | optimizer: str = None, 99 | conversationally: bool = False, 100 | ) -> dict: 101 | """Chat with AI 102 | 103 | Args: 104 | prompt (str): Prompt to be send. 105 | stream (bool, optional): Flag for streaming response. Defaults to False. 106 | raw (bool, optional): Stream back raw response as received. Defaults to False. 107 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 108 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 109 | Returns: 110 | dict[str, str] 111 | ```json 112 | { 113 | "message" : "How can I help you?" 
114 | } 115 | ``` 116 | """ 117 | conversation_prompt = self.conversation.gen_complete_prompt(prompt) 118 | if optimizer: 119 | if optimizer in self.__available_optimizers: 120 | conversation_prompt = getattr(Optimizers, optimizer)( 121 | conversation_prompt if conversationally else prompt 122 | ) 123 | else: 124 | raise exceptions.UnsupportedOptimizer( 125 | f"Optimizer '{optimizer}' is not one of {self.__available_optimizers}" 126 | ) 127 | session.headers.update(self.headers) 128 | payload = {"messages": [{"role": "user", "content": conversation_prompt}]} 129 | 130 | def for_stream(): 131 | response = session.post( 132 | self.chat_endpoint, json=payload, stream=True, timeout=self.timeout 133 | ) 134 | if not response.ok: 135 | raise exceptions.FailedToGenerateResponseError( 136 | f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}" 137 | ) 138 | for value in response.iter_lines( 139 | decode_unicode=True, 140 | delimiter="", 141 | chunk_size=self.stream_chunk_size, 142 | ): 143 | try: 144 | json_result: dict[str, str] = json.loads(value) 145 | message = json_result.get("message", "") 146 | clean_message = html.unescape(re.sub(r"<[^>]+>", "", message)) 147 | json_result["message"] = clean_message 148 | self.last_response.update(json_result) 149 | yield value if raw else json_result 150 | except json.decoder.JSONDecodeError: 151 | pass 152 | self.conversation.update_chat_history( 153 | prompt, self.get_message(self.last_response) 154 | ) 155 | 156 | def for_non_stream(): 157 | for _ in for_stream(): 158 | pass 159 | return self.last_response 160 | 161 | return for_stream() if stream else for_non_stream() 162 | 163 | def chat( 164 | self, 165 | prompt: str, 166 | stream: bool = False, 167 | optimizer: str = None, 168 | conversationally: bool = False, 169 | ) -> str: 170 | """Generate response `str` 171 | Args: 172 | prompt (str): Prompt to be send. 173 | stream (bool, optional): Flag for streaming response. Defaults to False. 174 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 175 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 
176 | Returns: 177 | str: Response generated 178 | """ 179 | 180 | def for_stream(): 181 | for response in self.ask( 182 | prompt, True, optimizer=optimizer, conversationally=conversationally 183 | ): 184 | yield self.get_message(response) 185 | 186 | def for_non_stream(): 187 | return self.get_message( 188 | self.ask( 189 | prompt, 190 | False, 191 | optimizer=optimizer, 192 | conversationally=conversationally, 193 | ) 194 | ) 195 | 196 | return for_stream() if stream else for_non_stream() 197 | 198 | def get_message(self, response: dict) -> str: 199 | """Retrieves message only from response 200 | 201 | Args: 202 | response (dict): Response generated by `self.ask` 203 | 204 | Returns: 205 | str: Message extracted 206 | """ 207 | assert isinstance(response, dict), "Response should be of dict data-type only" 208 | return response.get("message", "") 209 | 210 | 211 | class AsyncAI4CHAT(AsyncProvider): 212 | def __init__( 213 | self, 214 | is_conversation: bool = True, 215 | max_tokens: int = 600, 216 | temperature: float = 1, 217 | presence_penalty: int = 0, 218 | frequency_penalty: int = 0, 219 | top_p: float = 1, 220 | model: str = model, 221 | timeout: int = 30, 222 | intro: str = None, 223 | filepath: str = None, 224 | update_file: bool = True, 225 | proxies: dict = {}, 226 | history_offset: int = 10250, 227 | act: str = None, 228 | ): 229 | """Instantiates AsyncAI4CHAT 230 | 231 | Args: 232 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True. 233 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. 234 | temperature (float, optional): Charge of the generated text's randomness. Defaults to 1. 235 | presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0. 236 | frequency_penalty (int, optional): Chances of word being repeated. Defaults to 0. 237 | top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999. 238 | model (str, optional): LLM model name. Defaults to "gpt-3.5-turbo". 239 | timeout (int, optional): Http request timeout. Defaults to 30. 240 | intro (str, optional): Conversation introductory prompt. Defaults to None. 241 | filepath (str, optional): Path to file containing conversation history. Defaults to None. 242 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. 243 | proxies (dict, optional): Http request proxies. Defaults to {}. 244 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. 245 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. 
246 | """ 247 | self.is_conversation = is_conversation 248 | self.max_tokens_to_sample = max_tokens 249 | self.model = model 250 | self.temperature = temperature 251 | self.presence_penalty = presence_penalty 252 | self.frequency_penalty = frequency_penalty 253 | self.top_p = top_p 254 | self.chat_endpoint = "https://www.ai4chat.co/generate-response" 255 | self.stream_chunk_size = 64 256 | self.timeout = timeout 257 | self.last_response = {} 258 | self.headers = { 259 | "accept": "*/*", 260 | "accept-language": "en-US,en;q=0.9", 261 | "content-type": "application/json", 262 | "referer": "https://www.ai4chat.co/gpt/talkdirtytome", 263 | "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", 264 | } 265 | 266 | self.__available_optimizers = ( 267 | method 268 | for method in dir(Optimizers) 269 | if callable(getattr(Optimizers, method)) and not method.startswith("__") 270 | ) 271 | Conversation.intro = ( 272 | AwesomePrompts().get_act( 273 | act, raise_not_found=True, default=None, case_insensitive=True 274 | ) 275 | if act 276 | else intro or Conversation.intro 277 | ) 278 | self.conversation = Conversation( 279 | is_conversation, self.max_tokens_to_sample, filepath, update_file 280 | ) 281 | self.conversation.history_offset = history_offset 282 | self.session = httpx.AsyncClient( 283 | headers=self.headers, 284 | proxies=proxies, 285 | ) 286 | 287 | async def ask( 288 | self, 289 | prompt: str, 290 | stream: bool = False, 291 | raw: bool = False, 292 | optimizer: str = None, 293 | conversationally: bool = False, 294 | ) -> dict | AsyncGenerator: 295 | """Chat with AI asynchronously. 296 | 297 | Args: 298 | prompt (str): Prompt to be send. 299 | stream (bool, optional): Flag for streaming response. Defaults to False. 300 | raw (bool, optional): Stream back raw response as received. Defaults to False. 301 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 302 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 303 | Returns: 304 | dict|AsyncGenerator : ai content. 305 | ```json 306 | { 307 | "message" : "How can I help you?" 
308 | } 309 | ``` 310 | """ 311 | conversation_prompt = self.conversation.gen_complete_prompt(prompt) 312 | if optimizer: 313 | if optimizer in self.__available_optimizers: 314 | conversation_prompt = getattr(Optimizers, optimizer)( 315 | conversation_prompt if conversationally else prompt 316 | ) 317 | else: 318 | raise exceptions.UnsupportedOptimizer( 319 | f"Optimizer '{optimizer}' is not one of {self.__available_optimizers}" 320 | ) 321 | payload = {"messages": [{"role": "user", "content": conversation_prompt}]} 322 | 323 | async def for_stream(): 324 | async with self.session.stream( 325 | "POST", self.chat_endpoint, json=payload, timeout=self.timeout 326 | ) as response: 327 | if not response.is_success: 328 | raise Exception( 329 | f"Failed to generate response - ({response.status_code}, {response.reason_phrase})" 330 | ) 331 | 332 | async for value in response.aiter_lines(): 333 | try: 334 | json_result: dict[str, str] = json.loads(value) 335 | message = json_result.get("message", "") 336 | clean_message = html.unescape(re.sub(r"<[^>]+>", "", message)) 337 | json_result["message"] = clean_message 338 | self.last_response.update(json_result) 339 | yield value if raw else json_result 340 | 341 | except json.decoder.JSONDecodeError: 342 | pass 343 | self.conversation.update_chat_history( 344 | prompt, await self.get_message(self.last_response) 345 | ) 346 | 347 | async def for_non_stream(): 348 | async for _ in for_stream(): 349 | pass 350 | 351 | return self.last_response 352 | 353 | return for_stream() if stream else await for_non_stream() 354 | 355 | async def chat( 356 | self, 357 | prompt: str, 358 | stream: bool = False, 359 | optimizer: str = None, 360 | conversationally: bool = False, 361 | ) -> str | AsyncGenerator: 362 | """Generate response `str` asynchronously. 363 | Args: 364 | prompt (str): Prompt to be send. 365 | stream (bool, optional): Flag for streaming response. Defaults to False. 366 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 367 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 368 | Returns: 369 | str|AsyncGenerator: Response generated 370 | """ 371 | 372 | async def for_stream(): 373 | async_ask = await self.ask( 374 | prompt, True, optimizer=optimizer, conversationally=conversationally 375 | ) 376 | async for response in async_ask: 377 | yield await self.get_message(response) 378 | 379 | async def for_non_stream(): 380 | return await self.get_message( 381 | await self.ask( 382 | prompt, 383 | False, 384 | optimizer=optimizer, 385 | conversationally=conversationally, 386 | ) 387 | ) 388 | 389 | return for_stream() if stream else await for_non_stream() 390 | 391 | async def get_message(self, response: dict) -> str: 392 | """Retrieves message only from response asynchronously. 
393 | 394 | Args: 395 | response (dict): Response generated by `self.ask` 396 | 397 | Returns: 398 | str: Message extracted 399 | """ 400 | assert isinstance(response, dict), "Response should be of dict data-type only" 401 | return response.get("message", "") 402 | -------------------------------------------------------------------------------- /src/pytgpt/api/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = [ 2 | "v1", 3 | ] 4 | 5 | from fastapi import FastAPI 6 | from fastapi.responses import RedirectResponse 7 | from fastapi.staticfiles import StaticFiles 8 | from pytgpt import __version__ 9 | from pytgpt.utils import api_static_dir 10 | from pydantic import BaseModel 11 | from datetime import datetime, UTC 12 | from . import v1 13 | 14 | app = FastAPI( 15 | title="python-tgpt", 16 | summary="Interact with AI without API key", 17 | description=( 18 | "For **text** generation, **text-to-image** and **text-to-audio** conversions." 19 | "\n\n" 20 | "Access redoc at [/redoc](/redoc) endpoint." 21 | "\n\n" 22 | "Other documentation is available at official repo : [Simatwa/python-tgpt](https://github.com/Simatwa/python-tgpt)." 23 | ), 24 | version=__version__, 25 | contact={ 26 | "name": "Smartwa", 27 | "email": "simatwacaleb@proton.me", 28 | "url": "https://simatwa.vercel.app", 29 | }, 30 | license_info={ 31 | "name": "GNU v3", 32 | "url": "https://github.com/Simatwa/python-tgpt/blob/main/LICENSE?raw=true", 33 | }, 34 | ) 35 | 36 | 37 | class ServerStatus(BaseModel): 38 | is_alive: bool = True 39 | as_at: datetime 40 | 41 | 42 | @app.get("/", name="redirect-to-docs", include_in_schema=False) 43 | async def home(): 44 | """Redirect to docs""" 45 | return RedirectResponse("/docs") 46 | 47 | 48 | @app.get("/status", tags=["status"]) 49 | async def server_status() -> ServerStatus: 50 | """Server running status 51 | - `is_alive` : status 52 | - `as_at` : Time checked. 53 | """ 54 | return ServerStatus(as_at=datetime.now(UTC)) 55 | 56 | 57 | app.include_router(v1.app, prefix="/v1", tags=["v1"]) 58 | 59 | app.mount("/static", StaticFiles(directory=api_static_dir), name="static") 60 | -------------------------------------------------------------------------------- /src/pytgpt/api/__main__.py: -------------------------------------------------------------------------------- 1 | from pytgpt.console import API 2 | 3 | API.run() 4 | -------------------------------------------------------------------------------- /src/pytgpt/api/utils.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | from typing import Callable 3 | from fastapi import HTTPException, status 4 | from requests.exceptions import ProxyError, InvalidProxyURL, SSLError 5 | 6 | get_exception_string: str = lambda e: ( 7 | e.args[1] if e.args and len(e.args) > 1 else str(e) 8 | ) 9 | 10 | 11 | def api_exception_handler(func: Callable): 12 | """Auto-handles common exceptions raised accordingly including proxy related. 13 | 14 | Args: 15 | func (Callable): FastAPI endpoint 16 | """ 17 | 18 | @wraps(func) 19 | async def decorator(*args, **kwargs): 20 | try: 21 | return await func(*args, **kwargs) 22 | 23 | except (ProxyError, InvalidProxyURL, SSLError) as e: 24 | raise HTTPException( 25 | status_code=status.HTTP_400_BAD_REQUEST, 26 | detail=dict(message=f"Proxy related error. 
{get_exception_string(e)}"), 27 | ) 28 | except Exception as e: 29 | raise HTTPException( 30 | status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, 31 | detail=dict(message=get_exception_string(e)), 32 | ) 33 | 34 | return decorator 35 | -------------------------------------------------------------------------------- /src/pytgpt/api/v1.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, HTTPException, status, Request 2 | from fastapi.responses import Response, StreamingResponse, RedirectResponse 3 | from fastapi.encoders import jsonable_encoder 4 | from json import dumps 5 | from pydantic import BaseModel, field_validator, PositiveInt 6 | from typing import Union, Any, AsyncGenerator 7 | from pytgpt import gpt4free_providers 8 | from uuid import uuid4 9 | from .utils import api_exception_handler 10 | 11 | # providers 12 | from pytgpt.gpt4free import AsyncGPT4FREE 13 | from pytgpt.auto import AsyncAUTO 14 | from pytgpt.imager import AsyncImager 15 | from pytgpt.imager import AsyncProdia 16 | from pytgpt.utils import Audio 17 | from pytgpt.async_providers import tgpt_mapper as provider_map 18 | from pytgpt.utils import api_static_image_dir 19 | 20 | provider_map.update({"auto": AsyncAUTO}) 21 | 22 | image_providers = {"default": AsyncImager, "prodia": AsyncProdia} 23 | 24 | supported_providers = list(provider_map.keys()) + gpt4free_providers 25 | 26 | app = APIRouter() 27 | 28 | 29 | class ProvidersModel(BaseModel): 30 | tgpt: list[str] = list(provider_map.keys()) 31 | g4f: list[str] = gpt4free_providers 32 | 33 | model_config = { 34 | "json_schema_extra": { 35 | "examples": [ 36 | { 37 | "tgpt": [ 38 | "phind", 39 | "opengpt", 40 | "koboldai", 41 | ], 42 | "g4f": [ 43 | "Koala", 44 | "Blackbox", 45 | "FreeChatgpt", 46 | ], 47 | } 48 | ] 49 | } 50 | } 51 | 52 | 53 | class TextGenerationPayload(BaseModel): 54 | prompt: str 55 | provider: str = "auto" 56 | # is_conversation: bool = False 57 | whole: bool = False 58 | max_tokens: PositiveInt = 600 59 | timeout: PositiveInt = 30 60 | proxy: Union[dict[str, str], None] = None 61 | 62 | model_config = { 63 | "json_schema_extra": { 64 | "examples": [ 65 | { 66 | "prompt": "Hello there", 67 | "provider": "auto", 68 | "whole": False, 69 | "max_tokens": 600, 70 | "timeout": 30, 71 | "proxy": { 72 | "http": "socks4://38.54.6.39:4000", 73 | "https": "socks4://38.54.6.39:4000", 74 | }, 75 | } 76 | ] 77 | } 78 | } 79 | 80 | @field_validator("provider") 81 | def validate_provider(provider: str) -> object: 82 | if provider not in supported_providers: 83 | raise HTTPException( 84 | status_code=status.HTTP_400_BAD_REQUEST, 85 | detail=dict( 86 | message=f"Provider '{provider}' is not one of [{', '.join(supported_providers)}]", 87 | ), 88 | ) 89 | return provider 90 | 91 | 92 | class ProviderResponse(BaseModel): 93 | """ 94 | - `provider` : Provider name that generated response. 95 | - `text` : Response to the prompt. 96 | - `detail` : Any other special info. 97 | - `model` : Model used to generate response. 
98 | """ 99 | 100 | provider: Union[str, None] = None 101 | text: Union[str, None] = None 102 | body: Union[dict, None] = None 103 | detail: Union[Any, None] = None 104 | model: Union[str, None] = "default" 105 | 106 | model_config = { 107 | "json_schema_extra": { 108 | "examples": [ 109 | { 110 | "provider": "phind", 111 | "text": "How can I help you today?", 112 | "body": { 113 | "id": "chatcmpl-qnml7olyfeq5kw2r7pue", 114 | "object": "chat.completion.chunk", 115 | "created": 1712895194, 116 | "model": "trt-llm-phind-model-34b-8k-context", 117 | "choices": [ 118 | { 119 | "index": 0, 120 | "delta": {"content": "How can I help you today?"}, 121 | "finish_reason": None, 122 | } 123 | ], 124 | "detail": None, 125 | "model": None, 126 | }, 127 | "detail": "TypeError: NetworkError when attempting to fetch resource.", 128 | "model": "default", 129 | }, 130 | ], 131 | }, 132 | } 133 | 134 | 135 | class ImagePayload(BaseModel): 136 | prompt: str 137 | amount: PositiveInt = 1 138 | proxy: Union[dict[str, str], None] = None 139 | timeout: PositiveInt = 30 140 | provider: Union[str, None] = "default" 141 | 142 | model_config = { 143 | "json_schema_extra": { 144 | "examples": [ 145 | { 146 | "prompt": "Sunset view from ISS", 147 | "amount": 2, 148 | "proxy": { 149 | "http": "socks4://54.248.238.110:80", 150 | "https": "socks4://54.248.238.110:80", 151 | }, 152 | "timeout": 30, 153 | "provider": "default", 154 | }, 155 | { 156 | "prompt": "Developed Nairobi in 3050", 157 | "amount": 2, 158 | "proxy": None, 159 | "timeout": 30, 160 | }, 161 | ] 162 | } 163 | } 164 | 165 | @field_validator("amount") 166 | def validate_amount(amount: int) -> PositiveInt: 167 | if amount > 10: 168 | raise HTTPException( 169 | status_code=status.HTTP_400_BAD_REQUEST, 170 | detail=dict( 171 | message=f"Amount {amount} is out of range : 1-10", 172 | ), 173 | ) 174 | return amount 175 | 176 | @field_validator("provider") 177 | def validate_provider(provider: Union[str, None]) -> str: 178 | 179 | if provider is not None and not provider in image_providers: 180 | raise HTTPException( 181 | status_code=status.HTTP_400_BAD_REQUEST, 182 | detail=dict( 183 | message=f"Image provider '{provider}' is not one of [{', '.join(list(image_providers.keys()))}]", 184 | ), 185 | ) 186 | return "default" if provider is None else provider 187 | 188 | 189 | class ImageBytesPayload(BaseModel): 190 | prompt: str 191 | proxy: Union[dict[str, str], None] = None 192 | timeout: PositiveInt = 30 193 | provider: Union[str, None] = "default" 194 | 195 | model_config = { 196 | "json_schema_extra": { 197 | "examples": [ 198 | { 199 | "prompt": "Alan Walker performing", 200 | "proxy": { 201 | "http": "socks4://199.229.254.129:4145", 202 | "https": "socks4://199.229.254.129:4145", 203 | }, 204 | "timeout": 30, 205 | "provider": "default", 206 | }, 207 | {"prompt": "Developed Nairobi in 3050", "proxy": None, "timeout": 30}, 208 | ] 209 | } 210 | } 211 | 212 | @field_validator("provider") 213 | def validate_provider(provider: Union[str, None]) -> str: 214 | if provider is not None and not provider in image_providers: 215 | raise HTTPException( 216 | status_code=status.HTTP_400_BAD_REQUEST, 217 | detail=dict( 218 | message=f"Image provider '{provider}' is not one of [{', '.join(list(image_providers.keys()))}]", 219 | ), 220 | ) 221 | return "default" if provider is None else provider 222 | 223 | 224 | class ImageBytesResponse(BaseModel): 225 | image: bytes 226 | 227 | 228 | class ImageResponse(BaseModel): 229 | """ 230 | - `urls` : List of urls 231 | """ 232 | 
233 | urls: list[str] 234 | 235 | model_config = { 236 | "json_schema_extra": { 237 | "examples": [ 238 | { 239 | "urls": [ 240 | "http://localhost:8000/static/images/80e374cc-4546-4650-8203-533a04a9c06a.jpeg", 241 | "http://localhost:8000/static/images/80e374cc-4546-4650-8203-533a04a9c06a_1.jpeg", 242 | ] 243 | } 244 | ] 245 | } 246 | } 247 | 248 | 249 | class TextToAudioPayload(BaseModel): 250 | message: str 251 | voice: Union[str, None] = "en-US-Wavenet-C" 252 | proxy: Union[dict[str, str], None] = None 253 | timeout: int = 30 254 | model_config = { 255 | "json_schema_extra": { 256 | "example": { 257 | "message": "There is a place for people like you.", 258 | "voice": "en-US-Wavenet-C", 259 | "proxy": { 260 | "http": "socks4://199.229.254.129:4145", 261 | "https": "socks4://199.229.254.129:4145", 262 | }, 263 | "timeout": 30, 264 | } 265 | } 266 | } 267 | 268 | @field_validator("voice") 269 | def validate_voice(voice) -> str: 270 | if not voice in Audio.all_voices: 271 | raise HTTPException( 272 | status_code=status.HTTP_400_BAD_REQUEST, 273 | detail=dict( 274 | message=f"Voice '{voice}' is not one of '[{', '.join(Audio.all_voices)}]" 275 | ), 276 | ) 277 | return "en-US-Wavenet-C" if not voice else voice 278 | 279 | 280 | class TextToAudioResponse(BaseModel): 281 | """ 282 | - `url` : Link to generated audio file. 283 | """ 284 | 285 | url: str 286 | 287 | model_config = { 288 | "json_schema_extra": { 289 | "example": { 290 | "url": "http://localhost:8000/static/audios/f9d4233f-9b78-4d87-bc27-5d2ab928f673.mp3", 291 | } 292 | } 293 | } 294 | 295 | 296 | async def init_provider(payload: TextGenerationPayload) -> object: 297 | return provider_map.get(payload.provider, AsyncGPT4FREE)( 298 | is_conversation=False, # payload.is_conversation, 299 | max_tokens=payload.max_tokens, 300 | timeout=payload.timeout, 301 | proxies=payload.proxy, 302 | ) 303 | 304 | 305 | @app.get( 306 | "/chat/providers", 307 | ) 308 | async def llm_providers() -> ProvidersModel: 309 | """LLM providers for text generation 310 | 311 | - `tgpt` : List of [python-tgpt](https://github.com/Simatwa/tgpt2)-based providers. 312 | - `g4f` : List of [gpt4free](https://github.com/xtekky/gpt4free)-based providers. 313 | 314 | **Warning** : Not all of *g4f-based* providers are functional. 315 | """ 316 | return ProvidersModel() 317 | 318 | 319 | @app.post("/chat/nostream", name="no-stream") 320 | @api_exception_handler 321 | async def non_stream(payload: TextGenerationPayload) -> ProviderResponse: 322 | """No response streaming. 323 | 324 | - `prompt` : User query. 325 | - `provider` : LLM provider name. 326 | - `whole` : Return whole response body instead of text only. 327 | - `max_tokens` : Maximum number of tokens to be generated upon completion. 328 | - `timeout` : Http request timeout. 329 | - `proxy` : Http request proxy. 330 | 331 | *Ensure `proxy` value is correct otherwise make it `null`* 332 | 333 | **NOTE** : Example values are modified for illustration purposes. 
334 | """ 335 | provider_obj: AsyncGPT4FREE = await init_provider(payload) 336 | ai_generated_text: str = await provider_obj.chat(payload.prompt) 337 | return ProviderResponse( 338 | provider=( 339 | provider_obj.provider_name 340 | if payload.provider == "auto" 341 | else payload.provider 342 | ), 343 | text=ai_generated_text, 344 | body=provider_obj.last_response if payload.whole else None, 345 | ) 346 | 347 | 348 | async def generate_streaming_response(payload: TextGenerationPayload) -> AsyncGenerator: 349 | provider_obj = await init_provider(payload) 350 | async_chat = await provider_obj.chat(payload.prompt, stream=True) 351 | async for text in async_chat: 352 | response = ProviderResponse( 353 | provider=( 354 | provider_obj.provider_name 355 | if payload.provider == "auto" 356 | else payload.provider 357 | ), 358 | text=text, 359 | body=provider_obj.last_response if payload.whole else None, 360 | ) 361 | yield dumps(jsonable_encoder(response)) + "\n" 362 | 363 | 364 | @app.post("/chat/stream", name="stream", response_model=ProviderResponse) 365 | @api_exception_handler 366 | async def stream(payload: TextGenerationPayload) -> Any: 367 | """Stream back response as received. 368 | 369 | - `prompt` : User query. 370 | - `provider` : LLM provider name. 371 | - `whole` : Return whole response body instead of text only. 372 | - `max_tokens` : Maximum number of tokens to be generated upon completion. 373 | - `timeout` : Http request timeout. 374 | - `proxy` : Http request proxy. 375 | 376 | **NOTE** : 377 | - *Example values are modified for illustration purposes.* 378 | - *Ensure `proxy` value is correct otherwise make it `null`* 379 | """ 380 | return StreamingResponse( 381 | generate_streaming_response(payload), 382 | media_type="text/event-stream", 383 | ) 384 | 385 | 386 | @app.post("/image", name="prompt-to-image") 387 | @api_exception_handler 388 | async def generate_image(payload: ImagePayload, request: Request) -> ImageResponse: 389 | """Generate images from prompt 390 | 391 | - `prompt` : Image description 392 | - `amount` : Images to be generated. Maximum of 10. 393 | - `timeout` : Http request timeout. 394 | - `proxy` : Http request proxies. 395 | - `provider` : Image provider name ie. *[default, prodia]* 396 | 397 | **NOTE** : *Ensure `proxy` value is correct otherwise make it `null`* 398 | """ 399 | host = f"{request.url.scheme}://{request.url.netloc}" 400 | image_gen_obj = image_providers.get(payload.provider)( 401 | timeout=payload.timeout, proxies=payload.proxy 402 | ) 403 | image_generator = await image_gen_obj.generate( 404 | prompt=payload.prompt, amount=payload.amount, stream=True 405 | ) 406 | image_urls = await image_gen_obj.save( 407 | image_generator, 408 | name=uuid4().__str__(), 409 | dir=api_static_image_dir, 410 | filenames_prefix=f"{host}/static/images/", 411 | ) 412 | return ImageResponse(urls=image_urls) 413 | 414 | 415 | @app.post("/image/bytes", name="prompt-to-image (bytes)") 416 | @api_exception_handler 417 | async def generate_image(payload: ImageBytesPayload, request: Request) -> Response: 418 | """Generate images from prompt and return raw bytes 419 | 420 | - `prompt` : Image description 421 | - `timeout` : Http request timeout. 422 | - `proxy` : Http request proxies. 423 | - `provider` : Image provider name ie. 
*[default, prodia]* 424 | 425 | **Only one image is generated.** 426 | 427 | **NOTE** : *Ensure `proxy` value is correct otherwise make it `null`* 428 | """ 429 | image_gen_obj = image_providers.get(payload.provider)( 430 | timeout=payload.timeout, proxies=payload.proxy 431 | ) 432 | image_list = await image_gen_obj.generate(prompt=payload.prompt) 433 | response = Response( 434 | image_list[0], 435 | media_type=f"image/{image_gen_obj.image_extension}", 436 | ) 437 | response.headers["Content-Disposition"] = ( 438 | f"attachment; filename={payload.prompt[:25]+'...' if len(payload.prompt)>25 else payload.prompt}.{image_gen_obj.image_extension}" 439 | ) 440 | return response 441 | 442 | 443 | @app.get("/image/bytes", name="prompt-to-image (bytes)") 444 | @api_exception_handler 445 | async def generate_image_return_bytes( 446 | prompt: str, 447 | proxy: Union[str, None] = None, 448 | timeout: int = 30, 449 | provider: str = "default", 450 | ): 451 | """Generate images from prompt and return raw bytes 452 | 453 | - `prompt` : Image description 454 | - `timeout` : Http request timeout. 455 | - `proxy` : Http request proxies. 456 | - `provider` : Image provider name ie. *[default, prodia]* 457 | 458 | **Only one image is generated.** 459 | 460 | **NOTE** : *Ensure `proxy` value is correct otherwise make it `null`* 461 | """ 462 | image_gen_obj = image_providers.get(provider, "default")( 463 | timeout=timeout, proxies={"https": proxy} if proxy else {} 464 | ) 465 | image_list = await image_gen_obj.generate(prompt=prompt) 466 | response = Response( 467 | image_list[0], 468 | media_type=f"image/{image_gen_obj.image_extension}", 469 | ) 470 | response.headers["Content-Disposition"] = ( 471 | f"attachment; filename={prompt[:25]+'...' if len(prompt)>25 else prompt}.{image_gen_obj.image_extension}" 472 | ) 473 | return response 474 | 475 | 476 | @app.get("/image/bytes/redirect", name="prompt-to-image (bytes - redirect) ") 477 | @api_exception_handler 478 | async def redirect_image_generation(prompt: str): 479 | """Redirect image generation request to [pollinations.ai](https://pollinations.ai)""" 480 | return RedirectResponse( 481 | f"https://image.pollinations.ai/prompt/{prompt}", 482 | ) 483 | 484 | 485 | @app.post("/voice", name="text-to-voice") 486 | @api_exception_handler 487 | async def text_to_audio( 488 | payload: TextToAudioPayload, request: Request 489 | ) -> TextToAudioResponse: 490 | """Vocalize text 491 | 492 | - `message` : Text to be synthesised. 493 | - `voice` : The voice to use for speech synthesis. 494 | - `timeout` : Http request timeout in seconds. 495 | - `proxy` : Http request proxy. 496 | 497 | **NOTE** : *Ensure `proxy` value is correct otherwise make it `null`* 498 | """ 499 | host = f"{request.url.scheme}://{request.url.netloc}" 500 | filename = uuid4().__str__() + ".mp3" 501 | await Audio.async_text_to_audio( 502 | message=payload.message, 503 | voice=payload.voice, 504 | proxies=payload.proxy, 505 | timeout=payload.timeout, 506 | save_to=Audio.cache_dir.joinpath(filename).as_posix(), 507 | ) 508 | return TextToAudioResponse(url=f"{host}/static/audios/" + filename) 509 | 510 | 511 | @app.get("/voice", name="text-to-voice (bytes)") 512 | @api_exception_handler 513 | async def text_to_audio_bytes( 514 | message: str, 515 | voice: str = "en-US-Wavenet-C", 516 | timeout: int = 30, 517 | proxy: Union[str, None] = None, 518 | ): 519 | """Return raw audio 520 | 521 | - `message` : Text to be synthesised. 522 | - `voice` : The voice to use for speech synthesis. 
523 | - `timeout` : Http request timeout in seconds. 524 | - `proxy` : Http request proxy. 525 | 526 | **NOTE** : *Ensure `proxy` value is correct otherwise make it `null`* 527 | """ 528 | image_bytes = await Audio.async_text_to_audio( 529 | message=message, 530 | voice=voice if voice in Audio.all_voices else "en-US-Wavenet-C", 531 | proxies={"https": proxy} if proxy else {}, 532 | timeout=timeout, 533 | ) 534 | return Response( 535 | content=image_bytes, 536 | media_type="audio/mpeg", 537 | headers={ 538 | "Content-Disposition": f"attachment; filename={uuid4().__str__()}.mp3" 539 | }, 540 | ) 541 | -------------------------------------------------------------------------------- /src/pytgpt/async_providers.py: -------------------------------------------------------------------------------- 1 | from pytgpt.phind import AsyncPHIND 2 | from pytgpt.openai import AsyncOPENAI 3 | from pytgpt.koboldai import AsyncKOBOLDAI 4 | from pytgpt.groq import AsyncGROQ 5 | from pytgpt.novita import AsyncNOVITA 6 | from pytgpt.ai4chat import AsyncAI4CHAT 7 | from pytgpt.gpt4free import AsyncGPT4FREE 8 | 9 | mapper: dict[str, object] = { 10 | "phind": AsyncPHIND, 11 | "koboldai": AsyncKOBOLDAI, 12 | "gpt4free": AsyncGPT4FREE, 13 | "groq": AsyncGROQ, 14 | "openai": AsyncOPENAI, 15 | "novita": AsyncNOVITA, 16 | } 17 | 18 | 19 | tgpt_mapper: dict[str, object] = { 20 | "phind": AsyncPHIND, 21 | "koboldai": AsyncKOBOLDAI, 22 | "ai4chat": AsyncAI4CHAT, 23 | } 24 | -------------------------------------------------------------------------------- /src/pytgpt/auto/__init__.py: -------------------------------------------------------------------------------- 1 | from .main import AUTO 2 | from .main import AsyncAUTO 3 | 4 | __info__ = "Interact with working tgpt-based providers" 5 | 6 | __all__ = ["AUTO", "AsyncAUTO"] 7 | -------------------------------------------------------------------------------- /src/pytgpt/auto/errors.py: -------------------------------------------------------------------------------- 1 | class AllProvidersFailure(Exception): 2 | """None of the providers generated response successfully""" 3 | 4 | pass 5 | -------------------------------------------------------------------------------- /src/pytgpt/auto/main.py: -------------------------------------------------------------------------------- 1 | from pytgpt.base import Provider, AsyncProvider 2 | from pytgpt.koboldai import KOBOLDAI, AsyncKOBOLDAI 3 | from pytgpt.phind import PHIND, AsyncPHIND 4 | from pytgpt.perplexity import PERPLEXITY 5 | from pytgpt.gpt4free import GPT4FREE, AsyncGPT4FREE 6 | from pytgpt.gpt4free.utils import TestProviders 7 | from pytgpt.auto.errors import AllProvidersFailure 8 | from pytgpt.utils import Conversation 9 | from pytgpt.async_providers import tgpt_mapper as async_provider_map 10 | from typing import AsyncGenerator 11 | 12 | from typing import Union 13 | from typing import Any 14 | import logging 15 | 16 | 17 | provider_map: dict[str, Union[KOBOLDAI, PHIND, PERPLEXITY, GPT4FREE]] = { 18 | "phind": PHIND, 19 | "perplexity": PERPLEXITY, 20 | "koboldai": KOBOLDAI, 21 | "gpt4free": GPT4FREE, 22 | } 23 | 24 | 25 | class AUTO(Provider): 26 | def __init__( 27 | self, 28 | is_conversation: bool = True, 29 | max_tokens: int = 600, 30 | timeout: int = 30, 31 | intro: str = None, 32 | filepath: str = None, 33 | update_file: bool = True, 34 | proxies: dict = {}, 35 | history_offset: int = 10250, 36 | act: str = None, 37 | exclude: list[str] = [], 38 | ): 39 | """Instantiates AUTO 40 | 41 | Args: 42 | is_conversation (bool, 
optional): Flag for chatting conversationally. Defaults to True 43 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. 44 | timeout (int, optional): Http request timeout. Defaults to 30. 45 | intro (str, optional): Conversation introductory prompt. Defaults to None. 46 | filepath (str, optional): Path to file containing conversation history. Defaults to None. 47 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. 48 | proxies (dict, optional): Http request proxies. Defaults to {}. 49 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. 50 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. 51 | exclude(list[str], optional): List of providers to be excluded. Defaults to []. 52 | """ 53 | self.provider: Union[KOBOLDAI, PHIND, PERPLEXITY, GPT4FREE] = None 54 | self.provider_name: str = None 55 | self.is_conversation = is_conversation 56 | self.max_tokens = max_tokens 57 | self.timeout = timeout 58 | self.intro = intro 59 | self.filepath = filepath 60 | self.update_file = update_file 61 | self.proxies = proxies 62 | self.history_offset = history_offset 63 | self.act = act 64 | self.exclude = exclude 65 | 66 | @property 67 | def last_response(self) -> dict[str, Any]: 68 | return self.provider.last_response 69 | 70 | @property 71 | def conversation(self) -> Conversation: 72 | return self.provider.conversation 73 | 74 | def ask( 75 | self, 76 | prompt: str, 77 | stream: bool = False, 78 | raw: bool = False, 79 | optimizer: str = None, 80 | conversationally: bool = False, 81 | run_new_test: bool = False, 82 | ) -> dict: 83 | """Chat with AI 84 | 85 | Args: 86 | prompt (str): Prompt to be send. 87 | stream (bool, optional): Flag for streaming response. Defaults to False. 88 | raw (bool, optional): Stream back raw response as received. Defaults to False. 89 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 90 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 91 | run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False. 
92 | Returns: 93 | dict : {} 94 | """ 95 | ask_kwargs: dict[str, Union[str, bool]] = { 96 | "prompt": prompt, 97 | "stream": stream, 98 | "raw": raw, 99 | "optimizer": optimizer, 100 | "conversationally": conversationally, 101 | } 102 | 103 | # tgpt-based providers 104 | for provider_name, provider_obj in provider_map.items(): 105 | # continue 106 | if provider_name in self.exclude: 107 | continue 108 | try: 109 | self.provider_name = f"tgpt-{provider_name}" 110 | self.provider = provider_obj( 111 | is_conversation=self.is_conversation, 112 | max_tokens=self.max_tokens, 113 | timeout=self.timeout, 114 | intro=self.intro, 115 | filepath=self.filepath, 116 | update_file=self.update_file, 117 | proxies=self.proxies, 118 | history_offset=self.history_offset, 119 | act=self.act, 120 | ) 121 | 122 | def for_stream(): 123 | for chunk in self.provider.ask(**ask_kwargs): 124 | yield chunk 125 | 126 | def for_non_stream(): 127 | return self.provider.ask(**ask_kwargs) 128 | 129 | return for_stream() if stream else for_non_stream() 130 | 131 | except Exception as e: 132 | logging.debug( 133 | f"Failed to generate response using provider {provider_name} - {e}" 134 | ) 135 | 136 | # g4f-based providers 137 | 138 | for provider_info in TestProviders(timeout=self.timeout).get_results( 139 | run=run_new_test 140 | ): 141 | if provider_info["name"] in self.exclude: 142 | continue 143 | try: 144 | self.provider_name = f"g4f-{provider_info['name']}" 145 | self.provider = GPT4FREE( 146 | provider=provider_info["name"], 147 | is_conversation=self.is_conversation, 148 | max_tokens=self.max_tokens, 149 | intro=self.intro, 150 | filepath=self.filepath, 151 | update_file=self.update_file, 152 | proxies=self.proxies, 153 | history_offset=self.history_offset, 154 | act=self.act, 155 | ) 156 | 157 | def for_stream(): 158 | for chunk in self.provider.ask(**ask_kwargs): 159 | yield chunk 160 | 161 | def for_non_stream(): 162 | return self.provider.ask(**ask_kwargs) 163 | 164 | return for_stream() if stream else for_non_stream() 165 | 166 | except Exception as e: 167 | logging.debug( 168 | f"Failed to generate response using GPT4FREE-based provider {provider_name} - {e}" 169 | ) 170 | 171 | raise AllProvidersFailure( 172 | "None of the providers generated response successfully." 173 | ) 174 | 175 | def chat( 176 | self, 177 | prompt: str, 178 | stream: bool = False, 179 | optimizer: str = None, 180 | conversationally: bool = False, 181 | run_new_test: bool = False, 182 | ) -> str: 183 | """Generate response `str` 184 | Args: 185 | prompt (str): Prompt to be send. 186 | stream (bool, optional): Flag for streaming response. Defaults to False. 187 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 188 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 189 | run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False. 
190 | Returns: 191 | str: Response generated 192 | """ 193 | 194 | def for_stream(): 195 | for response in self.ask( 196 | prompt, 197 | True, 198 | optimizer=optimizer, 199 | conversationally=conversationally, 200 | run_new_test=run_new_test, 201 | ): 202 | yield self.get_message(response) 203 | 204 | def for_non_stream(): 205 | ask_response = self.ask( 206 | prompt, 207 | False, 208 | optimizer=optimizer, 209 | conversationally=conversationally, 210 | run_new_test=run_new_test, 211 | ) 212 | return self.get_message(ask_response) 213 | 214 | return for_stream() if stream else for_non_stream() 215 | 216 | def get_message(self, response: dict) -> str: 217 | """Retrieves message only from response 218 | 219 | Args: 220 | response (dict): Response generated by `self.ask` 221 | 222 | Returns: 223 | str: Message extracted 224 | """ 225 | assert self.provider is not None, "Chat with AI first" 226 | return self.provider.get_message(response) 227 | 228 | 229 | class AsyncAUTO(AsyncProvider): 230 | def __init__( 231 | self, 232 | is_conversation: bool = True, 233 | max_tokens: int = 600, 234 | timeout: int = 30, 235 | intro: str = None, 236 | filepath: str = None, 237 | update_file: bool = True, 238 | proxies: dict = {}, 239 | history_offset: int = 10250, 240 | act: str = None, 241 | exclude: list[str] = [], 242 | ): 243 | """Instantiates AsyncAUTO 244 | 245 | Args: 246 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True 247 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. 248 | timeout (int, optional): Http request timeout. Defaults to 30. 249 | intro (str, optional): Conversation introductory prompt. Defaults to None. 250 | filepath (str, optional): Path to file containing conversation history. Defaults to None. 251 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. 252 | proxies (dict, optional): Http request proxies. Defaults to {}. 253 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. 254 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. 255 | exclude(list[str], optional): List of providers to be excluded. Defaults to []. 256 | """ 257 | self.provider: Union[ 258 | AsyncKOBOLDAI, 259 | AsyncPHIND, 260 | AsyncGPT4FREE, 261 | ] = None 262 | self.provider_name: str = None 263 | self.is_conversation = is_conversation 264 | self.max_tokens = max_tokens 265 | self.timeout = timeout 266 | self.intro = intro 267 | self.filepath = filepath 268 | self.update_file = update_file 269 | self.proxies = proxies 270 | self.history_offset = history_offset 271 | self.act = act 272 | self.exclude = exclude 273 | 274 | @property 275 | def last_response(self) -> dict[str, Any]: 276 | return self.provider.last_response 277 | 278 | @property 279 | def conversation(self) -> Conversation: 280 | return self.provider.conversation 281 | 282 | async def ask( 283 | self, 284 | prompt: str, 285 | stream: bool = False, 286 | raw: bool = False, 287 | optimizer: str = None, 288 | conversationally: bool = False, 289 | run_new_test: bool = False, 290 | ) -> dict | AsyncGenerator: 291 | """Chat with AI asynchronously. 292 | 293 | Args: 294 | prompt (str): Prompt to be send. 295 | stream (bool, optional): Flag for streaming response. Defaults to False. 296 | raw (bool, optional): Stream back raw response as received. Defaults to False. 
297 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 298 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 299 | run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False. 300 | Returns: 301 | dict|AsyncGenerator : ai response. 302 | """ 303 | ask_kwargs: dict[str, Union[str, bool]] = { 304 | "prompt": prompt, 305 | "stream": stream, 306 | "raw": raw, 307 | "optimizer": optimizer, 308 | "conversationally": conversationally, 309 | } 310 | 311 | # tgpt-based providers 312 | for provider_name, provider_obj in async_provider_map.items(): 313 | if provider_name in self.exclude: 314 | continue 315 | try: 316 | self.provider_name = f"tgpt-{provider_name}" 317 | self.provider = provider_obj( 318 | is_conversation=self.is_conversation, 319 | max_tokens=self.max_tokens, 320 | timeout=self.timeout, 321 | intro=self.intro, 322 | filepath=self.filepath, 323 | update_file=self.update_file, 324 | proxies=self.proxies, 325 | history_offset=self.history_offset, 326 | act=self.act, 327 | ) 328 | 329 | async def for_stream(): 330 | async_ask = await self.provider.ask(**ask_kwargs) 331 | async for chunk in async_ask: 332 | yield chunk 333 | 334 | async def for_non_stream(): 335 | return await self.provider.ask(**ask_kwargs) 336 | 337 | return for_stream() if stream else await for_non_stream() 338 | 339 | except Exception as e: 340 | logging.debug( 341 | f"Failed to generate response using provider {provider_name} - {e}" 342 | ) 343 | 344 | # g4f-based providers 345 | 346 | for provider_info in TestProviders(timeout=self.timeout).get_results( 347 | run=run_new_test 348 | ): 349 | if provider_info["name"] in self.exclude: 350 | continue 351 | try: 352 | self.provider_name = f"g4f-{provider_info['name']}" 353 | self.provider = AsyncGPT4FREE( 354 | provider=provider_info["name"], 355 | is_conversation=self.is_conversation, 356 | max_tokens=self.max_tokens, 357 | intro=self.intro, 358 | filepath=self.filepath, 359 | update_file=self.update_file, 360 | proxies=self.proxies, 361 | history_offset=self.history_offset, 362 | act=self.act, 363 | ) 364 | 365 | async def for_stream(): 366 | async_ask = await self.provider.ask(**ask_kwargs) 367 | async for chunk in async_ask: 368 | yield chunk 369 | 370 | async def for_non_stream(): 371 | return await self.provider.ask(**ask_kwargs) 372 | 373 | return for_stream() if stream else await for_non_stream() 374 | 375 | except Exception as e: 376 | logging.debug( 377 | f"Failed to generate response using GPT4FREE-based provider {provider_info['name']} - {e}" 378 | ) 379 | 380 | raise AllProvidersFailure( 381 | "None of the providers generated response successfully." 382 | ) 383 | 384 | async def chat( 385 | self, 386 | prompt: str, 387 | stream: bool = False, 388 | optimizer: str = None, 389 | conversationally: bool = False, 390 | run_new_test: bool = False, 391 | ) -> str | AsyncGenerator: 392 | """Generate response `str` asynchronously. 393 | Args: 394 | prompt (str): Prompt to be sent. 395 | stream (bool, optional): Flag for streaming response. Defaults to False. 396 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 397 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 398 | run_new_test (bool, optional): Perform new test on g4f-based providers. Defaults to False. 
399 | Returns: 400 | str|AsyncGenerator: Response generated 401 | """ 402 | 403 | async def for_stream(): 404 | async_ask = await self.ask( 405 | prompt, 406 | True, 407 | optimizer=optimizer, 408 | conversationally=conversationally, 409 | run_new_test=run_new_test, 410 | ) 411 | async for response in async_ask: 412 | yield await self.get_message(response) 413 | 414 | async def for_non_stream(): 415 | ask_response = await self.ask( 416 | prompt, 417 | False, 418 | optimizer=optimizer, 419 | conversationally=conversationally, 420 | run_new_test=run_new_test, 421 | ) 422 | return await self.get_message(ask_response) 423 | 424 | return for_stream() if stream else await for_non_stream() 425 | 426 | async def get_message(self, response: dict) -> str: 427 | """Retrieves message only from response 428 | 429 | Args: 430 | response (dict): Response generated by `self.ask` 431 | 432 | Returns: 433 | str: Message extracted 434 | """ 435 | assert self.provider is not None, "Chat with AI first" 436 | return await self.provider.get_message(response) 437 | 438 | 439 | if __name__ == "__main__": 440 | import asyncio 441 | 442 | async def main(): 443 | ai = AsyncAUTO() 444 | async_chat = await ai.chat("hello there", True) 445 | async for response in async_chat: 446 | print(response) 447 | 448 | asyncio.run(main()) 449 | -------------------------------------------------------------------------------- /src/pytgpt/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | from abc import abstractmethod 3 | 4 | 5 | class Provider(ABC): 6 | """Base class for providers""" 7 | 8 | @abstractmethod 9 | def ask( 10 | self, 11 | prompt: str, 12 | stream: bool = False, 13 | raw: bool = False, 14 | optimizer: str = None, 15 | conversationally: bool = False, 16 | ) -> dict: 17 | """Chat with AI 18 | 19 | Args: 20 | prompt (str): Prompt to be sent 21 | stream (bool, optional): Flag for streaming response. Defaults to False. 22 | raw (bool, optional): Stream back raw response as received 23 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]` 24 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 25 | Returns: 26 | dict : {} 27 | ```json 28 | { 29 | "completion": "\nNext: domestic cat breeds with short hair >>", 30 | "stop_reason": null, 31 | "truncated": false, 32 | "stop": null, 33 | "model": "llama-2-13b-chat", 34 | "log_id": "cmpl-3kYiYxSNDvgMShSzFooz6t", 35 | "exception": null 36 | } 37 | ``` 38 | """ 39 | raise NotImplementedError("Method needs to be implemented in subclass") 40 | 41 | @abstractmethod 42 | def chat( 43 | self, 44 | prompt: str, 45 | stream: bool = False, 46 | optimizer: str = None, 47 | conversationally: bool = False, 48 | ) -> str: 49 | """Generate response `str` 50 | Args: 51 | prompt (str): Prompt to be sent 52 | stream (bool, optional): Flag for streaming response. Defaults to False. 53 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]` 54 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 
55 | Returns: 56 | str: Response generated 57 | """ 58 | raise NotImplementedError("Method needs to be implemented in subclass") 59 | 60 | @abstractmethod 61 | def get_message(self, response: dict) -> str: 62 | """Retrieves message only from response 63 | 64 | Args: 65 | response (dict): Response generated by `self.ask` 66 | 67 | Returns: 68 | str: Message extracted 69 | """ 70 | raise NotImplementedError("Method needs to be implemented in subclass") 71 | 72 | 73 | class AsyncProvider(ABC): 74 | """Asynchronous base class for providers""" 75 | 76 | @abstractmethod 77 | async def ask( 78 | self, 79 | prompt: str, 80 | stream: bool = False, 81 | raw: bool = False, 82 | optimizer: str = None, 83 | conversationally: bool = False, 84 | ) -> dict: 85 | """Asynchronously chat with AI 86 | 87 | Args: 88 | prompt (str): Prompt to be sent 89 | stream (bool, optional): Flag for streaming response. Defaults to False. 90 | raw (bool, optional): Stream back raw response as received 91 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]` 92 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 93 | Returns: 94 | dict : {} 95 | ```json 96 | { 97 | "completion": "\nNext: domestic cat breeds with short hair >>", 98 | "stop_reason": null, 99 | "truncated": false, 100 | "stop": null, 101 | "model": "llama-2-13b-chat", 102 | "log_id": "cmpl-3kYiYxSNDvgMShSzFooz6t", 103 | "exception": null 104 | } 105 | ``` 106 | """ 107 | raise NotImplementedError("Method needs to be implemented in subclass") 108 | 109 | @abstractmethod 110 | async def chat( 111 | self, 112 | prompt: str, 113 | stream: bool = False, 114 | optimizer: str = None, 115 | conversationally: bool = False, 116 | ) -> str: 117 | """Asynchronously generate response `str` 118 | Args: 119 | prompt (str): Prompt to be sent 120 | stream (bool, optional): Flag for streaming response. Defaults to False. 121 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]` 122 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 123 | Returns: 124 | str: Response generated 125 | """ 126 | raise NotImplementedError("Method needs to be implemented in subclass") 127 | 128 | @abstractmethod 129 | async def get_message(self, response: dict) -> str: 130 | """Asynchronously retrieves message only from response 131 | 132 | Args: 133 | response (dict): Response generated by `self.ask` 134 | 135 | Returns: 136 | str: Message extracted 137 | """ 138 | raise NotImplementedError("Method needs to be implemented in subclass") 139 | -------------------------------------------------------------------------------- /src/pytgpt/deepseek/__init__.py: -------------------------------------------------------------------------------- 1 | from pytgpt.deepseek.main import DEEPSEEK, AsyncDEEPSEEK 2 | from pytgpt.openai.main import session 3 | 4 | 5 | __info__ = "Interact with DeepSeek AI models. 
" "API key is required" 6 | 7 | __all__ = ["DEEPSEEK", "AsyncDEEPSEEK", "session"] 8 | -------------------------------------------------------------------------------- /src/pytgpt/deepseek/main.py: -------------------------------------------------------------------------------- 1 | from pytgpt.openai import AsyncOPENAI, OPENAI 2 | 3 | model = "deepseek-chat" 4 | """Default model""" 5 | 6 | available_models = ("deepseek-reasoner", model) 7 | 8 | 9 | class DEEPSEEK(OPENAI): 10 | 11 | def __init__(self, *args, **kwargs): 12 | kwargs.setdefault("model", model) 13 | super().__init__(*args, **kwargs) 14 | self.chat_endpoint = "https://api.deepseek.com/chat/completions" 15 | 16 | 17 | class AsyncDEEPSEEK(AsyncOPENAI): 18 | 19 | def __init__(self, *args, **kwargs): 20 | kwargs.setdefault("model", model) 21 | super().__init__(*args, **kwargs) 22 | self.chat_endpoint = "https://api.deepseek.com/chat/completions" 23 | -------------------------------------------------------------------------------- /src/pytgpt/exceptions.py: -------------------------------------------------------------------------------- 1 | class FailedToGenerateResponseError(Exception): 2 | """Provider failed to fetch response""" 3 | 4 | 5 | class UnsupportedModelError(Exception): 6 | """Model passed is not supported by the provider""" 7 | 8 | 9 | class UnsupportedOptimizer(Exception): 10 | """Unknown optimizer passed""" 11 | -------------------------------------------------------------------------------- /src/pytgpt/gpt4all/__init__.py: -------------------------------------------------------------------------------- 1 | from .main import GPT4ALL 2 | 3 | __info__ = "Interact with offline models" 4 | 5 | __all__ = [ 6 | "GPT4ALL", 7 | ] 8 | -------------------------------------------------------------------------------- /src/pytgpt/gpt4all/main.py: -------------------------------------------------------------------------------- 1 | from pytgpt.utils import Optimizers 2 | from pytgpt.utils import Conversation 3 | from pytgpt.utils import AwesomePrompts 4 | from pytgpt.base import Provider 5 | from gpt4all import GPT4All 6 | from gpt4all.gpt4all import empty_chat_session 7 | from gpt4all.gpt4all import append_extension_if_missing 8 | 9 | 10 | import logging 11 | 12 | my_logger = logging.getLogger("gpt4all") 13 | my_logger.setLevel(logging.CRITICAL) 14 | 15 | 16 | class GPT4ALL(Provider): 17 | def __init__( 18 | self, 19 | model: str, 20 | is_conversation: bool = True, 21 | max_tokens: int = 800, 22 | temperature: float = 0.7, 23 | presence_penalty: int = 0, 24 | frequency_penalty: int = 1.18, 25 | top_p: float = 0.4, 26 | intro: str = None, 27 | filepath: str = None, 28 | update_file: bool = True, 29 | history_offset: int = 10250, 30 | act: str = None, 31 | ): 32 | """Instantiates GPT4ALL 33 | 34 | Args: 35 | model (str, optional): Path to LLM model (.gguf or .bin). 36 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True. 37 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 800. 38 | temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.7. 39 | presence_penalty (int, optional): Chances of topic being repeated. Defaults to 0. 40 | frequency_penalty (int, optional): Chances of word being repeated. Defaults to 1.18. 41 | top_p (float, optional): Sampling threshold during inference time. Defaults to 0.4. 42 | intro (str, optional): Conversation introductory prompt. Defaults to None. 
43 | filepath (str, optional): Path to file containing conversation history. Defaults to None. 44 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. 45 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. 46 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. 47 | """ 48 | self.is_conversation = is_conversation 49 | self.max_tokens_to_sample = max_tokens 50 | self.model = model 51 | self.temperature = temperature 52 | self.presence_penalty = presence_penalty 53 | self.frequency_penalty = frequency_penalty 54 | self.top_p = top_p 55 | self.last_response = {} 56 | 57 | self.__available_optimizers = ( 58 | method 59 | for method in dir(Optimizers) 60 | if callable(getattr(Optimizers, method)) and not method.startswith("__") 61 | ) 62 | Conversation.intro = ( 63 | AwesomePrompts().get_act( 64 | act, raise_not_found=True, default=None, case_insensitive=True 65 | ) 66 | if act 67 | else intro or Conversation.intro 68 | ) 69 | self.conversation = Conversation( 70 | is_conversation, self.max_tokens_to_sample, filepath, update_file 71 | ) 72 | self.conversation.history_offset = history_offset 73 | 74 | def get_model_name_path(): 75 | import os 76 | from pathlib import Path 77 | 78 | initial_model_path = Path(append_extension_if_missing(model)) 79 | if initial_model_path.exists(): 80 | if not initial_model_path.is_absolute(): 81 | initial_model_path = Path(os.getcwd()) / initial_model_path 82 | return os.path.split(initial_model_path.as_posix()) 83 | else: 84 | raise FileNotFoundError( 85 | "File does not exist " + initial_model_path.as_posix() 86 | ) 87 | 88 | model_dir, model_name = get_model_name_path() 89 | 90 | self.gpt4all = GPT4All( 91 | model_name=model_name, 92 | model_path=model_dir, 93 | allow_download=False, 94 | verbose=False, 95 | ) 96 | 97 | def ask( 98 | self, 99 | prompt: str, 100 | stream: bool = False, 101 | raw: bool = False, 102 | optimizer: str = None, 103 | conversationally: bool = False, 104 | ) -> dict: 105 | """Chat with AI 106 | 107 | Args: 108 | prompt (str): Prompt to be sent. 109 | stream (bool, optional): Flag for streaming response. Defaults to False. 110 | raw (bool, optional): Stream back raw response as received. Defaults to False. 111 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 112 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 113 | Returns: 114 | dict : {} 115 | ```json 116 | { 117 | "text" : "How may I help you today?" 
118 | } 119 | ``` 120 | """ 121 | conversation_prompt = self.conversation.gen_complete_prompt(prompt) 122 | if optimizer: 123 | if optimizer in self.__available_optimizers: 124 | conversation_prompt = getattr(Optimizers, optimizer)( 125 | conversation_prompt if conversationally else prompt 126 | ) 127 | else: 128 | raise Exception( 129 | f"Optimizer is not one of {self.__available_optimizers}" 130 | ) 131 | 132 | def for_stream(): 133 | response = self.gpt4all.generate( 134 | prompt=conversation_prompt, 135 | max_tokens=self.max_tokens_to_sample, 136 | temp=self.temperature, 137 | top_p=self.top_p, 138 | repeat_penalty=self.frequency_penalty, 139 | streaming=True, 140 | ) 141 | 142 | message_load: str = "" 143 | for token in response: 144 | message_load += token 145 | resp: dict = dict(text=message_load) 146 | yield token if raw else resp 147 | self.last_response.update(resp) 148 | 149 | self.conversation.update_chat_history( 150 | prompt, self.get_message(self.last_response) 151 | ) 152 | self.gpt4all.current_chat_session = empty_chat_session() 153 | 154 | def for_non_stream(): 155 | for _ in for_stream(): 156 | pass 157 | return self.last_response 158 | 159 | return for_stream() if stream else for_non_stream() 160 | 161 | def chat( 162 | self, 163 | prompt: str, 164 | stream: bool = False, 165 | optimizer: str = None, 166 | conversationally: bool = False, 167 | ) -> str: 168 | """Generate response `str` 169 | Args: 170 | prompt (str): Prompt to be send. 171 | stream (bool, optional): Flag for streaming response. Defaults to False. 172 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 173 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 174 | Returns: 175 | str: Response generated 176 | """ 177 | 178 | def for_stream(): 179 | for response in self.ask( 180 | prompt, True, optimizer=optimizer, conversationally=conversationally 181 | ): 182 | yield self.get_message(response) 183 | 184 | def for_non_stream(): 185 | return self.get_message( 186 | self.ask( 187 | prompt, 188 | False, 189 | optimizer=optimizer, 190 | conversationally=conversationally, 191 | ) 192 | ) 193 | 194 | return for_stream() if stream else for_non_stream() 195 | 196 | def get_message(self, response: dict) -> str: 197 | """Retrieves message only from response 198 | 199 | Args: 200 | response (str): Response generated by `self.ask` 201 | 202 | Returns: 203 | str: Message extracted 204 | """ 205 | assert isinstance(response, dict), "Response should be of dict data-type only" 206 | return response["text"] 207 | -------------------------------------------------------------------------------- /src/pytgpt/gpt4free/__init__.py: -------------------------------------------------------------------------------- 1 | from .main import GPT4FREE 2 | from .main import AsyncGPT4FREE 3 | from .main import working_providers 4 | 5 | from .main import default_models 6 | 7 | from .main import completion_allowed_models 8 | 9 | __info__ = "Interact with various free ai providers bundled by https://github.com/xtekky/gpt4free" 10 | 11 | all = [ 12 | "GPT4FREE", 13 | "AsyncGPT4FREE", 14 | "working_providers", 15 | "default_models", 16 | "completion_allowed_models", 17 | ] 18 | -------------------------------------------------------------------------------- /src/pytgpt/gpt4free/main.py: -------------------------------------------------------------------------------- 1 | from pytgpt.utils import Optimizers 2 | from pytgpt.utils import Conversation 3 | from 
pytgpt.utils import AwesomePrompts 4 | from pytgpt.base import Provider, AsyncProvider 5 | from pytgpt import available_providers 6 | import g4f 7 | import os 8 | import pytgpt.exceptions as exceptions 9 | from typing import AsyncGenerator 10 | 11 | g4f.debug.version_check = False 12 | 13 | working_providers = available_providers 14 | 15 | completion_allowed_models = [ 16 | "code-davinci-002", 17 | "text-ada-001", 18 | "text-babbage-001", 19 | "text-curie-001", 20 | "text-davinci-002", 21 | "text-davinci-003", 22 | ] 23 | 24 | default_models = { 25 | "completion": "text-davinci-003", 26 | "chat_completion": "gpt-3.5-turbo", 27 | } 28 | 29 | default_provider = "Koala" 30 | 31 | 32 | class GPT4FREE(Provider): 33 | def __init__( 34 | self, 35 | provider: str = default_provider, 36 | is_conversation: bool = True, 37 | auth: str = None, 38 | max_tokens: int = 600, 39 | model: str = None, 40 | chat_completion: bool = False, 41 | ignore_working: bool = False, 42 | timeout: int = 30, 43 | intro: str = None, 44 | filepath: str = None, 45 | update_file: bool = True, 46 | proxies: dict = {}, 47 | history_offset: int = 10250, 48 | act: str = None, 49 | ): 50 | """Initialies GPT4FREE 51 | 52 | Args: 53 | provider (str, optional): gpt4free based provider name. Defaults to Koala. 54 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True. 55 | auth (str, optional): Authentication value for the provider incase it needs. Defaults to None. 56 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. 57 | model (str, optional): LLM model name. Defaults to text-davinci-003|gpt-3.5-turbo. 58 | chat_completion(bool, optional): Provide native auto-contexting (conversationally). Defaults to False. 59 | ignore_working (bool, optional): Ignore working status of the provider. Defaults to False. 60 | timeout (int, optional): Http request timeout. Defaults to 30. 61 | intro (str, optional): Conversation introductory prompt. Defaults to None. 62 | filepath (str, optional): Path to file containing conversation history. Defaults to None. 63 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. 64 | proxies (dict, optional): Http request proxies. Defaults to {}. 65 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. 66 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. 67 | """ 68 | assert provider in available_providers, ( 69 | f"Provider '{provider}' is not yet supported. " 70 | f"Try others like {', '.join(available_providers)}" 71 | ) 72 | if model is None: 73 | model = ( 74 | default_models["chat_completion"] 75 | if chat_completion 76 | else default_models["completion"] 77 | ) 78 | 79 | elif not chat_completion: 80 | assert model in completion_allowed_models, ( 81 | f"Model '{model}' is not yet supported for completion. 
" 82 | f"Try other models like {', '.join(completion_allowed_models)}" 83 | ) 84 | self.is_conversation = is_conversation 85 | self.max_tokens_to_sample = max_tokens 86 | self.stream_chunk_size = 64 87 | self.timeout = timeout 88 | self.last_response = {} 89 | 90 | self.__available_optimizers = ( 91 | method 92 | for method in dir(Optimizers) 93 | if callable(getattr(Optimizers, method)) and not method.startswith("__") 94 | ) 95 | Conversation.intro = ( 96 | AwesomePrompts().get_act( 97 | act, raise_not_found=True, default=None, case_insensitive=True 98 | ) 99 | if act 100 | else intro or Conversation.intro 101 | ) 102 | self.conversation = Conversation( 103 | False if chat_completion else is_conversation, 104 | self.max_tokens_to_sample, 105 | filepath, 106 | update_file, 107 | ) 108 | self.conversation.history_offset = history_offset 109 | self.model = model 110 | self.provider = provider 111 | self.chat_completion = chat_completion 112 | self.ignore_working = ignore_working 113 | self.auth = auth 114 | self.proxy = ( 115 | os.getenv("https_proxy", None) if not proxies else list(proxies.values())[0] 116 | ) 117 | 118 | self.__chat_class = g4f.ChatCompletion if chat_completion else g4f.Completion 119 | 120 | def __str__(self): 121 | return f"GPTFREE(provider='{self.provider}')" 122 | 123 | def ask( 124 | self, 125 | prompt: str, 126 | stream: bool = False, 127 | raw: bool = False, 128 | optimizer: str = None, 129 | conversationally: bool = False, 130 | ) -> dict: 131 | """Chat with AI 132 | 133 | Args: 134 | prompt (str): Prompt to be send. 135 | stream (bool, optional): Flag for streaming response. Defaults to False. 136 | raw (bool, optional): Stream back raw response as received. Defaults to False. 137 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 138 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 139 | Returns: 140 | dict : {} 141 | ```json 142 | { 143 | "text" : "How may I help you today?" 
144 | } 145 | ``` 146 | """ 147 | conversation_prompt = self.conversation.gen_complete_prompt(prompt) 148 | if optimizer: 149 | if optimizer in self.__available_optimizers: 150 | conversation_prompt = getattr(Optimizers, optimizer)( 151 | conversation_prompt if conversationally else prompt 152 | ) 153 | else: 154 | raise Exception( 155 | f"Optimizer is not one of {self.__available_optimizers}" 156 | ) 157 | 158 | def payload(): 159 | if self.chat_completion: 160 | return dict( 161 | model=self.model, 162 | provider=self.provider, # g4f.Provider.Aichat, 163 | messages=[{"role": "user", "content": conversation_prompt}], 164 | stream=stream, 165 | ignore_working=self.ignore_working, 166 | auth=self.auth, 167 | proxy=self.proxy, 168 | timeout=self.timeout, 169 | ) 170 | 171 | else: 172 | return dict( 173 | model=self.model, 174 | prompt=conversation_prompt, 175 | provider=self.provider, 176 | stream=stream, 177 | ignore_working=self.ignore_working, 178 | auth=self.auth, 179 | proxy=self.proxy, 180 | timeout=self.timeout, 181 | ) 182 | 183 | def format_response(response): 184 | return dict(text=response) 185 | 186 | def for_stream(): 187 | previous_chunks = "" 188 | response = self.__chat_class.create(**payload()) 189 | 190 | for chunk in response: 191 | previous_chunks += chunk 192 | formatted_resp = format_response(previous_chunks) 193 | self.last_response.update(formatted_resp) 194 | yield previous_chunks if raw else formatted_resp 195 | 196 | self.conversation.update_chat_history( 197 | prompt, 198 | previous_chunks, 199 | ) 200 | 201 | def for_non_stream(): 202 | response = self.__chat_class.create(**payload()) 203 | formatted_resp = format_response(response) 204 | 205 | self.last_response.update(formatted_resp) 206 | self.conversation.update_chat_history(prompt, response) 207 | 208 | return response if raw else formatted_resp 209 | 210 | return for_stream() if stream else for_non_stream() 211 | 212 | def chat( 213 | self, 214 | prompt: str, 215 | stream: bool = False, 216 | optimizer: str = None, 217 | conversationally: bool = False, 218 | ) -> str: 219 | """Generate response `str` 220 | Args: 221 | prompt (str): Prompt to be send. 222 | stream (bool, optional): Flag for streaming response. Defaults to False. 223 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 224 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 
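For quick reference, a minimal usage sketch of the synchronous `GPT4FREE` provider defined above. It assumes `pytgpt` and `g4f` are installed and that the default "Koala" backend happens to be reachable at run time; any other name from `working_providers` can be substituted.

```python
from pytgpt.gpt4free import GPT4FREE

# chat_completion=True routes requests through g4f.ChatCompletion,
# letting g4f handle the conversation context natively.
bot = GPT4FREE(provider="Koala", chat_completion=True)

# Non-streaming: returns the full response text.
print(bot.chat("Explain HTTP caching in one paragraph"))

# Streaming: every yield is the accumulated reply so far, not a delta.
for text_so_far in bot.chat("Now compress that into one sentence", stream=True):
    print(text_so_far)
```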
225 | Returns: 226 | str: Response generated 227 | """ 228 | 229 | def for_stream(): 230 | for response in self.ask( 231 | prompt, True, optimizer=optimizer, conversationally=conversationally 232 | ): 233 | yield self.get_message(response) 234 | 235 | def for_non_stream(): 236 | return self.get_message( 237 | self.ask( 238 | prompt, 239 | False, 240 | optimizer=optimizer, 241 | conversationally=conversationally, 242 | ) 243 | ) 244 | 245 | return for_stream() if stream else for_non_stream() 246 | 247 | def get_message(self, response: dict) -> str: 248 | """Retrieves message only from response 249 | 250 | Args: 251 | response (dict): Response generated by `self.ask` 252 | 253 | Returns: 254 | str: Message extracted 255 | """ 256 | assert isinstance(response, dict), "Response should be of dict data-type only" 257 | return response["text"] 258 | 259 | 260 | class AsyncGPT4FREE(AsyncProvider): 261 | def __init__( 262 | self, 263 | provider: str = default_provider, 264 | is_conversation: bool = True, 265 | auth: str = None, 266 | max_tokens: int = 600, 267 | model: str = None, 268 | ignore_working: bool = False, 269 | timeout: int = 30, 270 | intro: str = None, 271 | filepath: str = None, 272 | update_file: bool = True, 273 | proxies: dict = {}, 274 | history_offset: int = 10250, 275 | act: str = None, 276 | ): 277 | """Initialies GPT4FREE 278 | 279 | Args: 280 | provider (str, optional): gpt4free based provider name. Defaults to Koala. 281 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True. 282 | auth (str, optional): Authentication value for the provider incase it needs. Defaults to None. 283 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. 284 | model (str, optional): LLM model name. Defaults to text-davinci-003|gpt-3.5-turbo. 285 | ignore_working (bool, optional): Ignore working status of the provider. Defaults to False. 286 | timeout (int, optional): Http request timeout. Defaults to 30. 287 | intro (str, optional): Conversation introductory prompt. Defaults to None. 288 | filepath (str, optional): Path to file containing conversation history. Defaults to None. 289 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. 290 | proxies (dict, optional): Http request proxies. Defaults to {}. 291 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. 292 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. 293 | """ 294 | assert provider in available_providers, ( 295 | f"Provider '{provider}' is not yet supported. 
" 296 | f"Try others like {', '.join(available_providers)}" 297 | ) 298 | if model is None: 299 | model = default_models["chat_completion"] 300 | 301 | self.is_conversation = is_conversation 302 | self.max_tokens_to_sample = max_tokens 303 | self.stream_chunk_size = 64 304 | self.timeout = timeout 305 | self.last_response = {} 306 | 307 | self.__available_optimizers = ( 308 | method 309 | for method in dir(Optimizers) 310 | if callable(getattr(Optimizers, method)) and not method.startswith("__") 311 | ) 312 | Conversation.intro = ( 313 | AwesomePrompts().get_act( 314 | act, raise_not_found=True, default=None, case_insensitive=True 315 | ) 316 | if act 317 | else intro or Conversation.intro 318 | ) 319 | self.conversation = Conversation( 320 | is_conversation, 321 | self.max_tokens_to_sample, 322 | filepath, 323 | update_file, 324 | ) 325 | self.conversation.history_offset = history_offset 326 | self.model = model 327 | self.provider = provider 328 | self.ignore_working = ignore_working 329 | self.auth = auth 330 | self.proxy = None if not proxies else list(proxies.values())[0] 331 | 332 | def __str__(self): 333 | return f"AsyncGPTFREE(provider={self.provider})" 334 | 335 | async def ask( 336 | self, 337 | prompt: str, 338 | stream: bool = False, 339 | raw: bool = False, 340 | optimizer: str = None, 341 | conversationally: bool = False, 342 | ) -> dict | AsyncGenerator: 343 | """Chat with AI asynchronously. 344 | 345 | Args: 346 | prompt (str): Prompt to be send. 347 | stream (bool, optional): Flag for streaming response. Defaults to False. 348 | raw (bool, optional): Stream back raw response as received. Defaults to False. 349 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 350 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 351 | Returns: 352 | dict|AsyncGenerator : ai content 353 | ```json 354 | { 355 | "text" : "How may I help you today?" 
356 | } 357 | ``` 358 | """ 359 | conversation_prompt = self.conversation.gen_complete_prompt(prompt) 360 | if optimizer: 361 | if optimizer in self.__available_optimizers: 362 | conversation_prompt = getattr(Optimizers, optimizer)( 363 | conversation_prompt if conversationally else prompt 364 | ) 365 | else: 366 | raise Exception( 367 | f"Optimizer is not one of {self.__available_optimizers}" 368 | ) 369 | 370 | payload = dict( 371 | model=self.model, 372 | provider=self.provider, # g4f.Provider.Aichat, 373 | messages=[{"role": "user", "content": conversation_prompt}], 374 | stream=True, 375 | ignore_working=self.ignore_working, 376 | auth=self.auth, 377 | proxy=self.proxy, 378 | timeout=self.timeout, 379 | ) 380 | 381 | async def format_response(response): 382 | return dict(text=response) 383 | 384 | async def for_stream(): 385 | previous_chunks = "" 386 | response = g4f.ChatCompletion.create_async(**payload) 387 | 388 | async for chunk in response: 389 | previous_chunks += chunk 390 | formatted_resp = await format_response(previous_chunks) 391 | self.last_response.update(formatted_resp) 392 | yield previous_chunks if raw else formatted_resp 393 | 394 | self.conversation.update_chat_history( 395 | prompt, 396 | previous_chunks, 397 | ) 398 | 399 | async def for_non_stream(): 400 | async for _ in for_stream(): 401 | pass 402 | return self.last_response 403 | 404 | return for_stream() if stream else await for_non_stream() 405 | 406 | async def chat( 407 | self, 408 | prompt: str, 409 | stream: bool = False, 410 | optimizer: str = None, 411 | conversationally: bool = False, 412 | ) -> dict | AsyncGenerator: 413 | """Generate response `str` asynchronously. 414 | Args: 415 | prompt (str): Prompt to be send. 416 | stream (bool, optional): Flag for streaming response. Defaults to False. 417 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 418 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 
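The asynchronous variant mirrors the synchronous API; a short sketch under the same environment assumptions as above. Note that `chat(..., stream=True)` is awaited once to obtain the async generator, since it is returned from a coroutine.

```python
import asyncio
from pytgpt.gpt4free import AsyncGPT4FREE

async def demo():
    bot = AsyncGPT4FREE(provider="Koala")  # provider name is illustrative
    # Non-streaming: awaits the final response text.
    print(await bot.chat("hello"))
    # Streaming: async generator of progressively longer text chunks.
    async for text_so_far in await bot.chat("Tell me a short joke", stream=True):
        print(text_so_far)

asyncio.run(demo())
```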
419 | Returns: 420 | str|AsyncGenerator: Response generated 421 | """ 422 | 423 | async def for_stream(): 424 | async_ask = await self.ask( 425 | prompt, True, optimizer=optimizer, conversationally=conversationally 426 | ) 427 | async for response in async_ask: 428 | yield await self.get_message(response) 429 | 430 | async def for_non_stream(): 431 | return await self.get_message( 432 | await self.ask( 433 | prompt, 434 | False, 435 | optimizer=optimizer, 436 | conversationally=conversationally, 437 | ) 438 | ) 439 | 440 | return for_stream() if stream else await for_non_stream() 441 | 442 | async def get_message(self, response: dict) -> str: 443 | """Retrieves message only from response 444 | 445 | Args: 446 | response (dict): Response generated by `self.ask` 447 | 448 | Returns: 449 | str: Message extracted 450 | """ 451 | assert isinstance(response, dict), "Response should be of dict data-type only" 452 | return response["text"] 453 | 454 | 455 | if __name__ == "__main__": 456 | bot = GPT4FREE() 457 | 458 | def main(): 459 | resp = bot.ask("hello") 460 | for value in resp: 461 | print(value) 462 | 463 | async def asyncmain(): 464 | bot = AsyncGPT4FREE("Blackbox") 465 | print(type(await bot.ask("hello", True))) 466 | exit() 467 | while True: 468 | resp = await bot.chat(input(">>"), False) 469 | print(resp) 470 | # async for value in resp: 471 | # print(value) 472 | 473 | # main() 474 | import asyncio 475 | 476 | asyncio.run(asyncmain()) 477 | -------------------------------------------------------------------------------- /src/pytgpt/gpt4free/utils.py: -------------------------------------------------------------------------------- 1 | import g4f 2 | from .main import GPT4FREE 3 | from pathlib import Path 4 | from pytgpt.utils import default_path 5 | from json import dump, load 6 | from time import time 7 | from threading import Thread as thr 8 | from functools import wraps 9 | from rich.progress import Progress 10 | import logging 11 | 12 | results_path = Path(default_path) / "provider_test.json" 13 | 14 | 15 | def exception_handler(func): 16 | 17 | @wraps(func) 18 | def decorator(*args, **kwargs): 19 | try: 20 | return func(*args, **kwargs) 21 | except Exception as e: 22 | pass 23 | 24 | return decorator 25 | 26 | 27 | @exception_handler 28 | def is_working(provider: str) -> bool: 29 | """Test working status of a provider 30 | 31 | Args: 32 | provider (str): Provider name 33 | 34 | Returns: 35 | bool: is_working status 36 | """ 37 | bot = GPT4FREE(provider=provider, is_conversation=False) 38 | text = bot.chat("hello") 39 | assert isinstance(text, str) 40 | assert bool(text.strip()) 41 | assert "<" not in text 42 | 43 | assert len(text) > 2 44 | return True 45 | 46 | 47 | class TestProviders: 48 | 49 | def __init__( 50 | self, 51 | test_at_once: int = 5, 52 | quiet: bool = False, 53 | timeout: int = 20, 54 | selenium: bool = False, 55 | do_log: bool = True, 56 | ): 57 | """Constructor 58 | 59 | Args: 60 | test_at_once (int, optional): Test n providers at once. Defaults to 5. 61 | quiet (bool, optional): Disable stdout. Defaults to False. 62 | timeout (int, optional): Thread timeout for each provider. Defaults to 20. 63 | selenium (bool, optional): Test even selenium dependent providers. Defaults to False. 64 | do_log (bool, optional): Flag to control logging. Defaults to True.
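A small sketch of how the `is_working` helper above behaves in practice. Because `exception_handler` swallows errors, a failing provider yields `None` rather than raising; the provider name below is purely illustrative.

```python
from pytgpt.gpt4free.utils import is_working

candidate = "Koala"  # illustrative g4f provider name
if is_working(candidate):
    print(f"{candidate} returned usable text")
else:
    # None is returned when the chat call raised or an assertion failed.
    print(f"{candidate} appears to be down")
```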
65 | """ 66 | self.test_at_once: int = test_at_once 67 | self.quiet = quiet 68 | self.timeout = timeout 69 | self.do_log = do_log 70 | self.__logger = logging.getLogger(__name__) 71 | self.working_providers: list = [ 72 | provider.__name__ 73 | for provider in g4f.Provider.__providers__ 74 | if provider.working 75 | ] 76 | 77 | if not selenium: 78 | import g4f.Provider.selenium as selenium_based 79 | from g4f import webdriver 80 | 81 | webdriver.has_requirements = False 82 | selenium_based_providers: list = dir(selenium_based) 83 | for provider in self.working_providers: 84 | try: 85 | selenium_based_providers.index(provider) 86 | except ValueError: 87 | pass 88 | else: 89 | self.__log( 90 | 10, f"Dropping provider - {provider} - [Selenium dependent]" 91 | ) 92 | self.working_providers.remove(provider) 93 | 94 | self.results_path: Path = results_path 95 | self.__create_empty_file(ignore_if_found=True) 96 | self.results_file_is_empty: bool = False 97 | 98 | def __log( 99 | self, 100 | level: int, 101 | message: str, 102 | ): 103 | """class logger""" 104 | if self.do_log: 105 | self.__logger.log(level, message) 106 | else: 107 | pass 108 | 109 | def __create_empty_file(self, ignore_if_found: bool = False): 110 | if ignore_if_found and self.results_path.is_file(): 111 | return 112 | with self.results_path.open("w") as fh: 113 | dump({"results": []}, fh) 114 | self.results_file_is_empty = True 115 | 116 | def test_provider(self, name: str): 117 | """Test each provider and save successful ones 118 | 119 | Args: 120 | name (str): Provider name 121 | """ 122 | 123 | try: 124 | bot = GPT4FREE(provider=name, is_conversation=False) 125 | start_time = time() 126 | text = bot.chat("hello there") 127 | assert isinstance(text, str), "Non-string response returned" 128 | assert bool(text.strip()), "Empty string" 129 | assert " 2 132 | except Exception as e: 133 | pass 134 | else: 135 | self.results_file_is_empty = False 136 | with self.results_path.open() as fh: 137 | current_results = load(fh) 138 | new_result = dict(time=time() - start_time, name=name) 139 | current_results["results"].append(new_result) 140 | self.__log(20, f"Test result - {new_result['name']} - {new_result['time']}") 141 | 142 | with self.results_path.open("w") as fh: 143 | dump(current_results, fh) 144 | 145 | @exception_handler 146 | def main( 147 | self, 148 | ): 149 | self.__create_empty_file() 150 | threads = [] 151 | # Create a progress bar 152 | total = len(self.working_providers) 153 | with Progress() as progress: 154 | self.__log( 155 | 20, f"Testing {total} providers : {', '.join(self.working_providers)}" 156 | ) 157 | task = progress.add_task( 158 | f"[cyan]Testing...[{self.test_at_once}]", 159 | total=total, 160 | visible=self.quiet == False, 161 | ) 162 | while not progress.finished: 163 | for count, provider in enumerate(self.working_providers, start=1): 164 | t1 = thr( 165 | target=self.test_provider, 166 | args=(provider,), 167 | ) 168 | t1.start() 169 | if count % self.test_at_once == 0 or count == len(provider): 170 | for t in threads: 171 | try: 172 | t.join(self.timeout) 173 | except Exception as e: 174 | pass 175 | threads.clear() 176 | else: 177 | threads.append(t1) 178 | progress.update(task, advance=1) 179 | 180 | def get_results(self, run: bool = False, best: bool = False) -> list[dict]: 181 | """Get test results 182 | 183 | Args: 184 | run (bool, optional): Run the test first. Defaults to False. 185 | best (bool, optional): Return name of the best provider. Defaults to False. 
186 | 187 | Returns: 188 | list[dict]|str: Test results. 189 | """ 190 | if run or self.results_file_is_empty: 191 | self.main() 192 | 193 | with self.results_path.open() as fh: 194 | results: dict = load(fh) 195 | 196 | results = results["results"] 197 | if not results: 198 | if run: 199 | raise Exception("Unable to find working g4f provider") 200 | else: 201 | self.__log(30, "Hunting down working g4f providers.") 202 | return self.get_results(run=True, best=best) 203 | 204 | time_list = [] 205 | 206 | sorted_list = [] 207 | for entry in results: 208 | time_list.append(entry["time"]) 209 | 210 | time_list.sort() 211 | 212 | for time_value in time_list: 213 | for entry in results: 214 | if entry["time"] == time_value: 215 | sorted_list.append(entry) 216 | return sorted_list[0]["name"] if best else sorted_list 217 | 218 | @property 219 | def best(self): 220 | """Fastest provider overally""" 221 | return self.get_results(run=False, best=True) 222 | 223 | @property 224 | def auto(self): 225 | """Best working provider""" 226 | for result in self.get_results(run=False, best=False): 227 | self.__log(20, "Confirming working status of provider : " + result["name"]) 228 | if is_working(result["name"]): 229 | return result["name"] 230 | -------------------------------------------------------------------------------- /src/pytgpt/groq/__init__.py: -------------------------------------------------------------------------------- 1 | from .main import GROQ 2 | from .main import AsyncGROQ 3 | from .main import session 4 | 5 | 6 | __info__ = "Interact with GROQ's model. " "API key is required" 7 | 8 | __all__ = ["OPENAI", "AsyncGROQ", "session"] 9 | -------------------------------------------------------------------------------- /src/pytgpt/imager/__init__.py: -------------------------------------------------------------------------------- 1 | from .imager import Imager 2 | from .imager import Prodia 3 | from .imager import AsyncImager 4 | from .imager import AsyncProdia 5 | 6 | __info__ = "Generate images using pollinations.ai and prodia.com providers" 7 | 8 | __all__ = ["Imager", "Prodia", "AsyncImager", "AsyncProdia"] 9 | -------------------------------------------------------------------------------- /src/pytgpt/imager/imager.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import httpx 3 | import os 4 | from typing import Generator, AsyncGenerator, Union, Coroutine 5 | from string import punctuation 6 | from random import choice 7 | from random import randint 8 | 9 | 10 | class AsyncImager: 11 | """Asynchronous implementation of Imager (default provider)""" 12 | 13 | def __init__( 14 | self, 15 | timeout: int = 20, 16 | proxies: dict = {}, 17 | ): 18 | """Initializes `AsyncImager` 19 | 20 | Args: 21 | timeout (int, optional): Http request timeout. Defaults to 20. 22 | proxies (dict, optional): Http request proxies (socks). Defaults to {}. 
23 | """ 24 | self.image_gen_endpoint: str = "https://image.pollinations.ai/prompt/%(prompt)s" 25 | self.headers = { 26 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8", 27 | "Accept-Language": "en-US,en;q=0.5", 28 | "Accept-Encoding": "gzip, deflate", 29 | "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0", 30 | } 31 | self.session = httpx.AsyncClient( 32 | headers=self.headers, timeout=timeout, proxies=proxies 33 | ) 34 | self.timeout = timeout 35 | self.prompt: str = "AI-generated image - pytgpt" 36 | self.image_extension: str = "jpeg" 37 | 38 | async def generate( 39 | self, prompt: str, amount: int = 1, stream: bool = False, additives: bool = True 40 | ) -> list[bytes]: 41 | """Generat image from prompt 42 | 43 | Args: 44 | prompt (str): Image description. 45 | amount (int): Total images to be generated. Defaults to 1. 46 | additives (bool, optional): Try to make each prompt unique. Defaults to True. 47 | 48 | Returns: 49 | list[bytes]|bytes: Image generated 50 | """ 51 | assert bool(prompt), "Prompt cannot be null" 52 | assert isinstance( 53 | amount, int 54 | ), f"Amount should be an integer only not {type(amount)}" 55 | assert amount > 0, "Amount should be greater than 0" 56 | ads = lambda: ( 57 | "" 58 | if not additives 59 | else choice(punctuation) 60 | + choice(punctuation) 61 | + choice(punctuation) 62 | + choice(punctuation) 63 | + choice(punctuation) 64 | ) 65 | 66 | async def for_stream(): 67 | for _ in range(amount): 68 | resp = await self.session.get( 69 | url=self.image_gen_endpoint % dict(prompt=prompt + ads()), 70 | timeout=self.timeout, 71 | ) 72 | resp.raise_for_status() 73 | yield resp.content 74 | 75 | async def for_non_stream(): 76 | response = [] 77 | 78 | async for image in for_stream(): 79 | response.append(image) 80 | return response 81 | 82 | self.prompt = prompt 83 | return for_stream() if stream else await for_non_stream() 84 | 85 | async def save( 86 | self, 87 | response: list[bytes], 88 | name: str = None, 89 | dir: str = os.getcwd(), 90 | filenames_prefix: str = "", 91 | ) -> list[str]: 92 | """Save generated images 93 | 94 | Args: 95 | response (list[bytes]|Generator): Response of Imager.generate 96 | name (str): Filename for the images. Defaults to last prompt. 97 | dir (str, optional): Directory for saving images. Defaults to os.getcwd(). 98 | filenames_prefix (str, optional): String to be prefixed at each filename to be returned. 99 | """ 100 | assert isinstance( 101 | response, (list, AsyncGenerator) 102 | ), f"Response should be of {list} or {AsyncGenerator} types not {type(response)}" 103 | name = self.prompt if name is None else name 104 | 105 | filenames: list = [] 106 | 107 | count = 0 108 | if isinstance(response, AsyncGenerator): 109 | new_response = [] 110 | async for image in response: 111 | new_response.append(image) 112 | response = new_response 113 | 114 | for image in response: 115 | 116 | def complete_path(): 117 | count_value = "" if count == 0 else f"_{count}" 118 | return os.path.join( 119 | dir, name + count_value + "." 
+ self.image_extension 120 | ) 121 | 122 | while os.path.isfile(complete_path()): 123 | count += 1 124 | 125 | absolute_path_to_file = complete_path() 126 | filenames.append(filenames_prefix + os.path.split(absolute_path_to_file)[1]) 127 | 128 | with open(absolute_path_to_file, "wb") as fh: 129 | fh.write(image) 130 | 131 | return filenames 132 | 133 | 134 | class AsyncProdia(AsyncImager): 135 | """ 136 | Asynchronous implementation of Prodia. 137 | This class provides methods for generating images based on prompts. 138 | """ 139 | 140 | def __init__(self, timeout: int = 30, proxies: dict[str, str] = {}): 141 | """Constructor 142 | 143 | Args: 144 | timeout (int, optional): Http request timeout in seconds. Defaults to 30. 145 | proxies (dict[str, str], optional): Http request proxies. Defaults to {}. 146 | """ 147 | super().__init__(timeout=timeout, proxies=proxies) 148 | self.image_extension: str = "png" 149 | 150 | async def _generate(self, prompt: str) -> bytes: 151 | """ 152 | Create a new image generation based on the given prompt. 153 | 154 | Args: 155 | prompt (str): The prompt for generating the image. 156 | 157 | Returns: 158 | resp (bytes): The generated image content 159 | """ 160 | 161 | try: 162 | resp = await self.session.get( 163 | "https://api.prodia.com/generate", 164 | params={ 165 | "new": "true", 166 | "prompt": prompt, 167 | "model": "dreamshaper_6BakedVae.safetensors [114c8abb]", 168 | "negative_prompt": "(nsfw:1.5),verybadimagenegative_v1.3, ng_deepnegative_v1_75t, (ugly face:0.5),cross-eyed,sketches, (worst quality:2), (low quality:2.1), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, bad anatomy, DeepNegative, facing away, tilted head, {Multiple people}, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worstquality, low quality, normal quality, jpegartifacts, signature, watermark, username, blurry, bad feet, cropped, poorly drawn hands, poorly drawn face, mutation, deformed, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, extra fingers, fewer digits, extra limbs, extra arms,extra legs, malformed limbs, fused fingers, too many fingers, long neck, cross-eyed,mutated hands, polar lowres, bad body, bad proportions, gross proportions, text, error, missing fingers, missing arms, missing legs, extra digit, extra arms, extra leg, extra foot, repeating hair", 169 | "steps": "50", 170 | "cfg": "9.5", 171 | "seed": randint(1, 10000), 172 | "sampler": "Euler", 173 | "aspect_ratio": "square", 174 | }, 175 | timeout=self.timeout, 176 | ) 177 | data = resp.json() 178 | while True: 179 | resp = await self.session.get( 180 | f"https://api.prodia.com/job/{data['job']}", timeout=self.timeout 181 | ) 182 | json = resp.json() 183 | if json["status"] == "succeeded": 184 | resp = await self.session.get( 185 | f"https://images.prodia.xyz/{data['job']}.png?download=1", 186 | timeout=self.timeout, 187 | ) 188 | return resp.content 189 | 190 | except Exception as e: 191 | print(e) 192 | raise Exception("Unable to generate image") from e 193 | 194 | async def generate( 195 | self, 196 | prompt: str, 197 | amount: int = 1, 198 | stream: bool = False, 199 | additives: bool = False, 200 | ) -> list[bytes]: 201 | """Generate image from prompt 202 | 203 | Args: 204 | prompt (str): Image description. 205 | amount (int): Total images to be generated. Defaults to 1. 206 | additives (bool, optional): Try to make each prompt unique. Defaults to True. 
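A minimal async sketch of the `AsyncImager` workflow above, assuming pollinations.ai is reachable. `generate` returns raw image bytes and `save` writes them to the current working directory by default.

```python
import asyncio
from pytgpt.imager import AsyncImager

async def demo():
    imager = AsyncImager(timeout=30)
    images = await imager.generate("a lighthouse at dusk", amount=2)
    filenames = await imager.save(images, name="lighthouse")
    # e.g. ['lighthouse.jpeg', 'lighthouse_1.jpeg'] if no such files existed yet
    print(filenames)

asyncio.run(demo())
```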
207 | 208 | Returns: 209 | list[bytes]|bytes: Image generated 210 | """ 211 | self.prompt = prompt 212 | get_prompt: object = lambda prompt: ( 213 | f"prompt {randint(1, 10000)}" if additives else prompt 214 | ) 215 | 216 | async def for_stream(): 217 | for _ in range(amount): 218 | yield await self._generate(get_prompt(prompt)) 219 | 220 | async def for_non_stream(): 221 | resp = [] 222 | for _ in range(amount): 223 | resp.append(await self._generate(get_prompt(prompt))) 224 | return resp 225 | 226 | return for_stream() if stream else await for_non_stream() 227 | 228 | 229 | class Imager: 230 | """Default Image provider""" 231 | 232 | def __init__( 233 | self, 234 | timeout: int = 20, 235 | proxies: dict = {}, 236 | ): 237 | """Initializes `AsyncImager` 238 | 239 | Args: 240 | timeout (int, optional): Http request timeout. Defaults to 20. 241 | proxies (dict, optional): Http request proxies (socks). Defaults to {}. 242 | """ 243 | self.loop = asyncio.get_event_loop() 244 | self.async_imager = AsyncImager(timeout=timeout, proxies=proxies) 245 | 246 | def generate( 247 | self, prompt: str, amount: int = 1, stream: bool = False, additives: bool = True 248 | ) -> list[bytes]: 249 | """Generate image from prompt 250 | 251 | Args: 252 | prompt (str): Image description. 253 | amount (int): Total images to be generated. Defaults to 1. 254 | additives (bool, optional): Try to make each prompt unique. Defaults to True. 255 | 256 | Returns: 257 | list[bytes]|bytes: Image generated 258 | """ 259 | return self.loop.run_until_complete( 260 | self.async_imager.generate( 261 | prompt=prompt, 262 | amount=amount, 263 | stream=stream, 264 | additives=additives, 265 | ) 266 | ) 267 | 268 | def save( 269 | self, 270 | response: list[bytes], 271 | name: str = None, 272 | dir: str = os.getcwd(), 273 | filenames_prefix: str = "", 274 | ) -> list[str]: 275 | """Save generated images 276 | 277 | Args: 278 | response (list[bytes]|Generator): Response of Imager.generate 279 | name (str): Filename for the images. Defaults to last prompt. 280 | dir (str, optional): Directory for saving images. Defaults to os.getcwd(). 281 | filenames_prefix (str, optional): String to be prefixed at each filename to be returned. 282 | """ 283 | return self.loop.run_until_complete( 284 | self.async_imager.save( 285 | response=response, name=name, dir=dir, filenames_prefix=filenames_prefix 286 | ) 287 | ) 288 | 289 | 290 | class Prodia(Imager): 291 | """ 292 | This class provides methods for generating images based on prompts. 293 | """ 294 | 295 | def __init__(self, timeout: int = 30, proxies: dict[str, str] = {}): 296 | """Constructor 297 | 298 | Args: 299 | timeout (int, optional): Http request timeout in seconds. Defaults to 30. 300 | proxies (dict[str, str], optional): Http request proxies. Defaults to {}. 301 | """ 302 | super().__init__(timeout=timeout, proxies=proxies) 303 | self.image_extension: str = "png" 304 | self.loop = asyncio.get_event_loop() 305 | self.async_prodia = AsyncProdia(timeout=timeout, proxies=proxies) 306 | 307 | def _generate(self, prompt: str) -> bytes: 308 | """ 309 | Create a new image generation based on the given prompt. 310 | 311 | Args: 312 | prompt (str): The prompt for generating the image. 
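The synchronous `Imager` wrapper above offers the same interface without asyncio boilerplate, since it drives `AsyncImager` on an internal event loop; a short sketch under the same network assumption.

```python
from pytgpt.imager import Imager

imager = Imager(timeout=30)
images = imager.generate("isometric city at night", amount=1)
print(imager.save(images, name="city"))  # returns the written filenames
```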
313 | 314 | Returns: 315 | resp (bytes): The generated image content 316 | """ 317 | return self.loop.run_until_complete(self.async_prodia._generate(prompt=prompt)) 318 | 319 | def generate( 320 | self, 321 | prompt: str, 322 | amount: int = 1, 323 | stream: bool = False, 324 | additives: bool = False, 325 | ) -> list[bytes]: 326 | """Generat image from prompt 327 | 328 | Args: 329 | prompt (str): Image description. 330 | amount (int): Total images to be generated. Defaults to 1. 331 | additives (bool, optional): Try to make each prompt unique. Defaults to True. 332 | 333 | Returns: 334 | list[bytes]|bytes: Image generated 335 | """ 336 | return self.loop.run_until_complete( 337 | self.async_prodia.generate( 338 | prompt=prompt, 339 | amount=amount, 340 | stream=stream, 341 | additives=additives, 342 | ) 343 | ) 344 | 345 | 346 | if __name__ == "__main__": 347 | # start = AsyncImager() 348 | # loop = asyncio.new_event_loop() 349 | # resp = loop.run_until_complete(start.generate('hello world 2', stream=False)) 350 | # loop.run_until_complete( 351 | # start.save(resp) 352 | # ) 353 | bot = Prodia() 354 | resp = bot.generate("Coding bot ", 1, stream=False) 355 | bot.save(resp) 356 | -------------------------------------------------------------------------------- /src/pytgpt/koboldai/__init__.py: -------------------------------------------------------------------------------- 1 | from .main import KOBOLDAI 2 | from .main import AsyncKOBOLDAI 3 | from .main import session 4 | 5 | __info__ = "Interact with Koboldai AI models" 6 | 7 | __all__ = ["KOBOLDAI", "AsyncKOBOLDAI", "session"] 8 | -------------------------------------------------------------------------------- /src/pytgpt/koboldai/main.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import httpx 4 | import pytgpt.exceptions as exceptions 5 | from pytgpt.utils import Optimizers 6 | from pytgpt.utils import Conversation 7 | from pytgpt.utils import AwesomePrompts 8 | from pytgpt.utils import sanitize_stream 9 | from pytgpt.base import Provider, AsyncProvider 10 | from typing import AsyncGenerator 11 | 12 | session = requests.Session() 13 | 14 | 15 | class KOBOLDAI(Provider): 16 | def __init__( 17 | self, 18 | is_conversation: bool = True, 19 | max_tokens: int = 600, 20 | temperature: float = 1, 21 | top_p: float = 1, 22 | timeout: int = 30, 23 | intro: str = None, 24 | filepath: str = None, 25 | update_file: bool = True, 26 | proxies: dict = {}, 27 | history_offset: int = 10250, 28 | act: str = None, 29 | ): 30 | """Instantiate TGPT 31 | 32 | Args: 33 | is_conversation (str, optional): Flag for chatting conversationally. Defaults to True. 34 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. 35 | temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.2. 36 | top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999. 37 | timeout (int, optional): Http requesting timeout. Defaults to 30 38 | intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`. 39 | filepath (str, optional): Path to file containing conversation history. Defaults to None. 40 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. 41 | proxies (dict, optional) : Http reqiuest proxies (socks). Defaults to {}. 42 | history_offset (int, optional): Limit conversation history to this number of last texts. 
Defaults to 10250. 43 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. 44 | """ 45 | self.is_conversation = is_conversation 46 | self.max_tokens_to_sample = max_tokens 47 | self.temperature = temperature 48 | self.top_p = top_p 49 | self.chat_endpoint = ( 50 | "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream" 51 | ) 52 | self.stream_chunk_size = 64 53 | self.timeout = timeout 54 | self.last_response = {} 55 | self.headers = { 56 | "Content-Type": "application/json", 57 | "Accept": "application/json", 58 | } 59 | 60 | self.__available_optimizers = ( 61 | method 62 | for method in dir(Optimizers) 63 | if callable(getattr(Optimizers, method)) and not method.startswith("__") 64 | ) 65 | session.headers.update(self.headers) 66 | Conversation.intro = ( 67 | AwesomePrompts().get_act( 68 | act, raise_not_found=True, default=None, case_insensitive=True 69 | ) 70 | if act 71 | else intro or Conversation.intro 72 | ) 73 | self.conversation = Conversation( 74 | is_conversation, self.max_tokens_to_sample, filepath, update_file 75 | ) 76 | self.conversation.history_offset = history_offset 77 | session.proxies = proxies 78 | 79 | def ask( 80 | self, 81 | prompt: str, 82 | stream: bool = False, 83 | raw: bool = False, 84 | optimizer: str = None, 85 | conversationally: bool = False, 86 | ) -> dict: 87 | """Chat with AI 88 | 89 | Args: 90 | prompt (str): Prompt to be send. 91 | stream (bool, optional): Flag for streaming response. Defaults to False. 92 | raw (bool, optional): Stream back raw response as received. Defaults to False. 93 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 94 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 95 | Returns: 96 | dict : {} 97 | ```json 98 | { 99 | "token" : "How may I assist you today?" 
100 | } 101 | ``` 102 | """ 103 | conversation_prompt = self.conversation.gen_complete_prompt(prompt) 104 | if optimizer: 105 | if optimizer in self.__available_optimizers: 106 | conversation_prompt = getattr(Optimizers, optimizer)( 107 | conversation_prompt if conversationally else prompt 108 | ) 109 | else: 110 | raise Exception( 111 | f"Optimizer is not one of {self.__available_optimizers}" 112 | ) 113 | 114 | session.headers.update(self.headers) 115 | payload = { 116 | "prompt": conversation_prompt, 117 | "temperature": self.temperature, 118 | "top_p": self.top_p, 119 | } 120 | 121 | def for_stream(): 122 | response = session.post( 123 | self.chat_endpoint, json=payload, stream=True, timeout=self.timeout 124 | ) 125 | if not response.ok: 126 | raise exceptions.FailedToGenerateResponseError( 127 | f"Failed to generate response - ({response.status_code}, {response.reason})" 128 | ) 129 | 130 | message_load = "" 131 | for value in response.iter_lines( 132 | decode_unicode=True, 133 | delimiter="" if raw else "event: message\ndata:", 134 | chunk_size=self.stream_chunk_size, 135 | ): 136 | try: 137 | resp = json.loads(value) 138 | message_load += self.get_message(resp) 139 | resp["token"] = message_load 140 | self.last_response.update(resp) 141 | yield value if raw else resp 142 | except json.decoder.JSONDecodeError: 143 | pass 144 | self.conversation.update_chat_history( 145 | prompt, self.get_message(self.last_response) 146 | ) 147 | 148 | def for_non_stream(): 149 | # let's make use of stream 150 | for _ in for_stream(): 151 | pass 152 | return self.last_response 153 | 154 | return for_stream() if stream else for_non_stream() 155 | 156 | def chat( 157 | self, 158 | prompt: str, 159 | stream: bool = False, 160 | optimizer: str = None, 161 | conversationally: bool = False, 162 | ) -> str: 163 | """Generate response `str` 164 | Args: 165 | prompt (str): Prompt to be send. 166 | stream (bool, optional): Flag for streaming response. Defaults to False. 167 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 168 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 
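A brief sketch of the `KOBOLDAI` provider above. The endpoint is a public Hugging Face space, so availability and latency are not guaranteed.

```python
from pytgpt.koboldai import KOBOLDAI

bot = KOBOLDAI(temperature=0.7, top_p=0.95, timeout=60)

# Streaming: each yield is the accumulated reply carried under the "token" key.
for text_so_far in bot.chat("Write a haiku about rain", stream=True):
    print(text_so_far)

# Non-streaming: returns the final text in one call.
print(bot.chat("Explain the haiku"))
```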
169 | Returns: 170 | str: Response generated 171 | """ 172 | 173 | def for_stream(): 174 | for response in self.ask( 175 | prompt, True, optimizer=optimizer, conversationally=conversationally 176 | ): 177 | yield self.get_message(response) 178 | 179 | def for_non_stream(): 180 | return self.get_message( 181 | self.ask( 182 | prompt, 183 | False, 184 | optimizer=optimizer, 185 | conversationally=conversationally, 186 | ) 187 | ) 188 | 189 | return for_stream() if stream else for_non_stream() 190 | 191 | def get_message(self, response: dict) -> str: 192 | """Retrieves message only from response 193 | 194 | Args: 195 | response (dict): Response generated by `self.ask` 196 | 197 | Returns: 198 | str: Message extracted 199 | """ 200 | assert isinstance(response, dict), "Response should be of dict data-type only" 201 | return response.get("token") 202 | 203 | 204 | class AsyncKOBOLDAI(AsyncProvider): 205 | def __init__( 206 | self, 207 | is_conversation: bool = True, 208 | max_tokens: int = 600, 209 | temperature: float = 1, 210 | top_p: float = 1, 211 | timeout: int = 30, 212 | intro: str = None, 213 | filepath: str = None, 214 | update_file: bool = True, 215 | proxies: dict = {}, 216 | history_offset: int = 10250, 217 | act: str = None, 218 | ): 219 | """Instantiate TGPT 220 | 221 | Args: 222 | is_conversation (str, optional): Flag for chatting conversationally. Defaults to True. 223 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. 224 | temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.2. 225 | top_p (float, optional): Sampling threshold during inference time. Defaults to 0.999. 226 | timeout (int, optional): Http requesting timeout. Defaults to 30 227 | intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`. 228 | filepath (str, optional): Path to file containing conversation history. Defaults to None. 229 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. 230 | proxies (dict, optional) : Http reqiuest proxies (socks). Defaults to {}. 231 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. 232 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. 
233 | """ 234 | self.is_conversation = is_conversation 235 | self.max_tokens_to_sample = max_tokens 236 | self.temperature = temperature 237 | self.top_p = top_p 238 | self.chat_endpoint = ( 239 | "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream" 240 | ) 241 | self.stream_chunk_size = 64 242 | self.timeout = timeout 243 | self.last_response = {} 244 | self.headers = { 245 | "Content-Type": "application/json", 246 | "Accept": "application/json", 247 | } 248 | 249 | self.__available_optimizers = ( 250 | method 251 | for method in dir(Optimizers) 252 | if callable(getattr(Optimizers, method)) and not method.startswith("__") 253 | ) 254 | Conversation.intro = ( 255 | AwesomePrompts().get_act( 256 | act, raise_not_found=True, default=None, case_insensitive=True 257 | ) 258 | if act 259 | else intro or Conversation.intro 260 | ) 261 | self.conversation = Conversation( 262 | is_conversation, self.max_tokens_to_sample, filepath, update_file 263 | ) 264 | self.conversation.history_offset = history_offset 265 | self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies) 266 | 267 | async def ask( 268 | self, 269 | prompt: str, 270 | stream: bool = False, 271 | raw: bool = False, 272 | optimizer: str = None, 273 | conversationally: bool = False, 274 | ) -> dict | AsyncGenerator: 275 | """Chat with AI asynchronously. 276 | 277 | Args: 278 | prompt (str): Prompt to be send. 279 | stream (bool, optional): Flag for streaming response. Defaults to False. 280 | raw (bool, optional): Stream back raw response as received. Defaults to False. 281 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 282 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 283 | Returns: 284 | dict|AsyncGenerator : ai content 285 | ```json 286 | { 287 | "token" : "How may I assist you today?" 
288 | } 289 | ``` 290 | """ 291 | conversation_prompt = self.conversation.gen_complete_prompt(prompt) 292 | if optimizer: 293 | if optimizer in self.__available_optimizers: 294 | conversation_prompt = getattr(Optimizers, optimizer)( 295 | conversation_prompt if conversationally else prompt 296 | ) 297 | else: 298 | raise Exception( 299 | f"Optimizer is not one of {self.__available_optimizers}" 300 | ) 301 | 302 | payload = { 303 | "prompt": conversation_prompt, 304 | "temperature": self.temperature, 305 | "top_p": self.top_p, 306 | } 307 | 308 | async def for_stream(): 309 | async with self.session.stream( 310 | "POST", self.chat_endpoint, json=payload, timeout=self.timeout 311 | ) as response: 312 | if not response.is_success: 313 | raise exceptions.FailedToGenerateResponseError( 314 | f"Failed to generate response - ({response.status_code}, {response.reason_phrase})" 315 | ) 316 | 317 | message_load = "" 318 | async for value in response.aiter_lines(): 319 | try: 320 | resp = sanitize_stream(value) 321 | message_load += await self.get_message(resp) 322 | resp["token"] = message_load 323 | self.last_response.update(resp) 324 | yield value if raw else resp 325 | except json.decoder.JSONDecodeError: 326 | pass 327 | 328 | self.conversation.update_chat_history( 329 | prompt, await self.get_message(self.last_response) 330 | ) 331 | 332 | async def for_non_stream(): 333 | # let's make use of stream 334 | async for _ in for_stream(): 335 | pass 336 | return self.last_response 337 | 338 | return for_stream() if stream else await for_non_stream() 339 | 340 | async def chat( 341 | self, 342 | prompt: str, 343 | stream: bool = False, 344 | optimizer: str = None, 345 | conversationally: bool = False, 346 | ) -> str | AsyncGenerator: 347 | """Generate response `str` asynchronously. 348 | Args: 349 | prompt (str): Prompt to be send. 350 | stream (bool, optional): Flag for streaming response. Defaults to False. 351 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 352 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 
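And its asynchronous counterpart, `AsyncKOBOLDAI`, under the same endpoint assumption; the streaming call is awaited once to obtain the async generator.

```python
import asyncio
from pytgpt.koboldai import AsyncKOBOLDAI

async def demo():
    bot = AsyncKOBOLDAI(temperature=0.7, timeout=60)
    async for text_so_far in await bot.chat("Write a limerick about tea", stream=True):
        print(text_so_far)  # accumulated reply so far
    print(await bot.chat("Now make it rhyme better"))

asyncio.run(demo())
```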
353 | Returns: 354 | str: Response generated 355 | """ 356 | 357 | async def for_stream(): 358 | async_ask = await self.ask( 359 | prompt, True, optimizer=optimizer, conversationally=conversationally 360 | ) 361 | async for response in async_ask: 362 | yield await self.get_message(response) 363 | 364 | async def for_non_stream(): 365 | return await self.get_message( 366 | await self.ask( 367 | prompt, 368 | False, 369 | optimizer=optimizer, 370 | conversationally=conversationally, 371 | ) 372 | ) 373 | 374 | return for_stream() if stream else await for_non_stream() 375 | 376 | async def get_message(self, response: dict) -> str: 377 | """Retrieves message only from response 378 | 379 | Args: 380 | response (dict): Response generated by `self.ask` 381 | 382 | Returns: 383 | str: Message extracted 384 | """ 385 | assert isinstance(response, dict), "Response should be of dict data-type only" 386 | return response.get("token") 387 | 388 | 389 | if __name__ == "__main__": 390 | bot = KOBOLDAI() 391 | 392 | def main(): 393 | resp = bot.ask("hello") 394 | for value in resp: 395 | print(value) 396 | 397 | async def asyncmain(): 398 | bot = AsyncKOBOLDAI() 399 | resp = await bot.chat("hello", False) 400 | print(resp) 401 | # async for value in resp: 402 | # print(value) 403 | 404 | main() 405 | import asyncio 406 | 407 | asyncio.run(asyncmain()) 408 | -------------------------------------------------------------------------------- /src/pytgpt/novita/__init__.py: -------------------------------------------------------------------------------- 1 | from pytgpt.novita.main import NOVITA 2 | from pytgpt.novita.main import AsyncNOVITA 3 | from pytgpt.novita.main import available_models 4 | from pytgpt.openai.main import session 5 | 6 | 7 | __info__ = "Interact with NOVITA's model. " "API key is required" 8 | 9 | __all__ = ["NOVITA", "AsyncNOVITA", "available_models", "session"] 10 | -------------------------------------------------------------------------------- /src/pytgpt/novita/main.py: -------------------------------------------------------------------------------- 1 | from pytgpt.openai import OPENAI, AsyncOPENAI 2 | from pytgpt.exceptions import UnsupportedModelError 3 | 4 | model = "meta-llama/llama-3.1-8b-instruct" 5 | 6 | available_models = [ 7 | "meta-llama/llama-3.1-8b-instruct", 8 | "meta-llama/llama-3.1-70b-instruct", 9 | "meta-llama/llama-3.1-405b-instruct", 10 | "meta-llama/llama-3-8b-instruct", 11 | "meta-llama/llama-3-70b-instruct", 12 | "gryphe/mythomax-l2-13b", 13 | "google/gemma-2-9b-it", 14 | "mistralai/mistral-nemo", 15 | "microsoft/wizardlm-2-8x22b", 16 | "mistralai/mistral-7b-instruct", 17 | "microsoft/wizardlm-2-7b", 18 | "openchat/openchat-7b", 19 | "nousresearch/hermes-2-pro-llama-3-8b", 20 | "sao10k/l3-70b-euryale-v2.1", 21 | "cognitivecomputations/dolphin-mixtral-8x22b", 22 | "jondurbin/airoboros-l2-70b", 23 | "lzlv_70b", 24 | "nousresearch/nous-hermes-llama2-13b", 25 | "teknium/openhermes-2.5-mistral-7b", 26 | "sophosympatheia/midnight-rose-70b", 27 | "meta-llama/llama-3.1-8b-instruct-bf16", 28 | "qwen/qwen-2.5-72b-instruct", 29 | "sao10k/l31-70b-euryale-v2.2", 30 | "qwen/qwen-2-7b-instruct", 31 | "qwen/qwen-2-72b-instruct", 32 | ] 33 | 34 | 35 | class NOVITA(OPENAI): 36 | """Novita AI provider""" 37 | 38 | def __init__(self, *args, **kwargs): 39 | model_choice = kwargs.setdefault("model", model) 40 | if not model_choice in available_models: 41 | raise UnsupportedModelError( 42 | f"Model '{model_choice}' is not yet supported. 
Choose from {available_models}" 43 | ) 44 | super().__init__(*args, **kwargs) 45 | self.chat_endpoint = "https://api.novita.ai/v3/openai/chat/completions" 46 | 47 | 48 | class AsyncNOVITA(AsyncOPENAI): 49 | """Async Novita AI provider""" 50 | 51 | def __init__(self, *args, **kwargs): 52 | model_choice = kwargs.setdefault("model", model) 53 | if not model_choice in available_models: 54 | raise UnsupportedModelError( 55 | f"Model '{model_choice}' is not yet supported. Choose from {available_models}" 56 | ) 57 | super().__init__(*args, **kwargs) 58 | self.chat_endpoint = "https://api.novita.ai/v3/openai/chat/completions" 59 | -------------------------------------------------------------------------------- /src/pytgpt/openai/__init__.py: -------------------------------------------------------------------------------- 1 | from .main import OPENAI 2 | from .main import AsyncOPENAI 3 | from .main import session 4 | 5 | 6 | __info__ = "Interact with OpenAI's model. " "API key is required" 7 | 8 | __all__ = ["OPENAI", "AsyncOPENAI", "session"] 9 | -------------------------------------------------------------------------------- /src/pytgpt/perplexity/__init__.py: -------------------------------------------------------------------------------- 1 | from .main import PERPLEXITY 2 | from .main import session 3 | 4 | __info__ = "Interact with PHIND's LLM provider" 5 | 6 | __all__ = [ 7 | "PERPLEXITY", 8 | "session", 9 | ] 10 | -------------------------------------------------------------------------------- /src/pytgpt/perplexity/main.py: -------------------------------------------------------------------------------- 1 | import json 2 | import yaml 3 | import requests 4 | from pytgpt.utils import Optimizers 5 | from pytgpt.utils import Conversation 6 | from pytgpt.utils import AwesomePrompts 7 | from pytgpt.base import Provider 8 | from Helpingai_T2 import Perplexity 9 | from typing import Any 10 | 11 | session = requests.Session() 12 | 13 | 14 | class PERPLEXITY(Provider): 15 | def __init__( 16 | self, 17 | is_conversation: bool = True, 18 | max_tokens: int = 600, 19 | timeout: int = 30, 20 | intro: str = None, 21 | filepath: str = None, 22 | update_file: bool = True, 23 | proxies: dict = {}, 24 | history_offset: int = 10250, 25 | act: str = None, 26 | quiet: bool = False, 27 | ): 28 | """Instantiates PERPLEXITY 29 | 30 | Args: 31 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True 32 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600. 33 | timeout (int, optional): Http request timeout. Defaults to 30. 34 | intro (str, optional): Conversation introductory prompt. Defaults to None. 35 | filepath (str, optional): Path to file containing conversation history. Defaults to None. 36 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True. 37 | proxies (dict, optional): Http request proxies. Defaults to {}. 38 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250. 39 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. 40 | quiet (bool, optional): Ignore web search-results and yield final response only. Defaults to False. 
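As a quick illustration of the model guard in `NOVITA` above: an unsupported model name is rejected before the OpenAI-compatible base class (defined in `pytgpt.openai`, not reproduced here) is even initialised, so this check needs no credentials.

```python
from pytgpt.novita import NOVITA, available_models
from pytgpt.exceptions import UnsupportedModelError

print(available_models[:3])  # inspect a few supported model slugs

try:
    NOVITA(model="definitely-not-a-model")
except UnsupportedModelError as error:
    print(error)
```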
41 | """ 42 | self.max_tokens_to_sample = max_tokens 43 | self.is_conversation = is_conversation 44 | self.last_response = {} 45 | self.web_results: dict = {} 46 | self.quiet = quiet 47 | 48 | self.__available_optimizers = ( 49 | method 50 | for method in dir(Optimizers) 51 | if callable(getattr(Optimizers, method)) and not method.startswith("__") 52 | ) 53 | Conversation.intro = ( 54 | AwesomePrompts().get_act( 55 | act, raise_not_found=True, default=None, case_insensitive=True 56 | ) 57 | if act 58 | else intro or Conversation.intro 59 | ) 60 | self.conversation = Conversation( 61 | is_conversation, self.max_tokens_to_sample, filepath, update_file 62 | ) 63 | self.conversation.history_offset = history_offset 64 | 65 | def ask( 66 | self, 67 | prompt: str, 68 | stream: bool = False, 69 | raw: bool = False, 70 | optimizer: str = None, 71 | conversationally: bool = False, 72 | ) -> dict: 73 | """Chat with AI 74 | 75 | Args: 76 | prompt (str): Prompt to be send. 77 | stream (bool, optional): Flag for streaming response. Defaults to False. 78 | raw (bool, optional): Stream back raw response as received. Defaults to False. 79 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 80 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 81 | Returns: 82 | dict : {} 83 | ```json 84 | { 85 | "status": "pending", 86 | "uuid": "3604dfcc-611f-4b7d-989d-edca2a7233c7", 87 | "read_write_token": null, 88 | "frontend_context_uuid": "f6d43119-5231-481d-b692-f52e1f52d2c6", 89 | "final": false, 90 | "backend_uuid": "a6d6ec9e-da69-4841-af74-0de0409267a8", 91 | "media_items": [], 92 | "widget_data": [], 93 | "knowledge_cards": [], 94 | "expect_search_results": "false", 95 | "mode": "concise", 96 | "search_focus": "internet", 97 | "gpt4": false, 98 | "display_model": "turbo", 99 | "attachments": null, 100 | "answer": "", 101 | "web_results": [], 102 | "chunks": [], 103 | "extra_web_results": [] 104 | } 105 | ``` 106 | """ 107 | conversation_prompt = self.conversation.gen_complete_prompt(prompt) 108 | if optimizer: 109 | if optimizer in self.__available_optimizers: 110 | conversation_prompt = getattr(Optimizers, optimizer)( 111 | conversation_prompt if conversationally else prompt 112 | ) 113 | else: 114 | raise Exception( 115 | f"Optimizer is not one of {self.__available_optimizers}" 116 | ) 117 | 118 | def for_stream(): 119 | for response in Perplexity().generate_answer(conversation_prompt): 120 | yield json.dumps(response) if raw else response 121 | self.last_response.update(response) 122 | 123 | self.conversation.update_chat_history( 124 | prompt, 125 | self.get_message(self.last_response), 126 | ) 127 | 128 | def for_non_stream(): 129 | for _ in for_stream(): 130 | pass 131 | return self.last_response 132 | 133 | return for_stream() if stream else for_non_stream() 134 | 135 | def chat( 136 | self, 137 | prompt: str, 138 | stream: bool = False, 139 | optimizer: str = None, 140 | conversationally: bool = False, 141 | ) -> str: 142 | """Generate response `str` 143 | Args: 144 | prompt (str): Prompt to be send. 145 | stream (bool, optional): Flag for streaming response. Defaults to False. 146 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 147 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 
148 | Returns: 149 | str: Response generated 150 | """ 151 | 152 | def for_stream(): 153 | for response in self.ask( 154 | prompt, True, optimizer=optimizer, conversationally=conversationally 155 | ): 156 | yield self.get_message(response) 157 | 158 | def for_non_stream(): 159 | return self.get_message( 160 | self.ask( 161 | prompt, 162 | False, 163 | optimizer=optimizer, 164 | conversationally=conversationally, 165 | ) 166 | ) 167 | 168 | return for_stream() if stream else for_non_stream() 169 | 170 | def get_message(self, response: dict) -> str: 171 | """Retrieves message only from response 172 | 173 | Args: 174 | response (dict): Response generated by `self.ask` 175 | 176 | Returns: 177 | str: Message extracted 178 | """ 179 | assert isinstance(response, dict), "Response should be of dict data-type only" 180 | text_str: str = response.get("answer", "") 181 | 182 | def update_web_results(web_results: list) -> None: 183 | for index, results in enumerate(web_results, start=1): 184 | self.web_results[str(index) + ". " + results["name"]] = dict( 185 | url=results.get("url"), snippet=results.get("snippet") 186 | ) 187 | 188 | if response.get("text"): 189 | # last chunk 190 | target: dict[str, Any] = json.loads(response.get("text")) 191 | text_str = target.get("answer") 192 | web_results: list[dict] = target.get("web_results") 193 | self.web_results.clear() 194 | update_web_results(web_results) 195 | 196 | return ( 197 | text_str 198 | if self.quiet or not self.web_results 199 | else text_str + "\n\n# WEB-RESULTS\n\n" + yaml.dump(self.web_results) 200 | ) 201 | 202 | else: 203 | if str(response.get("expect_search_results")).lower() == "true": 204 | return ( 205 | text_str 206 | if self.quiet 207 | else text_str 208 | + "\n\n# WEB-RESULTS\n\n" 209 | + yaml.dump(response.get("web_results")) 210 | ) 211 | else: 212 | return text_str 213 | -------------------------------------------------------------------------------- /src/pytgpt/phind/__init__.py: -------------------------------------------------------------------------------- 1 | from .main import PHIND 2 | from .main import AsyncPHIND 3 | from .main import session 4 | 5 | __info__ = "Interact with PHIND's LLM provider" 6 | 7 | __all__ = [ 8 | "PHIND", 9 | "AsyncPHIND", 10 | "session", 11 | ] 12 | -------------------------------------------------------------------------------- /src/pytgpt/poe/__init__.py: -------------------------------------------------------------------------------- 1 | from .main import POE 2 | from .main import default_model 3 | 4 | __info__ = "Interact with Poe LLM provider" 5 | 6 | __all__ = [ 7 | "POE", 8 | "default_model", 9 | ] 10 | -------------------------------------------------------------------------------- /src/pytgpt/poe/main.py: -------------------------------------------------------------------------------- 1 | from poe_api_wrapper import PoeApi 2 | from poe_api_wrapper.api import BOTS_LIST 3 | from pytgpt.base import Provider 4 | from pytgpt.utils import Conversation 5 | from pytgpt.utils import Optimizers 6 | from pytgpt.utils import AwesomePrompts 7 | from pathlib import Path 8 | from json import loads 9 | from json import dumps 10 | from loguru import logger 11 | import logging 12 | 13 | logger.remove() 14 | 15 | default_model = "Assistant" 16 | 17 | 18 | class POE(Provider): 19 | def __init__( 20 | self, 21 | cookie: str, 22 | model: str = default_model, 23 | proxy: bool = False, 24 | timeout: int = 30, 25 | filepath: str = None, 26 | update_file: str = True, 27 | intro: str = None, 28 | act: str = None, 
29 | init: bool = True, 30 | ): 31 | """Initializes POE 32 | 33 | Args: 34 | cookie (str): Path to `poe.com.cookies.json` file or 'p-b' cookie-value. 35 | model (str, optional): Model name. Defaults to Assistant. 36 | proxy (bool, optional): Flag for Httpx request proxy. Defaults to False. 37 | timeout (int, optional): Http request timeout. Defaults to 30. 38 | filepath (str, optional): Path to save the chat history. Defaults to None. 39 | update_file (bool, optional): Flag for controlling chat history updates. Defaults to True. 40 | intro (str, optional): Conversation introductory prompt. Defaults to None. 41 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None. 42 | init (bool, optional): Resend the intro prompt. Defaults to True. 43 | """ 44 | assert isinstance( 45 | cookie, str 46 | ), f"Cookie must be of {str} datatype only not {type(cookie)}" 47 | assert ( 48 | model in BOTS_LIST.keys() 49 | ), f"model name '{model}' is not one of {', '.join(list(BOTS_LIST.keys()))}" 50 | cookie_path = Path(cookie) 51 | 52 | if cookie_path.exists() or any(["/" in cookie, ".json" in cookie]): 53 | cookie = None 54 | all_cookies = loads(cookie_path.read_text()) 55 | for entry in all_cookies: 56 | if entry["name"] == "p-b": 57 | cookie = entry["value"] 58 | assert ( 59 | cookie 60 | ), f'Required cookie value cannot be retrieved from the path "{cookie_path.as_posix()}"' 61 | 62 | if proxy: 63 | import poe_api_wrapper.proxies as proxies 64 | 65 | proxies.PROXY = True 66 | 67 | self.bot = BOTS_LIST[model] 68 | self.session = PoeApi(cookie) 69 | self.last_response = {} 70 | self.__available_optimizers = ( 71 | method 72 | for method in dir(Optimizers) 73 | if callable(getattr(Optimizers, method)) and not method.startswith("__") 74 | ) 75 | Conversation.intro = ( 76 | AwesomePrompts().get_act( 77 | act, raise_not_found=True, default=None, case_insensitive=True 78 | ) 79 | if act 80 | else intro or Conversation.intro 81 | ) 82 | self.conversation = Conversation( 83 | status=False, filepath=filepath, update_file=update_file 84 | ) 85 | if init: 86 | self.ask(self.conversation.intro) # Init 87 | 88 | def ask( 89 | self, 90 | prompt: str, 91 | stream: bool = False, 92 | raw: bool = False, 93 | optimizer: str = None, 94 | conversationally: bool = False, 95 | ) -> dict: 96 | """Chat with AI 97 | 98 | Args: 99 | prompt (str): Prompt to be sent. 100 | stream (bool, optional): Flag for streaming response. Defaults to False. 101 | raw (bool, optional): Stream back raw response as received. Defaults to False. 102 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 103 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 104 | Returns: 105 | dict : {} 106 | ```json 107 | { 108 | "id": "TWVzc2FnZToxMTU0MzgyNDQ1ODU=", 109 | "messageId": 115438244585, 110 | "creationTime": 1707777376544407, 111 | "clientNonce": null, 112 | "state": "complete", 113 | "text": "Hello! 
How can I assist you today?", 114 | "author": "capybara", 115 | "contentType": "text_markdown", 116 | "sourceType": "chat_input", 117 | "attachmentTruncationState": "not_truncated", 118 | "attachments": [], 119 | "vote": null, 120 | "suggestedReplies": [], 121 | "hasCitations": false, 122 | "__isNode": "Message", 123 | "textLengthOnCancellation": null, 124 | "chatCode": "21a2jn0yrq9phxiy478", 125 | "chatId": 328236777, 126 | "title": null, 127 | "response": "" 128 | } 129 | ``` 130 | """ 131 | conversation_prompt = self.conversation.gen_complete_prompt(prompt) 132 | if optimizer: 133 | if optimizer in self.__available_optimizers: 134 | conversation_prompt = getattr(Optimizers, optimizer)( 135 | conversation_prompt if conversationally else prompt 136 | ) 137 | else: 138 | raise Exception( 139 | f"Optimizer is not one of {self.__available_optimizers}" 140 | ) 141 | 142 | def for_stream(): 143 | for response in self.session.send_message(self.bot, conversation_prompt): 144 | if raw: 145 | yield dumps(response) 146 | else: 147 | yield response 148 | 149 | self.last_response.update(response) 150 | 151 | self.conversation.update_chat_history( 152 | prompt, 153 | self.get_message(self.last_response), 154 | force=True, 155 | ) 156 | 157 | def for_non_stream(): 158 | # let's make use of stream 159 | for _ in for_stream(): 160 | pass 161 | return self.last_response 162 | 163 | return for_stream() if stream else for_non_stream() 164 | 165 | def chat( 166 | self, 167 | prompt: str, 168 | stream: bool = False, 169 | optimizer: str = None, 170 | conversationally: bool = False, 171 | ) -> str: 172 | """Generate response `str` 173 | Args: 174 | prompt (str): Prompt to be sent. 175 | stream (bool, optional): Flag for streaming response. Defaults to False. 176 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None. 177 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False. 
178 | Returns: 179 | str: Response generated 180 | """ 181 | 182 | def for_stream(): 183 | for response in self.ask( 184 | prompt, True, optimizer=optimizer, conversationally=conversationally 185 | ): 186 | yield self.get_message(response) 187 | 188 | def for_non_stream(): 189 | return self.get_message( 190 | self.ask( 191 | prompt, 192 | False, 193 | optimizer=optimizer, 194 | conversationally=conversationally, 195 | ) 196 | ) 197 | 198 | return for_stream() if stream else for_non_stream() 199 | 200 | def get_message(self, response: dict) -> str: 201 | """Retrieves message only from response 202 | 203 | Args: 204 | response (dict): Response generated by `self.ask` 205 | 206 | Returns: 207 | str: Message extracted 208 | """ 209 | assert isinstance(response, dict), "Response should be of dict data-type only" 210 | return response["text"] 211 | -------------------------------------------------------------------------------- /src/pytgpt/providers.py: -------------------------------------------------------------------------------- 1 | from pytgpt.phind import PHIND 2 | from pytgpt.koboldai import KOBOLDAI 3 | from pytgpt.ai4chat import AI4CHAT 4 | from pytgpt.gpt4free import GPT4FREE 5 | from pytgpt.auto import AUTO 6 | 7 | tgpt_mapper: dict[str, object] = { 8 | "phind": PHIND, 9 | "koboldai": KOBOLDAI, 10 | "ai4chat": AI4CHAT, 11 | "auto": AUTO, 12 | "gpt4free": GPT4FREE, 13 | } 14 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Simatwa/python-tgpt/6a49c156d99eed6c5f7b3045823b60087a870d97/tests/__init__.py -------------------------------------------------------------------------------- /tests/base.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import types 3 | 4 | prompt = "This is a test prompt" 5 | 6 | 7 | class llmBase(unittest.TestCase): 8 | def setUp(self): 9 | """Override this""" 10 | self.bot = None 11 | self.prompt = prompt 12 | 13 | def test_ask_non_stream(self): 14 | """Ask non-stream""" 15 | resp = self.bot.ask(self.prompt) 16 | self.assertIsInstance(resp, dict) 17 | 18 | def test_ask_stream(self): 19 | """Ask stream""" 20 | resp = self.bot.ask(self.prompt, stream=True) 21 | self.assertIsInstance(resp, types.GeneratorType) 22 | for value in resp: 23 | self.assertIsInstance(value, dict) 24 | 25 | def test_ask_stream_raw(self): 26 | """Ask stream raw""" 27 | resp = self.bot.ask(self.prompt, True, True) 28 | self.assertIsInstance(resp, types.GeneratorType) 29 | 30 | for count, value in enumerate(resp): 31 | self.assertIsInstance(value, str) 32 | 33 | def test_get_message(self): 34 | """Response retrieval""" 35 | resp = self.bot.ask(self.prompt) 36 | self.assertIsInstance(self.bot.get_message(resp), str) 37 | 38 | def test_chat_non_stream(self): 39 | """Chat non-stream""" 40 | resp = self.bot.chat(self.prompt) 41 | self.assertIs(type(resp), str, f"{resp} is not str") 42 | 43 | def test_chat_stream(self): 44 | """Chat stream""" 45 | resp = self.bot.chat(self.prompt, stream=True) 46 | self.assertIsInstance(resp, types.GeneratorType) 47 | for value in resp: 48 | self.assertIsInstance(value, str) 49 | 50 | def test_optimizer_usage(self): 51 | """Code optimization""" 52 | resp = self.bot.chat(self.prompt, optimizer="code") 53 | self.assertIsInstance(resp, str) 54 | 55 | def test_last_response(self): 56 | """Last response availability""" 57 | 
self.bot.chat(self.prompt) 58 | self.assertIsInstance(self.bot.last_response, dict) 59 | 60 | 61 | class AsyncProviderBase(unittest.IsolatedAsyncioTestCase): 62 | 63 | def setUp(self): 64 | """Override this""" 65 | self.bot = None 66 | self.prompt = prompt 67 | 68 | async def test_ask_non_stream(self): 69 | """Async ask non-stream""" 70 | resp = await self.bot.ask(self.prompt) 71 | self.assertIsInstance(resp, dict) 72 | 73 | async def test_ask_stream(self): 74 | """Async ask stream""" 75 | resp = await self.bot.ask(self.prompt, stream=True) 76 | self.assertIsInstance(resp, types.AsyncGeneratorType) 77 | async for value in resp: 78 | self.assertIsInstance(value, dict) 79 | 80 | async def test_ask_stream_raw(self): 81 | """Async ask stream raw""" 82 | resp = await self.bot.ask(self.prompt, True, True) 83 | self.assertIsInstance(resp, types.AsyncGeneratorType) 84 | 85 | async for value in resp: 86 | self.assertIsInstance(value, str) 87 | 88 | async def test_get_message(self): 89 | """Async response retrieval""" 90 | resp = await self.bot.ask(self.prompt) 91 | self.assertIsInstance(await self.bot.get_message(resp), str) 92 | 93 | async def test_chat_non_stream(self): 94 | """Async chat non-stream""" 95 | resp = await self.bot.chat(self.prompt) 96 | self.assertIs(type(resp), str, f"{resp} is not str") 97 | 98 | async def test_chat_stream(self): 99 | """Async chat stream""" 100 | resp = await self.bot.chat(self.prompt, stream=True) 101 | self.assertIsInstance(resp, types.AsyncGeneratorType) 102 | async for value in resp: 103 | self.assertIsInstance(value, str) 104 | 105 | async def test_optimizer_usage(self): 106 | """Async code optimization""" 107 | resp = await self.bot.chat(self.prompt, optimizer="code") 108 | self.assertIsInstance(resp, str) 109 | 110 | async def test_last_response(self): 111 | """Async last response availability""" 112 | await self.bot.chat(self.prompt) 113 | self.assertIsInstance(self.bot.last_response, dict) 114 | 115 | 116 | if __name__ == "__main__": 117 | unittest.main() 118 | -------------------------------------------------------------------------------- /tests/test_ai4chat_tgpt.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from pytgpt.ai4chat import AI4CHAT 4 | from pytgpt.ai4chat import AsyncAI4CHAT 5 | 6 | 7 | class TestAI4Chat(base.llmBase): 8 | def setUp(self): 9 | self.bot = AI4CHAT() 10 | self.prompt = base.prompt 11 | 12 | 13 | class TestAsyncAI4Chat(base.AsyncProviderBase): 14 | 15 | def setUp(self): 16 | self.bot = AsyncAI4CHAT() 17 | self.prompt = base.prompt 18 | 19 | 20 | if __name__ == "__main__": 21 | unittest.main() 22 | -------------------------------------------------------------------------------- /tests/test_api.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | from fastapi.testclient import TestClient 4 | from fastapi import status 5 | from pytgpt.api import app 6 | 7 | 8 | class TestV1(unittest.TestCase): 9 | 10 | def setUp(self): 11 | self.client = TestClient(app, headers={"accept": "application/json"}) 12 | 13 | def test_home_redirect_to_docs(self): 14 | resp = self.client.get("/") 15 | self.assertEqual(resp.status_code, status.HTTP_200_OK) 16 | 17 | def test_server_status(self): 18 | """Server is running""" 19 | resp = self.client.get("/status") 20 | self.assertTrue(resp.json().get("is_alive")) 21 | 22 | def test_chat_providers(self): 23 | """Check supported providers""" 24 | resp = 
self.client.get("/v1/chat/providers") 25 | self.assertEqual(len(resp.json()), 2) 26 | 27 | def test_text_no_stream(self): 28 | """Non-streaming response""" 29 | resp = self.client.post( 30 | "/v1/chat/nostream", 31 | json={ 32 | "prompt": "Hello there", 33 | "provider": "phind", 34 | "whole": False, 35 | "max_tokens": 600, 36 | "timeout": 30, 37 | "proxy": None, 38 | }, 39 | ) 40 | self.assertIn("text", resp.json()) 41 | 42 | def test_text_no_stream_whole(self): 43 | """Raw body returned""" 44 | resp = self.client.post( 45 | "/v1/chat/nostream", 46 | json={ 47 | "prompt": "Hello there", 48 | "provider": "phind", 49 | "whole": True, 50 | "max_tokens": 600, 51 | "timeout": 30, 52 | "proxy": None, 53 | }, 54 | ) 55 | resp_dict = resp.json() 56 | self.assertIsNotNone(resp_dict["text"]) 57 | self.assertIsInstance(resp_dict["body"], dict) 58 | 59 | def test_text_stream(self): 60 | """Streaming response""" 61 | resp = self.client.post( 62 | "/v1/chat/stream", 63 | json={ 64 | "prompt": "Hello there", 65 | "provider": "phind", 66 | "whole": False, 67 | "max_tokens": 600, 68 | "timeout": 30, 69 | "proxy": None, 70 | }, 71 | ) 72 | self.assertTrue(resp.is_success) 73 | 74 | @unittest.skipUnless( 75 | os.getenv("PYTGPT_TEST_MEDIA", "") == "true", 76 | "PYTGPT_TEST_MEDIA environment variable is not set to 'true' ", 77 | ) 78 | def test_prompt_to_image_post(self): 79 | resp = self.client.post( 80 | "/v1/image", 81 | json={ 82 | "prompt": "Developed Nairobi in 3050", 83 | "amount": 2, 84 | "proxy": None, 85 | "timeout": 30, 86 | }, 87 | ) 88 | resp_dict = resp.json() 89 | self.assertIsNotNone(resp_dict.get("urls")) 90 | self.assertEqual(len(resp_dict["urls"]), 2) 91 | 92 | @unittest.skipUnless( 93 | os.getenv("PYTGPT_TEST_MEDIA", "") == "true", 94 | "PYTGPT_TEST_MEDIA environment variable is not set to 'true' ", 95 | ) 96 | def test_prompt_to_image_bytes_post(self): 97 | resp = self.client.post( 98 | "/v1/image/bytes", json={"prompt": "Jay Z performing", "timeout": 30} 99 | ) 100 | self.assertIsNotNone(resp.headers.get("Content-Disposition")) 101 | 102 | @unittest.skipUnless( 103 | os.getenv("PYTGPT_TEST_MEDIA", "") == "true", 104 | "PYTGPT_TEST_MEDIA environment variable is not set to 'true' ", 105 | ) 106 | def test_prompt_to_image_bytes_get(self): 107 | resp = self.client.get( 108 | "/v1/image/bytes", params={"prompt": "Jay Z performing", "timeout": 30} 109 | ) 110 | self.assertIsNotNone(resp.headers.get("Content-Disposition")) 111 | 112 | @unittest.skipUnless( 113 | os.getenv("PYTGPT_TEST_MEDIA", "") == "true", 114 | "PYTGPT_TEST_MEDIA environment variable is not set to 'true' ", 115 | ) 116 | def test_prompt_to_image_bytes_get_redirect(self): 117 | resp = self.client.get( 118 | "/v1/image/bytes", 119 | params={ 120 | "prompt": "Jay Z performing", 121 | }, 122 | ) 123 | self.assertEqual(resp.status_code, status.HTTP_200_OK) 124 | 125 | 126 | if __name__ == "__main__": 127 | unittest.main() 128 | -------------------------------------------------------------------------------- /tests/test_auto_tgpt.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from pytgpt.auto import AUTO 4 | from pytgpt.auto import AsyncAUTO 5 | 6 | 7 | class TestAuto(base.llmBase): 8 | def setUp(self): 9 | self.bot = AUTO() 10 | self.prompt = base.prompt 11 | 12 | 13 | class TestAsyncAuto(base.AsyncProviderBase): 14 | 15 | def setUp(self): 16 | self.bot = AsyncAUTO() 17 | self.prompt = base.prompt 18 | 19 | 20 | if __name__ == "__main__": 21 
| unittest.main() 22 | -------------------------------------------------------------------------------- /tests/test_deepseek.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from os import getenv 4 | from pytgpt.deepseek import DEEPSEEK 5 | from pytgpt.deepseek import AsyncDEEPSEEK 6 | 7 | API_KEY = getenv("DEEPSEEK_API_KEY") 8 | 9 | 10 | class TestDeepseek(base.llmBase): 11 | def setUp(self): 12 | self.bot = DEEPSEEK(API_KEY) 13 | self.prompt = base.prompt 14 | 15 | 16 | class TestAsyncDeepSeek(base.AsyncProviderBase): 17 | 18 | def setUp(self): 19 | self.bot = AsyncDEEPSEEK(API_KEY) 20 | self.prompt = base.prompt 21 | 22 | 23 | if __name__ == "__main__": 24 | unittest.main() 25 | -------------------------------------------------------------------------------- /tests/test_gpt4all.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from pytgpt.gpt4all import GPT4ALL 4 | from os import getenv 5 | 6 | 7 | class TestGpt4all(base.llmBase): 8 | def setUp(self): 9 | self.bot = GPT4ALL(getenv("model_path")) 10 | self.prompt = base.prompt 11 | 12 | 13 | if __name__ == "__main__": 14 | unittest.main() 15 | -------------------------------------------------------------------------------- /tests/test_gpt4free.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from pytgpt.gpt4free import GPT4FREE 4 | from pytgpt.gpt4free import AsyncGPT4FREE 5 | 6 | 7 | class TestGpt4free(base.llmBase): 8 | def setUp(self): 9 | self.bot = GPT4FREE() 10 | self.prompt = base.prompt 11 | 12 | 13 | class TestAsyncGpt4free(base.AsyncProviderBase): 14 | 15 | def setUp(self): 16 | self.bot = AsyncGPT4FREE() 17 | self.prompt = base.prompt 18 | 19 | 20 | if __name__ == "__main__": 21 | unittest.main() 22 | -------------------------------------------------------------------------------- /tests/test_groq.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from os import getenv 4 | from pytgpt.groq import GROQ 5 | from pytgpt.groq import AsyncGROQ 6 | 7 | 8 | class TestGroq(base.llmBase): 9 | def setUp(self): 10 | self.bot = GROQ(getenv("GROQ_API_KEY")) 11 | self.prompt = base.prompt 12 | 13 | 14 | class TestAsyncGroq(base.AsyncProviderBase): 15 | 16 | def setUp(self): 17 | self.bot = AsyncGROQ(getenv("GROQ_API_KEY")) 18 | self.prompt = base.prompt 19 | 20 | 21 | if __name__ == "__main__": 22 | unittest.main() 23 | -------------------------------------------------------------------------------- /tests/test_imager_tgpt.py: -------------------------------------------------------------------------------- 1 | import pytgpt.imager as imager 2 | from typing import List 3 | from typing import Generator 4 | import unittest 5 | import os 6 | 7 | 8 | class TestImager(unittest.TestCase): 9 | 10 | def setUp(self): 11 | self.imager = imager.Imager() 12 | self.prompt: str = "hello world" 13 | 14 | @unittest.skipUnless( 15 | os.getenv("PYTGPT_TEST_MEDIA", "") == "true", 16 | "PYTGPT_TEST_MEDIA environment variable is not set to 'true' ", 17 | ) 18 | def test_non_stream(self): 19 | """Test one photo generation""" 20 | img = self.imager.generate(self.prompt) 21 | self.assertIsInstance(img, List, "Image not generated") 22 | self.assertIsInstance(img[0], bytes, "Image not generated") 23 | 24 | 
@unittest.skipUnless( 25 | os.getenv("PYTGPT_TEST_MEDIA", "") == "true", 26 | "PYTGPT_TEST_MEDIA environment variable is not set to 'true' ", 27 | ) 28 | def test_stream(self): 29 | """Test multiple photo generation""" 30 | generator = self.imager.generate(self.prompt, amount=2, stream=True) 31 | self.assertIsInstance(generator, Generator) 32 | 33 | 34 | class TestProdia(TestImager): 35 | def setUp(self): 36 | self.imager = imager.Prodia() 37 | self.prompt: str = "hello world" 38 | 39 | 40 | # Tests for AsyncImager and AsyncProdia to be implemented 41 | if __name__ == "__main__": 42 | unittest.main() 43 | -------------------------------------------------------------------------------- /tests/test_koboldai.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from pytgpt.koboldai import KOBOLDAI 4 | from pytgpt.koboldai import AsyncKOBOLDAI 5 | 6 | 7 | class TestKoboldai(base.llmBase): 8 | def setUp(self): 9 | self.bot = KOBOLDAI() 10 | self.prompt = base.prompt 11 | 12 | 13 | class TestAsyncKoboldai(base.AsyncProviderBase): 14 | 15 | def setUp(self): 16 | self.bot = AsyncKOBOLDAI() 17 | self.prompt = base.prompt 18 | 19 | 20 | if __name__ == "__main__": 21 | unittest.main() 22 | -------------------------------------------------------------------------------- /tests/test_novita.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from os import getenv 4 | from pytgpt.novita import NOVITA 5 | from pytgpt.novita import AsyncNOVITA 6 | 7 | API_KEY = getenv("NOVITA_API_KEY") 8 | 9 | 10 | class TestNovita(base.llmBase): 11 | def setUp(self): 12 | self.bot = NOVITA(API_KEY) 13 | self.prompt = base.prompt 14 | 15 | 16 | class TestAsyncNovita(base.AsyncProviderBase): 17 | 18 | def setUp(self): 19 | self.bot = AsyncNOVITA(API_KEY) 20 | self.prompt = base.prompt 21 | 22 | 23 | if __name__ == "__main__": 24 | unittest.main() 25 | -------------------------------------------------------------------------------- /tests/test_openai.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from os import getenv 4 | from pytgpt.openai import OPENAI 5 | from pytgpt.openai import AsyncOPENAI 6 | 7 | API_KEY = getenv("OPENAI_API_KEY") 8 | 9 | 10 | class TestOpenai(base.llmBase): 11 | def setUp(self): 12 | self.bot = OPENAI(API_KEY) 13 | self.prompt = base.prompt 14 | 15 | 16 | class TestAsyncOpenai(base.AsyncProviderBase): 17 | 18 | def setUp(self): 19 | self.bot = AsyncOPENAI(API_KEY) 20 | self.prompt = base.prompt 21 | 22 | 23 | if __name__ == "__main__": 24 | unittest.main() 25 | -------------------------------------------------------------------------------- /tests/test_perplexity.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from pytgpt.perplexity import PERPLEXITY 4 | 5 | 6 | class TestPerplexity(base.llmBase): 7 | def setUp(self): 8 | self.bot = PERPLEXITY() 9 | self.prompt = base.prompt 10 | 11 | 12 | if __name__ == "__main__": 13 | unittest.main() 14 | -------------------------------------------------------------------------------- /tests/test_phind_tgpt.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from pytgpt.phind import PHIND 4 | from pytgpt.phind import AsyncPHIND 5 
| 6 | 7 | class TestPhind(base.llmBase): 8 | def setUp(self): 9 | self.bot = PHIND() 10 | self.prompt = base.prompt 11 | 12 | 13 | class TestAsyncPhind(base.AsyncProviderBase): 14 | 15 | def setUp(self): 16 | self.bot = AsyncPHIND() 17 | self.prompt = base.prompt 18 | 19 | 20 | if __name__ == "__main__": 21 | unittest.main() 22 | -------------------------------------------------------------------------------- /tests/test_poe.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from pytgpt.poe import POE 4 | 5 | 6 | class TestPoe(base.llmBase): 7 | def setUp(self): 8 | self.bot = POE() 9 | self.prompt = base.prompt 10 | 11 | 12 | if __name__ == "__main__": 13 | unittest.main() 14 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | from pytgpt.utils import Optimizers 4 | from pytgpt.utils import Conversation 5 | from pytgpt.utils import AwesomePrompts 6 | from pytgpt.utils import Audio 7 | from pytgpt.utils import suggest_query 8 | 9 | 10 | class TestOptimizers(unittest.TestCase): 11 | def setUp(self): 12 | self.optimizer = Optimizers 13 | self.prompt = "This is a prompt example" 14 | 15 | def test_code_optimization(self): 16 | """Test code prompt optimization""" 17 | prompt = self.optimizer.code(self.prompt) 18 | 19 | self.assertIn(self.prompt, prompt) 20 | 21 | def test_shell_command_optimization(self): 22 | """Test shell-command prompt optimization""" 23 | 24 | prompt = self.optimizer.shell_command(self.prompt) 25 | self.assertIn(self.prompt, prompt) 26 | 27 | 28 | class TestConversation(unittest.TestCase): 29 | def setUp(self): 30 | self.intro = "This is a test intro prompt" 31 | Conversation.intro = self.intro 32 | self.conversation = Conversation() 33 | self.user_prompt = "Hello there" 34 | self.llm_response = "Hello how may I help you?" 
35 | self.filepath = "test-path.txt" 36 | 37 | def get_file_content(self) -> str: 38 | if not os.path.exists(self.filepath): 39 | return "" 40 | with open(self.filepath) as fh: 41 | return fh.read() 42 | 43 | def test_intro_in_prompt(self): 44 | """Test intro presence in chat_history""" 45 | self.assertIn( 46 | self.intro, self.conversation.gen_complete_prompt(self.user_prompt) 47 | ) 48 | 49 | def test_history_based_prompt_generation(self): 50 | """Test combination of chat_history and new user prompt""" 51 | new_prompt = self.conversation.gen_complete_prompt(self.user_prompt) 52 | self.assertIn(self.user_prompt, new_prompt) 53 | 54 | def test_update_chat_history(self): 55 | """Test successful chat history update""" 56 | self.conversation.update_chat_history(self.user_prompt, self.llm_response) 57 | 58 | def test_saving_chat_history_in_file(self): 59 | """Test saving chat history in .txt file""" 60 | self.conversation = Conversation(filepath=self.filepath) 61 | self.assertTrue(os.path.exists(self.filepath)) 62 | before_history_text = self.get_file_content() 63 | self.conversation.update_chat_history(self.user_prompt, self.llm_response) 64 | after_history_text = self.get_file_content() 65 | self.assertNotEqual(before_history_text, after_history_text) 66 | 67 | def test_chat_history_offset(self): 68 | """Test truncating lengthy chats""" 69 | maximum_chat_length = 400 70 | range_amount = ( 71 | int(maximum_chat_length / (len(self.user_prompt) + len(self.llm_response))) 72 | + 10 73 | ) 74 | self.conversation.history_offset = maximum_chat_length 75 | for _ in range(range_amount): 76 | self.conversation.update_chat_history(self.user_prompt, self.llm_response) 77 | incomplete_conversation = self.conversation.gen_complete_prompt( 78 | self.user_prompt 79 | ) 80 | self.assertTrue(len(incomplete_conversation) < maximum_chat_length) 81 | 82 | def tearDown(self): 83 | if os.path.exists(self.filepath): 84 | os.remove(self.filepath) 85 | 86 | 87 | class TestAwesomePrompt(unittest.TestCase): 88 | def setUp(self): 89 | self.awesome = AwesomePrompts() 90 | self.intro = "This is an intro example" 91 | self.key = "Excel Sheet" 92 | 93 | def test_all_acts_presence(self): 94 | """Tests all-acts availability""" 95 | self.assertIsInstance(self.awesome.all_acts, dict) 96 | 97 | def test_get_act_by_string(self): 98 | """Tests accessibility of act by str key""" 99 | self.assertIsNotNone(self.awesome.get_act(self.key, default=None)) 100 | 101 | def test_get_act_by_int(self): 102 | """Test accessibility of act by int""" 103 | self.assertIsNotNone(self.awesome.get_act(1, default=None)) 104 | 105 | def test_get_act_by_string_case_insensitive(self): 106 | """Test awesome search case-insensitively""" 107 | self.assertIsNotNone( 108 | self.awesome.get_act(self.key.lower(), case_insensitive=True) 109 | ) 110 | 111 | # @unittest.expectedFailure 112 | def test_get_act_by_string_case_sensitive(self): 113 | """Test awesome search case-sensitively""" 114 | self.assertIsNotNone( 115 | self.awesome.get_act(self.key, default=None, case_insensitive=False) 116 | ) 117 | 118 | def test_get_act_by_string_unavailable_key(self): 119 | """Test get_act raises KeyError""" 120 | with self.assertRaises(KeyError): 121 | self.awesome.get_act( 122 | "Som ranDom sTrInG here", default=None, raise_not_found=True 123 | ) 124 | 125 | 126 | class TestAudio(unittest.TestCase): 127 | """Text to speech synthesis""" 128 | 129 | def setUp(self): 130 | self.audio_generator = Audio() 131 | self.text = "This is a speech synthesis test" 132 | 133 | 
@unittest.skipUnless( 134 | os.getenv("PYTGPT_TEST_MEDIA", "") == "true", 135 | "PYTGPT_TEST_MEDIA environment variable is not set to 'true' ", 136 | ) 137 | def test_text_to_audio(self): 138 | """Speech synthesis""" 139 | voice_bytes = self.audio_generator.text_to_audio( 140 | self.text, 141 | ) 142 | self.assertIs(type(voice_bytes), bytes) 143 | 144 | @unittest.skipUnless( 145 | os.getenv("PYTGPT_TEST_MEDIA", "") == "true", 146 | "PYTGPT_TEST_MEDIA environment variable is not set to 'true' ", 147 | ) 148 | def test_text_to_audio_save_to(self): 149 | """Save speech to a file""" 150 | saved_to = self.audio_generator.text_to_audio(self.text, auto=True) 151 | self.assertIsInstance(saved_to, str) 152 | self.assertTrue(os.path.exists(saved_to)) 153 | os.remove(saved_to) 154 | 155 | 156 | class TestOthers(unittest.TestCase): 157 | 158 | def setUp(self): 159 | pass 160 | 161 | def test_query_suggestions(self): 162 | suggestions = suggest_query("hello", die_silently=True) 163 | self.assertIsInstance(suggestions, list) 164 | 165 | 166 | if __name__ == "__main__": 167 | unittest.main() 168 | -------------------------------------------------------------------------------- /tests/test_webchatgpt.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.base as base 3 | from os import getenv 4 | from pytgpt.webchatgpt import WEBCHATGPT 5 | 6 | 7 | class TestWebchatgpt(base.llmBase): 8 | def setUp(self): 9 | self.bot = WEBCHATGPT(getenv("openai_cookie_file")) 10 | self.prompt = base.prompt 11 | 12 | 13 | if __name__ == "__main__": 14 | unittest.main() 15 | --------------------------------------------------------------------------------
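
A minimal usage sketch of the POE provider defined in src/pytgpt/poe/main.py above (illustrative only, not part of the repository files). It assumes pytgpt and poe_api_wrapper are installed, and "poe.com.cookies.json" is a placeholder path to a cookie export containing a valid Poe "p-b" value, as described in the POE.__init__ docstring:

from pytgpt.poe import POE

# The cookie argument may be a path to an exported poe.com.cookies.json file
# or the raw "p-b" cookie value itself (placeholder path used here).
bot = POE(cookie="poe.com.cookies.json", model="Assistant")

# chat() returns the generated reply as a plain string.
print(bot.chat("Hello there"))

# With stream=True, chat() yields the response text for each streamed update.
for text in bot.chat("What can you do?", stream=True):
    print(text)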