├── t.py
├── __pycache__
│   └── t.cpython-311.pyc
├── ai4free
│   ├── __pycache__
│   │   ├── Phind.cpython-311.pyc
│   │   ├── groq.cpython-311.pyc
│   │   ├── leo.cpython-311.pyc
│   │   ├── reka.cpython-311.pyc
│   │   ├── yep.cpython-311.pyc
│   │   ├── you.cpython-311.pyc
│   │   ├── Cohere.cpython-311.pyc
│   │   ├── OpenGPT.cpython-311.pyc
│   │   ├── Openai.cpython-311.pyc
│   │   ├── BasedGPT.cpython-311.pyc
│   │   ├── Berlin4h.cpython-311.pyc
│   │   ├── Blackbox.cpython-311.pyc
│   │   ├── ChatGPTUK.cpython-311.pyc
│   │   ├── Deepinfra.cpython-311.pyc
│   │   ├── KOBOLDAI.cpython-311.pyc
│   │   ├── ThinkAnyAI.cpython-311.pyc
│   │   ├── __init__.cpython-311.pyc
│   │   ├── perplexity.cpython-311.pyc
│   │   └── Deepinfra_txt.cpython-311.pyc
│   ├── __init__.py
│   ├── you.py
│   ├── Cohere.py
│   ├── perplexity.py
│   ├── reka.py
│   ├── BasedGPT.py
│   ├── Berlin4h.py
│   ├── ChatGPTUK.py
│   ├── ThinkAnyAI.py
│   ├── KOBOLDAI.py
│   ├── Blackbox.py
│   ├── OpenGPT.py
│   ├── Deepinfra.py
│   ├── Phind.py
│   └── yep.py
├── WhatsApp Image 2024-05-19 at 19.01.01_47251a0f.jpg
├── .github
│   └── ISSUE_TEMPLATE
│       ├── feature_request.md
│       └── bug_report.md
├── setup.py
├── LICENSE.md
└── README.md
/t.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/__pycache__/t.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/__pycache__/t.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/Phind.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/Phind.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/groq.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/groq.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/leo.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/leo.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/reka.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/reka.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/yep.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/yep.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/you.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/you.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/Cohere.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/Cohere.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/OpenGPT.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/OpenGPT.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/Openai.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/Openai.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/BasedGPT.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/BasedGPT.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/Berlin4h.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/Berlin4h.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/Blackbox.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/Blackbox.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/ChatGPTUK.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/ChatGPTUK.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/Deepinfra.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/Deepinfra.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/KOBOLDAI.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/KOBOLDAI.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/ThinkAnyAI.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/ThinkAnyAI.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/__init__.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/__init__.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__pycache__/perplexity.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/perplexity.cpython-311.pyc
--------------------------------------------------------------------------------
/WhatsApp Image 2024-05-19 at 19.01.01_47251a0f.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/WhatsApp Image 2024-05-19 at 19.01.01_47251a0f.jpg
--------------------------------------------------------------------------------
/ai4free/__pycache__/Deepinfra_txt.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Devs-Do-Code/ai4free/HEAD/ai4free/__pycache__/Deepinfra_txt.cpython-311.pyc
--------------------------------------------------------------------------------
/ai4free/__init__.py:
--------------------------------------------------------------------------------
1 | from .Cohere import *
2 | from .reka import *
3 | from .groq import *
4 | from .leo import *
5 | from .KOBOLDAI import *
6 | from .Openai import *
7 | from .OpenGPT import *
8 | from .Blackbox import *
9 | from .Phind import *
10 | from .yep import *
11 | from .you import *
12 | from .perplexity import *
13 | from .ThinkAnyAI import *
14 | from .Berlin4h import *
15 | from .ChatGPTUK import *
16 | from .BasedGPT import *
17 | from .Deepinfra import *
--------------------------------------------------------------------------------
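
The wildcard imports above mean every provider class defined in the modules that follow is re-exported at package level. A quick way to see what the package exposes (a minimal sketch assuming ai4free and its dependencies are installed; no __all__ is shown, so the exact list may vary):

    import ai4free

    # Provider classes such as YouChat, Cohere, PERPLEXITY, REKA, BasedGPT and
    # Berlin4h are importable directly from the top-level package.
    print([name for name in dir(ai4free) if not name.startswith("_")])
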
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: "[NEW-Feature]"
5 | labels: enhancement
6 | assignees: SreejanPersonal
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: "[BUG]"
5 | labels: bug
6 | assignees: OE-LUCIFER
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Browser [e.g. chrome, safari]
29 | - Version [e.g. 22]
30 |
31 | **Additional context**
32 | Add any other context about the problem here.
33 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | with open("README.md", encoding="utf-8") as f:
4 | README = f.read()
5 |
6 | setup(
7 | name="ai4free",
8 | version="0.7",
9 |     description="Collection of free AI providers",
10 | long_description=README,
11 | long_description_content_type="text/markdown",
12 | author="OEvortex",
13 | author_email="helpingai5@gmail.com", # Specify the email for the first author
14 | packages=find_packages(),
15 | classifiers=[
16 | 'Development Status :: 5 - Production/Stable',
17 | 'Intended Audience :: Developers',
18 | 'License :: Other/Proprietary License',
19 | 'Operating System :: OS Independent',
20 | 'Programming Language :: Python :: 3',
21 | 'Programming Language :: Python :: 3.8',
22 | 'Programming Language :: Python :: 3.9',
23 | 'Programming Language :: Python :: 3.10',
24 | 'Programming Language :: Python :: 3.11',
25 | 'Programming Language :: Python :: 3.12',
26 | 'Programming Language :: Python :: Implementation :: CPython',
27 | 'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
28 | 'Topic :: Software Development :: Libraries :: Python Modules',
29 | ],
30 | install_requires=[
31 | "tls_client",
32 | "webscout",
33 | ],
34 | license='HelpingAI Simplified Universal License',
35 | project_urls={
36 | 'Source': 'https://github.com/Devs-Do-Code/ai4free',
37 | 'Tracker': 'https://github.com/Devs-Do-Code/ai4free/issues',
38 |         'YouTube (OEvortex)': 'https://youtube.com/@OEvortex',
39 |         'YouTube (DevsDoCode)': 'https://www.youtube.com/@DevsDoCode'
40 | },
41 | )
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | ****************************************
2 | # Ai4Free Simplified Universal License
3 | ****************************************
4 |
5 | Version 1.0
6 |
7 | ### Introduction
8 |
9 | This Ai4Free Simplified Universal License (HSUL) governs Ai4Free's content, including computer programs, scripts, datasets, documents, images, audio recordings, videos, and other digital assets. The HSUL provides simple, universal terms for accessing, modifying, and sharing resources while embracing ethical development practices.
10 |
11 | ### Grant of Rights
12 |
13 | Under the HSUL, Ai4Free authorizes you to copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Content, subject to the terms and conditions detailed in this document.
14 |
15 | ### Conditions
16 |
17 | To qualify for the rights granted in section 1, you must strictly adhere to the following conditions:
18 |
19 | 2.1. **Redistributions of Source Code.**
20 | If you redistribute the Source Code, you must include the entire HSUL with your distribution. Furthermore, you must add prominent notifications in all affected files stating:
21 |
22 | > "This Work is released under the Ai4Free Simplified Universal License v1.0."
23 |
24 | 2.2. **Binary Form Redistributions.**
25 | If you distribute Binaries generated from the Source Code, you must ensure the inclusion of the following statement in your distribution:
26 |
27 | > "This Work is based upon the Ai4Free Simplified Universally Licensed Work, under the Ai4Free Simplified Universal License v1.0."
28 |
29 | 2.3. **Notification of Changes.**
30 | Clearly indicate any alterations you introduce to the Source Code or Documentation via prominent comments detailing the nature and scope of the change(s). Reference the date and originator of the modifications.
31 |
32 | 2.4. **Branding Attribution.**
33 | Do not remove or alter any Ai4Free branding, logos, or notices included in the Content without explicit prior consent from Ai4Free.
34 |
35 | 2.5. **Exclusion of Warranty.**
36 | The Content is delivered "AS IS," bereft of any implicit guarantee, including — though not constrained to — warranties pertaining to marketability, applicability for a particular purpose, and non-infringement.
37 |
38 | 2.6. **Limitation of Liability.**
39 | To the maximum extent allowed by law, neither Ai4Free nor any contributor shall bear responsibility for any loss, personal injury, property damage, indirect, special, incidental, or consequential damages stemming from or relating to the Content or its employment.
40 |
41 | 2.7. **Governing Law.**
42 | This HSUL shall be managed and construed according to the laws of the jurisdiction where Ai4Free primarily operates.
43 |
44 | ### Definitions
45 |
46 | 3.1. **"Source Code"** signifies the preferred form for editing the Content, typically represented by human-readable programming languages, scripts, or documentation formats.
47 |
48 | 3.2. **"Binaries"** denote compiled forms of the Source Code, executables, libraries, or similar artifacts built from the Source Code.
49 |
50 | By leveraging this Content, you confirm your approval of the HSUL and pledge to honor its terms and conditions. If you disagree with the HSUL's rules, refrain from engaging with the Content.
51 |
52 | Copyright (c) 2023 Devs Do Code (Sree). All rights reserved.
53 |
--------------------------------------------------------------------------------
/ai4free/you.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | import click
4 | import requests
5 | from requests import get
6 | from uuid import uuid4
7 | from re import findall
8 | from requests.exceptions import RequestException
9 | from curl_cffi.requests import get, RequestsError
10 | import g4f
11 | from random import randint
12 | from PIL import Image
13 | import io
14 | import re
15 | import json
16 | import yaml
17 | from webscout.AIutel import Optimizers
18 | from webscout.AIutel import Conversation
19 | from webscout.AIutel import AwesomePrompts, sanitize_stream
20 | from webscout.AIbase import Provider, AsyncProvider
21 | from webscout import exceptions
22 | from typing import Any, AsyncGenerator
23 | import logging
24 | import httpx
25 | class YouChat(Provider):
26 | def __init__(
27 | self,
28 | is_conversation: bool = True,
29 | max_tokens: int = 600,
30 | timeout: int = 30,
31 | intro: str = None,
32 | filepath: str = None,
33 | update_file: bool = True,
34 | proxies: dict = {},
35 | history_offset: int = 10250,
36 | act: str = None,
37 | ):
38 | self.session = requests.Session()
39 | self.is_conversation = is_conversation
40 | self.max_tokens_to_sample = max_tokens
41 | self.chat_endpoint = "https://you.com/api/streamingSearch"
42 | self.stream_chunk_size = 64
43 | self.timeout = timeout
44 | self.last_response = {}
45 |
46 | self.payload = {
47 | "q": "",
48 | "page": 1,
49 | "count": 10,
50 | "safeSearch": "Off",
51 | "onShoppingPage": False,
52 | "mkt": "",
53 | "responseFilter": "WebPages,Translations,TimeZone,Computation,RelatedSearches",
54 | "domain": "youchat",
55 | "queryTraceId": uuid.uuid4(),
56 | "conversationTurnId": uuid.uuid4(),
57 | "pastChatLength": 0,
58 | "selectedChatMode": "default",
59 | "chat": "[]",
60 | }
61 |
62 | self.headers = {
63 | "cache-control": "no-cache",
64 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
65 | 'Referer': f'https://you.com/search?q={self.payload["q"]}&fromSearchBar=true&tbm=youchat&chatMode=default'
66 | }
67 |
68 | self.__available_optimizers = (
69 | method
70 | for method in dir(Optimizers)
71 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
72 | )
73 | self.session.headers.update(self.headers)
74 | Conversation.intro = (
75 | AwesomePrompts().get_act(
76 | act, raise_not_found=True, default=None, case_insensitive=True
77 | )
78 | if act
79 | else intro or Conversation.intro
80 | )
81 | self.conversation = Conversation(
82 | is_conversation, self.max_tokens_to_sample, filepath, update_file
83 | )
84 | self.conversation.history_offset = history_offset
85 | self.session.proxies = proxies
86 |
87 | def ask(
88 | self,
89 | prompt: str,
90 | stream: bool = False,
91 | raw: bool = False,
92 | optimizer: str = None,
93 | conversationally: bool = False,
94 | ) -> dict:
95 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
96 | if optimizer:
97 | if optimizer in self.__available_optimizers:
98 | conversation_prompt = getattr(Optimizers, optimizer)(
99 | conversation_prompt if conversationally else prompt
100 | )
101 | else:
102 | raise Exception(
103 | f"Optimizer is not one of {self.__available_optimizers}"
104 | )
105 | self.session.headers.update(self.headers)
106 | self.session.headers.update(
107 | dict(
108 | cookie=f"safesearch_guest=Off; uuid_guest={str(uuid4())}",
109 | )
110 | )
111 |         self.payload["q"] = conversation_prompt
112 |
113 | def for_stream():
114 | response = self.session.get(
115 | self.chat_endpoint,
116 | params=self.payload,
117 | stream=True,
118 | timeout=self.timeout,
119 | )
120 |
121 | if not response.ok:
122 | raise exceptions.FailedToGenerateResponseError(
123 | f"Failed to generate response - ({response.status_code}, {response.reason})"
124 | )
125 |
126 |             streaming_response = ""
127 |             for line in response.iter_lines(decode_unicode=True, chunk_size=64):
128 |                 if line:
129 |                     modified_value = re.sub("data:", "", line)
130 |                     try:
131 |                         json_modified_value = json.loads(modified_value)
132 |                         if "youChatToken" in json_modified_value:
133 |                             token = json_modified_value["youChatToken"]
134 |                             streaming_response += token
135 |                             yield token if raw else dict(text=streaming_response)
136 |                     except json.decoder.JSONDecodeError:
137 |                         continue
138 |             self.last_response.update(dict(text=streaming_response))
139 |             self.conversation.update_chat_history(
140 |                 prompt, self.get_message(self.last_response)
141 |             )
142 |
143 |
144 | def for_non_stream():
145 | for _ in for_stream():
146 | pass
147 | return self.last_response
148 |
149 | return for_stream() if stream else for_non_stream()
150 |
151 | def chat(
152 | self,
153 | prompt: str,
154 | stream: bool = False,
155 | optimizer: str = None,
156 | conversationally: bool = False,
157 | ) -> str:
158 | """Generate response `str`
159 | Args:
160 | prompt (str): Prompt to be send.
161 | stream (bool, optional): Flag for streaming response. Defaults to False.
162 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
163 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
164 | Returns:
165 | str: Response generated
166 | """
167 |
185 | def for_stream():
186 | for response in self.ask(
187 | prompt, True, optimizer=optimizer, conversationally=conversationally
188 | ):
189 | yield self.get_message(response)
190 |
191 | def for_non_stream():
192 | return self.get_message(
193 | self.ask(
194 | prompt,
195 | False,
196 | optimizer=optimizer,
197 | conversationally=conversationally,
198 | )
199 | )
200 |
201 | return for_stream() if stream else for_non_stream()
202 |
203 | def get_message(self, response: dict) -> str:
204 | """Retrieves message only from response
205 |
206 | Args:
207 | response (dict): Response generated by `self.ask`
208 |
209 | Returns:
210 | str: Message extracted
211 | """
212 | assert isinstance(response, dict), "Response should be of dict data-type only"
213 | return response["text"]
--------------------------------------------------------------------------------
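
A minimal usage sketch for the YouChat provider defined above. It assumes the package and its webscout dependency are installed and that the unofficial you.com streamingSearch endpoint is still reachable; the prompt strings are placeholders:

    from ai4free import YouChat

    bot = YouChat(timeout=30)

    # Non-streaming: chat() returns the complete reply as a string.
    print(bot.chat("What is the capital of France?"))

    # Streaming: ask(stream=True) yields dicts whose "text" value grows as tokens arrive.
    reply = ""
    for chunk in bot.ask("Tell me a short joke", stream=True):
        reply = chunk["text"]
    print(reply)
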
/ai4free/Cohere.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | import requests
4 | from requests import get
5 | from uuid import uuid4
6 | from re import findall
7 | from requests.exceptions import RequestException
8 | from curl_cffi.requests import get, RequestsError
9 | from random import randint
10 | import json
11 | import yaml
12 | from webscout.AIutel import Optimizers
13 | from webscout.AIutel import Conversation
14 | from webscout.AIutel import AwesomePrompts, sanitize_stream
15 | from webscout.AIbase import Provider, AsyncProvider
16 | from Helpingai_T2 import Perplexity
17 | from webscout import exceptions
18 | from typing import Any, AsyncGenerator
19 | class Cohere(Provider):
20 | def __init__(
21 | self,
22 | api_key: str,
23 | is_conversation: bool = True,
24 | max_tokens: int = 600,
25 | model: str = "command-r-plus",
26 | temperature: float = 0.7,
27 | system_prompt: str = "You are helpful AI",
28 | timeout: int = 30,
29 | intro: str = None,
30 | filepath: str = None,
31 | update_file: bool = True,
32 | proxies: dict = {},
33 | history_offset: int = 10250,
34 | act: str = None,
35 | top_k: int = -1,
36 | top_p: float = 0.999,
37 | ):
38 | """Initializes Cohere
39 |
40 | Args:
41 | api_key (str): Cohere API key.
42 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
43 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
44 | model (str, optional): Model to use for generating text. Defaults to "command-r-plus".
45 | temperature (float, optional): Diversity of the generated text. Higher values produce more diverse outputs.
46 | Defaults to 0.7.
47 | system_prompt (str, optional): A system_prompt or context to set the style or tone of the generated text.
48 | Defaults to "You are helpful AI".
49 | timeout (int, optional): Http request timeout. Defaults to 30.
50 | intro (str, optional): Conversation introductory prompt. Defaults to None.
51 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
52 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
53 | proxies (dict, optional): Http request proxies. Defaults to {}.
54 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
55 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
56 | """
57 | self.session = requests.Session()
58 | self.is_conversation = is_conversation
59 | self.max_tokens_to_sample = max_tokens
60 | self.api_key = api_key
61 | self.model = model
62 | self.temperature = temperature
63 | self.system_prompt = system_prompt
64 | self.chat_endpoint = "https://production.api.os.cohere.ai/coral/v1/chat"
65 | self.stream_chunk_size = 64
66 | self.timeout = timeout
67 | self.last_response = {}
68 | self.headers = {
69 | "Content-Type": "application/json",
70 | "Authorization": f"Bearer {self.api_key}",
71 | }
72 |
73 | self.__available_optimizers = (
74 | method
75 | for method in dir(Optimizers)
76 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
77 | )
78 | self.session.headers.update(self.headers)
79 | Conversation.intro = (
80 | AwesomePrompts().get_act(
81 | act, raise_not_found=True, default=None, case_insensitive=True
82 | )
83 | if act
84 | else intro or Conversation.intro
85 | )
86 | self.conversation = Conversation(
87 | is_conversation, self.max_tokens_to_sample, filepath, update_file
88 | )
89 | self.conversation.history_offset = history_offset
90 | self.session.proxies = proxies
91 |
92 | def ask(
93 | self,
94 | prompt: str,
95 | stream: bool = False,
96 | raw: bool = False,
97 | optimizer: str = None,
98 | conversationally: bool = False,
99 | ) -> dict:
100 | """Chat with AI
101 |
102 | Args:
103 | prompt (str): Prompt to be send.
104 | stream (bool, optional): Flag for streaming response. Defaults to False.
105 | raw (bool, optional): Stream back raw response as received. Defaults to False.
106 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
107 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
108 | Returns:
109 | dict : {}
110 | ```json
111 | {
112 | "text" : "How may I assist you today?"
113 | }
114 | ```
115 | """
116 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
117 | if optimizer:
118 | if optimizer in self.__available_optimizers:
119 | conversation_prompt = getattr(Optimizers, optimizer)(
120 | conversation_prompt if conversationally else prompt
121 | )
122 | else:
123 | raise Exception(
124 | f"Optimizer is not one of {self.__available_optimizers}"
125 | )
126 | self.session.headers.update(self.headers)
127 | payload = {
128 | "message": conversation_prompt,
129 | "model": self.model,
130 | "temperature": self.temperature,
131 | "preamble": self.system_prompt,
132 | }
133 |
134 | def for_stream():
135 | response = self.session.post(
136 | self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
137 | )
138 | if not response.ok:
139 | raise Exception(
140 | f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
141 | )
142 |
143 | for value in response.iter_lines(
144 | decode_unicode=True,
145 | chunk_size=self.stream_chunk_size,
146 | ):
147 | try:
148 | resp = json.loads(value.strip().split("\n")[-1])
149 | self.last_response.update(resp)
150 | yield value if raw else resp
151 | except json.decoder.JSONDecodeError:
152 | pass
153 | self.conversation.update_chat_history(
154 | prompt, self.get_message(self.last_response)
155 | )
156 |
157 | def for_non_stream():
158 | # let's make use of stream
159 | for _ in for_stream():
160 | pass
161 | return self.last_response
162 |
163 | return for_stream() if stream else for_non_stream()
164 |
165 | def chat(
166 | self,
167 | prompt: str,
168 | stream: bool = False,
169 | optimizer: str = None,
170 | conversationally: bool = False,
171 | ) -> str:
172 | """Generate response `str`
173 | Args:
174 | prompt (str): Prompt to be send.
175 | stream (bool, optional): Flag for streaming response. Defaults to False.
176 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
177 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
178 | Returns:
179 | str: Response generated
180 | """
181 |
182 | def for_stream():
183 | for response in self.ask(
184 | prompt, True, optimizer=optimizer, conversationally=conversationally
185 | ):
186 | yield self.get_message(response)
187 |
188 | def for_non_stream():
189 | return self.get_message(
190 | self.ask(
191 | prompt,
192 | False,
193 | optimizer=optimizer,
194 | conversationally=conversationally,
195 | )
196 | )
197 |
198 | return for_stream() if stream else for_non_stream()
199 |
200 | def get_message(self, response: dict) -> str:
201 | """Retrieves message only from response
202 |
203 | Args:
204 | response (dict): Response generated by `self.ask`
205 |
206 | Returns:
207 | str: Message extracted
208 | """
209 | assert isinstance(response, dict), "Response should be of dict data-type only"
210 | return response["result"]["chatStreamEndEvent"]["response"]["text"]
--------------------------------------------------------------------------------
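
A usage sketch for the Cohere provider above. The api_key value is a placeholder you must replace with a real Cohere key; model, temperature and system_prompt simply restate the constructor defaults, and requests go to the Coral chat endpoint hard-coded in __init__:

    from ai4free import Cohere

    ai = Cohere(
        api_key="YOUR_COHERE_API_KEY",  # placeholder, not a real key
        model="command-r-plus",
        temperature=0.7,
        system_prompt="You are helpful AI",
    )
    print(ai.chat("Summarise what this provider class does in one sentence"))
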
/ai4free/perplexity.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | import requests
4 | from requests import get
5 | from uuid import uuid4
6 | from re import findall
7 | from requests.exceptions import RequestException
8 | from curl_cffi.requests import get, RequestsError
9 | from random import randint
10 | import json
11 | import yaml
12 | from webscout.AIutel import Optimizers
13 | from webscout.AIutel import Conversation
14 | from webscout.AIutel import AwesomePrompts, sanitize_stream
15 | from webscout.AIbase import Provider, AsyncProvider
16 | from Helpingai_T2 import Perplexity
17 | from webscout import exceptions
18 | from typing import Any, AsyncGenerator
19 | import logging
20 | import httpx
21 | class PERPLEXITY(Provider):
22 | def __init__(
23 | self,
24 | is_conversation: bool = True,
25 | max_tokens: int = 600,
26 | timeout: int = 30,
27 | intro: str = None,
28 | filepath: str = None,
29 | update_file: bool = True,
30 | proxies: dict = {},
31 | history_offset: int = 10250,
32 | act: str = None,
33 | quiet: bool = False,
34 | ):
35 | """Instantiates PERPLEXITY
36 |
37 | Args:
38 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
39 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
40 | timeout (int, optional): Http request timeout. Defaults to 30.
41 | intro (str, optional): Conversation introductory prompt. Defaults to None.
42 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
43 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
44 | proxies (dict, optional): Http request proxies. Defaults to {}.
45 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
46 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
47 | quiet (bool, optional): Ignore web search-results and yield final response only. Defaults to False.
48 | """
49 | self.max_tokens_to_sample = max_tokens
50 | self.is_conversation = is_conversation
51 | self.last_response = {}
52 | self.web_results: dict = {}
53 | self.quiet = quiet
54 |
55 | self.__available_optimizers = (
56 | method
57 | for method in dir(Optimizers)
58 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
59 | )
60 | Conversation.intro = (
61 | AwesomePrompts().get_act(
62 | act, raise_not_found=True, default=None, case_insensitive=True
63 | )
64 | if act
65 | else intro or Conversation.intro
66 | )
67 | self.conversation = Conversation(
68 | is_conversation, self.max_tokens_to_sample, filepath, update_file
69 | )
70 | self.conversation.history_offset = history_offset
71 |
72 | def ask(
73 | self,
74 | prompt: str,
75 | stream: bool = False,
76 | raw: bool = False,
77 | optimizer: str = None,
78 | conversationally: bool = False,
79 | ) -> dict:
80 | """Chat with AI
81 |
82 | Args:
83 | prompt (str): Prompt to be send.
84 | stream (bool, optional): Flag for streaming response. Defaults to False.
85 | raw (bool, optional): Stream back raw response as received. Defaults to False.
86 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
87 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
88 | Returns:
89 | dict : {}
90 | ```json
91 | {
92 | "status": "pending",
93 | "uuid": "3604dfcc-611f-4b7d-989d-edca2a7233c7",
94 | "read_write_token": null,
95 | "frontend_context_uuid": "f6d43119-5231-481d-b692-f52e1f52d2c6",
96 | "final": false,
97 | "backend_uuid": "a6d6ec9e-da69-4841-af74-0de0409267a8",
98 | "media_items": [],
99 | "widget_data": [],
100 | "knowledge_cards": [],
101 | "expect_search_results": "false",
102 | "mode": "concise",
103 | "search_focus": "internet",
104 | "gpt4": false,
105 | "display_model": "turbo",
106 | "attachments": null,
107 | "answer": "",
108 | "web_results": [],
109 | "chunks": [],
110 | "extra_web_results": []
111 | }
112 | ```
113 | """
114 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
115 | if optimizer:
116 | if optimizer in self.__available_optimizers:
117 | conversation_prompt = getattr(Optimizers, optimizer)(
118 | conversation_prompt if conversationally else prompt
119 | )
120 | else:
121 | raise Exception(
122 | f"Optimizer is not one of {self.__available_optimizers}"
123 | )
124 |
125 | def for_stream():
126 | for response in Perplexity().generate_answer(conversation_prompt):
127 | yield json.dumps(response) if raw else response
128 | self.last_response.update(response)
129 |
130 | self.conversation.update_chat_history(
131 | prompt,
132 | self.get_message(self.last_response),
133 | )
134 |
135 | def for_non_stream():
136 | for _ in for_stream():
137 | pass
138 | return self.last_response
139 |
140 | return for_stream() if stream else for_non_stream()
141 |
142 | def chat(
143 | self,
144 | prompt: str,
145 | stream: bool = False,
146 | optimizer: str = None,
147 | conversationally: bool = False,
148 | ) -> str:
149 | """Generate response `str`
150 | Args:
151 | prompt (str): Prompt to be send.
152 | stream (bool, optional): Flag for streaming response. Defaults to False.
153 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
154 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
155 | Returns:
156 | str: Response generated
157 | """
158 |
159 | def for_stream():
160 | for response in self.ask(
161 | prompt, True, optimizer=optimizer, conversationally=conversationally
162 | ):
163 | yield self.get_message(response)
164 |
165 | def for_non_stream():
166 | return self.get_message(
167 | self.ask(
168 | prompt,
169 | False,
170 | optimizer=optimizer,
171 | conversationally=conversationally,
172 | )
173 | )
174 |
175 | return for_stream() if stream else for_non_stream()
176 |
177 | def get_message(self, response: dict) -> str:
178 | """Retrieves message only from response
179 |
180 | Args:
181 | response (dict): Response generated by `self.ask`
182 |
183 | Returns:
184 | str: Message extracted
185 | """
186 | assert isinstance(response, dict), "Response should be of dict data-type only"
187 | text_str: str = response.get("answer", "")
188 |
189 | def update_web_results(web_results: list) -> None:
190 | for index, results in enumerate(web_results, start=1):
191 | self.web_results[str(index) + ". " + results["name"]] = dict(
192 | url=results.get("url"), snippet=results.get("snippet")
193 | )
194 |
195 | if response.get("text"):
196 | # last chunk
197 | target: dict[str, Any] = json.loads(response.get("text"))
198 | text_str = target.get("answer")
199 | web_results: list[dict] = target.get("web_results")
200 | self.web_results.clear()
201 | update_web_results(web_results)
202 |
203 | return (
204 | text_str
205 | if self.quiet or not self.web_results
206 | else text_str + "\n\n# WEB-RESULTS\n\n" + yaml.dump(self.web_results)
207 | )
208 |
209 | else:
210 | if str(response.get("expect_search_results")).lower() == "true":
211 | return (
212 | text_str
213 | if self.quiet
214 | else text_str
215 | + "\n\n# WEB-RESULTS\n\n"
216 | + yaml.dump(response.get("web_results"))
217 | )
218 | else:
219 | return text_str
--------------------------------------------------------------------------------
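
A usage sketch for the PERPLEXITY provider above. No API key is required; answers come through the Helpingai_T2 Perplexity client imported at the top of the module, and quiet=True suppresses the appended "# WEB-RESULTS" section described in get_message:

    from ai4free import PERPLEXITY

    ai = PERPLEXITY(quiet=True)
    print(ai.chat("Who formulated the theory of general relativity?"))
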
/ai4free/reka.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | import requests
4 | from requests import get
5 | from uuid import uuid4
6 | from re import findall
7 | from requests.exceptions import RequestException
8 | from curl_cffi.requests import get, RequestsError
9 | from random import randint
10 | import json
11 | import yaml
12 | from webscout.AIutel import Optimizers
13 | from webscout.AIutel import Conversation
14 | from webscout.AIutel import AwesomePrompts, sanitize_stream
15 | from webscout.AIbase import Provider, AsyncProvider
16 | from Helpingai_T2 import Perplexity
17 | from webscout import exceptions
18 | from typing import Any, AsyncGenerator
19 | class REKA(Provider):
20 | def __init__(
21 | self,
22 | api_key: str,
23 | is_conversation: bool = True,
24 | max_tokens: int = 600,
25 | timeout: int = 30,
26 | intro: str = None,
27 | filepath: str = None,
28 | update_file: bool = True,
29 | proxies: dict = {},
30 | history_offset: int = 10250,
31 | act: str = None,
32 | model: str = "reka-core",
33 | system_prompt: str = "Be Helpful and Friendly. Keep your response straightforward, short and concise",
34 | use_search_engine: bool = False,
35 | use_code_interpreter: bool = False,
36 | ):
37 | """Instantiates REKA
38 |
39 | Args:
40 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
41 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
42 | timeout (int, optional): Http request timeout. Defaults to 30.
43 | intro (str, optional): Conversation introductory prompt. Defaults to None.
44 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
45 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
46 | proxies (dict, optional): Http request proxies. Defaults to {}.
47 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
48 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
49 | model (str, optional): REKA model name. Defaults to "reka-core".
50 | system_prompt (str, optional): System prompt for REKA. Defaults to "Be Helpful and Friendly. Keep your response straightforward, short and concise".
51 | use_search_engine (bool, optional): Whether to use the search engine. Defaults to False.
52 | use_code_interpreter (bool, optional): Whether to use the code interpreter. Defaults to False.
53 | """
54 | self.session = requests.Session()
55 | self.is_conversation = is_conversation
56 | self.max_tokens_to_sample = max_tokens
57 | self.api_endpoint = "https://chat.reka.ai/api/chat"
58 | self.stream_chunk_size = 64
59 | self.timeout = timeout
60 | self.last_response = {}
61 | self.model = model
62 | self.system_prompt = system_prompt
63 | self.use_search_engine = use_search_engine
64 | self.use_code_interpreter = use_code_interpreter
65 | self.access_token = api_key
66 | self.headers = {
67 | "Authorization": f"Bearer {self.access_token}",
68 | }
69 |
70 | self.__available_optimizers = (
71 | method
72 | for method in dir(Optimizers)
73 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
74 | )
75 | self.session.headers.update(self.headers)
76 | Conversation.intro = (
77 | AwesomePrompts().get_act(
78 | act, raise_not_found=True, default=None, case_insensitive=True
79 | )
80 | if act
81 | else intro or Conversation.intro
82 | )
83 | self.conversation = Conversation(
84 | is_conversation, self.max_tokens_to_sample, filepath, update_file
85 | )
86 | self.conversation.history_offset = history_offset
87 | self.session.proxies = proxies
88 |
89 | def ask(
90 | self,
91 | prompt: str,
92 | stream: bool = False,
93 | raw: bool = False,
94 | optimizer: str = None,
95 | conversationally: bool = False,
96 | ) -> dict:
97 | """Chat with AI
98 |
99 | Args:
100 | prompt (str): Prompt to be send.
101 | stream (bool, optional): Flag for streaming response. Defaults to False.
102 | raw (bool, optional): Stream back raw response as received. Defaults to False.
103 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
104 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
105 | Returns:
106 | dict : {}
107 | ```json
108 | {
109 | "text" : "How may I assist you today?"
110 | }
111 | ```
112 | """
113 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
114 | if optimizer:
115 | if optimizer in self.__available_optimizers:
116 | conversation_prompt = getattr(Optimizers, optimizer)(
117 | conversation_prompt if conversationally else prompt
118 | )
119 | else:
120 | raise Exception(
121 | f"Optimizer is not one of {self.__available_optimizers}"
122 | )
123 |
124 | self.session.headers.update(self.headers)
125 | payload = {
126 |
127 | "conversation_history": [
128 | {"type": "human", "text": f"## SYSTEM PROMPT: {self.system_prompt}\n\n## QUERY: {conversation_prompt}"},
129 | ],
130 |
131 | "stream": stream,
132 | "use_search_engine": self.use_search_engine,
133 | "use_code_interpreter": self.use_code_interpreter,
134 | "model_name": self.model,
135 | # "model_name": "reka-flash",
136 | # "model_name": "reka-edge",
137 | }
138 |
139 | def for_stream():
140 | response = self.session.post(self.api_endpoint, json=payload, stream=True, timeout=self.timeout)
141 | if not response.ok:
142 | raise Exception(
143 | f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
144 | )
145 |
146 | for value in response.iter_lines(
147 | decode_unicode=True,
148 | chunk_size=self.stream_chunk_size,
149 | ):
150 | try:
151 | resp = json.loads(value)
152 | self.last_response.update(resp)
153 | yield value if raw else resp
154 | except json.decoder.JSONDecodeError:
155 | pass
156 | self.conversation.update_chat_history(
157 | prompt, self.get_message(self.last_response)
158 | )
159 |
160 | def for_non_stream():
161 | # let's make use of stream
162 | for _ in for_stream():
163 | pass
164 | return self.last_response
165 |
166 | return for_stream() if stream else for_non_stream()
167 |
168 | def chat(
169 | self,
170 | prompt: str,
171 | stream: bool = False,
172 | optimizer: str = None,
173 | conversationally: bool = False,
174 | ) -> str:
175 | """Generate response `str`
176 | Args:
177 | prompt (str): Prompt to be send.
178 | stream (bool, optional): Flag for streaming response. Defaults to False.
179 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
180 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
181 | Returns:
182 | str: Response generated
183 | """
184 |
185 | def for_stream():
186 | for response in self.ask(
187 | prompt, True, optimizer=optimizer, conversationally=conversationally
188 | ):
189 | yield self.get_message(response)
190 |
191 | def for_non_stream():
192 | return self.get_message(
193 | self.ask(
194 | prompt,
195 | False,
196 | optimizer=optimizer,
197 | conversationally=conversationally,
198 | )
199 | )
200 |
201 | return for_stream() if stream else for_non_stream()
202 |
203 | def get_message(self, response: dict) -> str:
204 | """Retrieves message only from response
205 |
206 | Args:
207 | response (dict): Response generated by `self.ask`
208 |
209 | Returns:
210 | str: Message extracted
211 | """
212 | assert isinstance(response, dict), "Response should be of dict data-type only"
213 | return response.get("text")
--------------------------------------------------------------------------------
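
A usage sketch for the REKA provider above. The api_key is a placeholder for a chat.reka.ai access token; the model name and the search-engine/code-interpreter flags mirror the constructor signature:

    from ai4free import REKA

    ai = REKA(
        api_key="YOUR_REKA_API_KEY",  # placeholder access token
        model="reka-core",
        use_search_engine=False,
        use_code_interpreter=False,
    )
    print(ai.chat("Explain recursion in two sentences"))
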
/ai4free/BasedGPT.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | from selenium import webdriver
4 | from selenium.webdriver.chrome.options import Options
5 | from selenium.webdriver.common.by import By
6 | from selenium.webdriver.support import expected_conditions as EC
7 | from selenium.webdriver.support.ui import WebDriverWait
8 | import click
9 | import requests
10 | from requests import get
11 | from uuid import uuid4
12 | from re import findall
13 | from requests.exceptions import RequestException
14 | from curl_cffi.requests import get, RequestsError
15 | import g4f
16 | from random import randint
17 | from PIL import Image
18 | import io
19 | import re
20 | import json
21 | import yaml
22 | from webscout.AIutel import Optimizers
23 | from webscout.AIutel import Conversation
24 | from webscout.AIutel import AwesomePrompts, sanitize_stream
25 | from webscout.AIbase import Provider, AsyncProvider
26 | from webscout import exceptions
27 | from typing import Any, AsyncGenerator, Dict
28 | import logging
29 | import httpx
30 |
31 | class BasedGPT(Provider):
32 | def __init__(
33 | self,
34 | is_conversation: bool = True,
35 | max_tokens: int = 600,
36 | timeout: int = 30,
37 | intro: str = None,
38 | filepath: str = None,
39 | update_file: bool = True,
40 | proxies: dict = {},
41 | history_offset: int = 10250,
42 | act: str = None,
43 | system_prompt: str = "Be Helpful and Friendly",
44 | ):
45 | """Instantiates BasedGPT
46 |
47 | Args:
48 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
49 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
50 | timeout (int, optional): Http request timeout. Defaults to 30.
51 | intro (str, optional): Conversation introductory prompt. Defaults to None.
52 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
53 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
54 | proxies (dict, optional): Http request proxies. Defaults to {}.
55 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
56 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
57 | system_prompt (str, optional): System prompt for BasedGPT. Defaults to "Be Helpful and Friendly".
58 | """
59 | self.session = requests.Session()
60 | self.is_conversation = is_conversation
61 | self.max_tokens_to_sample = max_tokens
62 | self.chat_endpoint = "https://www.basedgpt.chat/api/chat"
63 | self.stream_chunk_size = 64
64 | self.timeout = timeout
65 | self.last_response = {}
66 | self.system_prompt = system_prompt
67 |
68 | self.__available_optimizers = (
69 | method
70 | for method in dir(Optimizers)
71 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
72 | )
73 | self.session.headers.update(
74 | {"Content-Type": "application/json"}
75 | )
76 | Conversation.intro = (
77 | AwesomePrompts().get_act(
78 | act, raise_not_found=True, default=None, case_insensitive=True
79 | )
80 | if act
81 | else intro or Conversation.intro
82 | )
83 | self.conversation = Conversation(
84 | is_conversation, self.max_tokens_to_sample, filepath, update_file
85 | )
86 | self.conversation.history_offset = history_offset
87 | self.session.proxies = proxies
88 |
89 | def ask(
90 | self,
91 | prompt: str,
92 | stream: bool = False,
93 | raw: bool = False,
94 | optimizer: str = None,
95 | conversationally: bool = False,
96 | ) -> dict:
97 | """Chat with AI
98 |
99 | Args:
100 | prompt (str): Prompt to be send.
101 | stream (bool, optional): Flag for streaming response. Defaults to False.
102 | raw (bool, optional): Stream back raw response as received. Defaults to False.
103 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
104 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
105 | Returns:
106 | dict : {}
107 | ```json
108 | {
109 | "id": "chatcmpl-TaREJpBZsRVQFRFic1wIA7Q7XfnaD",
110 | "object": "chat.completion",
111 | "created": 1704623244,
112 | "model": "gpt-3.5-turbo",
113 | "usage": {
114 | "prompt_tokens": 0,
115 | "completion_tokens": 0,
116 | "total_tokens": 0
117 | },
118 | "choices": [
119 | {
120 | "message": {
121 | "role": "assistant",
122 | "content": "Hello! How can I assist you today?"
123 | },
124 | "finish_reason": "stop",
125 | "index": 0
126 | }
127 | ]
128 | }
129 | ```
130 | """
131 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
132 | if optimizer:
133 | if optimizer in self.__available_optimizers:
134 | conversation_prompt = getattr(Optimizers, optimizer)(
135 | conversation_prompt if conversationally else prompt
136 | )
137 | else:
138 | raise Exception(
139 | f"Optimizer is not one of {self.__available_optimizers}"
140 | )
141 |
142 | payload = {
143 | "messages": [
144 | {"role": "system", "content": self.system_prompt},
145 | {"role": "user", "content": conversation_prompt},
146 | ],
147 | }
148 |
149 | def for_stream():
150 | response = self.session.post(
151 | self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
152 | )
153 | if not response.ok:
154 | raise exceptions.FailedToGenerateResponseError(
155 | f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
156 | )
157 |
158 | message_load = ""
159 | for value in response.iter_lines(
160 | decode_unicode=True,
161 | delimiter="",
162 | chunk_size=self.stream_chunk_size,
163 | ):
164 | try:
165 | message_load += value
166 | yield value if raw else dict(text=message_load)
167 | except json.decoder.JSONDecodeError:
168 | pass
169 | self.last_response.update(dict(text=message_load))
170 | self.conversation.update_chat_history(
171 | prompt, self.get_message(self.last_response)
172 | )
173 |
174 | def for_non_stream():
175 | for _ in for_stream():
176 | pass
177 | return self.last_response
178 |
179 | return for_stream() if stream else for_non_stream()
180 |
181 | def chat(
182 | self,
183 | prompt: str,
184 | stream: bool = False,
185 | optimizer: str = None,
186 | conversationally: bool = False,
187 | ) -> str:
188 | """Generate response `str`
189 | Args:
190 | prompt (str): Prompt to be send.
191 | stream (bool, optional): Flag for streaming response. Defaults to False.
192 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
193 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
194 | Returns:
195 | str: Response generated
196 | """
197 |
198 | def for_stream():
199 | for response in self.ask(
200 | prompt, True, optimizer=optimizer, conversationally=conversationally
201 | ):
202 | yield self.get_message(response)
203 |
204 | def for_non_stream():
205 | return self.get_message(
206 | self.ask(
207 | prompt,
208 | False,
209 | optimizer=optimizer,
210 | conversationally=conversationally,
211 | )
212 | )
213 |
214 | return for_stream() if stream else for_non_stream()
215 |
216 | def get_message(self, response: dict) -> str:
217 | """Retrieves message only from response
218 |
219 | Args:
220 | response (dict): Response generated by `self.ask`
221 |
222 | Returns:
223 | str: Message extracted
224 | """
225 | assert isinstance(response, dict), "Response should be of dict data-type only"
226 | return response["text"]
--------------------------------------------------------------------------------
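
A usage sketch for the BasedGPT provider above, which needs no API key and talks to the basedgpt.chat endpoint hard-coded in __init__; the system prompt shown is the constructor default:

    from ai4free import BasedGPT

    ai = BasedGPT(system_prompt="Be Helpful and Friendly")
    print(ai.chat("Write a haiku about the sea"))
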
/ai4free/Berlin4h.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import json
3 | import uuid
4 | from typing import Any, Dict, Optional
5 | from webscout.AIutel import Optimizers
6 | from webscout.AIutel import Conversation
7 | from webscout.AIutel import AwesomePrompts, sanitize_stream
8 | from webscout.AIbase import Provider, AsyncProvider
9 | from webscout import exceptions
10 |
11 | class Berlin4h(Provider):
12 | """
13 | A class to interact with the Berlin4h AI API.
14 | """
15 |
16 | def __init__(
17 | self,
18 | api_token: str = "3bf369cd84339603f8a5361e964f9ebe",
19 | api_endpoint: str = "https://ai.berlin4h.top/api/chat/completions",
20 | model: str = "gpt-3.5-turbo",
21 | temperature: float = 0.9,
22 | presence_penalty: float = 0,
23 | frequency_penalty: float = 0,
24 | max_tokens: int = 4000,
25 | is_conversation: bool = True,
26 | timeout: int = 30,
27 | intro: str = None,
28 | filepath: str = None,
29 | update_file: bool = True,
30 | proxies: dict = {},
31 | history_offset: int = 10250,
32 | act: str = None,
33 | ) -> None:
34 | """
35 | Initializes the Berlin4h API with given parameters.
36 |
37 | Args:
38 | api_token (str): The API token for authentication.
39 | api_endpoint (str): The API endpoint to use for requests.
40 | model (str): The AI model to use for text generation.
41 | temperature (float): The temperature parameter for the model.
42 | presence_penalty (float): The presence penalty parameter for the model.
43 | frequency_penalty (float): The frequency penalty parameter for the model.
44 | max_tokens (int): The maximum number of tokens to generate.
45 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
46 | timeout (int, optional): Http request timeout. Defaults to 30.
47 | intro (str, optional): Conversation introductory prompt. Defaults to None.
48 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
49 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
50 | proxies (dict, optional): Http request proxies. Defaults to {}.
51 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
52 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
53 | """
54 | self.api_token = api_token
55 | self.api_endpoint = api_endpoint
56 | self.model = model
57 | self.temperature = temperature
58 | self.presence_penalty = presence_penalty
59 | self.frequency_penalty = frequency_penalty
60 | self.max_tokens = max_tokens
61 | self.parent_message_id: Optional[str] = None
62 | self.session = requests.Session()
63 | self.is_conversation = is_conversation
64 | self.max_tokens_to_sample = max_tokens
65 | self.stream_chunk_size = 1
66 | self.timeout = timeout
67 | self.last_response = {}
68 | self.headers = {"Content-Type": "application/json", "Token": self.api_token}
69 | self.__available_optimizers = (
70 | method
71 | for method in dir(Optimizers)
72 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
73 | )
74 | self.session.headers.update(self.headers)
75 | Conversation.intro = (
76 | AwesomePrompts().get_act(
77 | act, raise_not_found=True, default=None, case_insensitive=True
78 | )
79 | if act
80 | else intro or Conversation.intro
81 | )
82 | self.conversation = Conversation(
83 | is_conversation, self.max_tokens_to_sample, filepath, update_file
84 | )
85 | self.conversation.history_offset = history_offset
86 | self.session.proxies = proxies
87 |
88 | def ask(
89 | self,
90 | prompt: str,
91 | stream: bool = False,
92 | raw: bool = False,
93 | optimizer: str = None,
94 | conversationally: bool = False,
95 | ) -> Dict[str, Any]:
96 | """
97 | Sends a prompt to the Berlin4h AI API and returns the response.
98 |
99 | Args:
100 | prompt: The text prompt to generate text from.
101 | stream (bool, optional): Whether to stream the response. Defaults to False.
102 | raw (bool, optional): Whether to return the raw response. Defaults to False.
103 | optimizer (str, optional): The name of the optimizer to use. Defaults to None.
104 | conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
105 |
106 | Returns:
107 | The response from the API.
108 | """
109 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
110 | if optimizer:
111 | if optimizer in self.__available_optimizers:
112 | conversation_prompt = getattr(Optimizers, optimizer)(
113 | conversation_prompt if conversationally else prompt
114 | )
115 | else:
116 | raise Exception(
117 | f"Optimizer is not one of {self.__available_optimizers}"
118 | )
119 |
120 | payload: Dict[str, Any] = {
121 | "prompt": conversation_prompt,
122 | "parentMessageId": self.parent_message_id or str(uuid.uuid4()),
123 | "options": {
124 | "model": self.model,
125 | "temperature": self.temperature,
126 | "presence_penalty": self.presence_penalty,
127 | "frequency_penalty": self.frequency_penalty,
128 | "max_tokens": self.max_tokens,
129 | },
130 | }
131 |
132 | def for_stream():
133 | response = self.session.post(
134 | self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
135 | )
136 |
137 | if not response.ok:
138 | raise exceptions.FailedToGenerateResponseError(
139 | f"Failed to generate response - ({response.status_code}, {response.reason})"
140 | )
141 |
142 | streaming_response = ""
143 | # Collect the entire line before processing
144 | for line in response.iter_lines(decode_unicode=True):
145 | if line:
146 | try:
147 | json_data = json.loads(line)
148 | content = json_data['content']
149 | if ">" in content: break
150 | streaming_response += content
151 | yield content if raw else dict(text=streaming_response) # Yield accumulated response
152 | except (json.JSONDecodeError, KeyError):
153 | continue
154 | self.last_response.update(dict(text=streaming_response))
155 | self.conversation.update_chat_history(
156 | prompt, self.get_message(self.last_response)
157 | )
158 |
159 | def for_non_stream():
160 | for _ in for_stream():
161 | pass
162 | return self.last_response
163 |
164 | return for_stream() if stream else for_non_stream()
165 |
166 | def chat(
167 | self,
168 | prompt: str,
169 | stream: bool = False,
170 | optimizer: str = None,
171 | conversationally: bool = False,
172 | ) -> str:
173 | """Generate response `str`
174 | Args:
175 | prompt (str): Prompt to be sent.
176 | stream (bool, optional): Flag for streaming response. Defaults to False.
177 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
178 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
179 | Returns:
180 | str: Response generated
181 | """
182 |
183 | def for_stream():
184 | for response in self.ask(
185 | prompt, True, optimizer=optimizer, conversationally=conversationally
186 | ):
187 | yield self.get_message(response)
188 |
189 | def for_non_stream():
190 | return self.get_message(
191 | self.ask(
192 | prompt,
193 | False,
194 | optimizer=optimizer,
195 | conversationally=conversationally,
196 | )
197 | )
198 |
199 | return for_stream() if stream else for_non_stream()
200 |
201 | def get_message(self, response: dict) -> str:
202 | """Retrieves message only from response
203 |
204 | Args:
205 | response (dict): Response generated by `self.ask`
206 |
207 | Returns:
208 | str: Message extracted
209 | """
210 | assert isinstance(response, dict), "Response should be of dict data-type only"
211 | return response["text"]
--------------------------------------------------------------------------------
/ai4free/ChatGPTUK.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from typing import Any, AsyncGenerator, Dict, Optional
3 | import json
4 | import re
5 |
6 | from webscout.AIutel import Optimizers
7 | from webscout.AIutel import Conversation
8 | from webscout.AIutel import AwesomePrompts, sanitize_stream
9 | from webscout.AIbase import Provider, AsyncProvider
10 | from webscout import exceptions
11 |
12 |
13 | class ChatGPTUK(Provider):
14 | """
15 | A class to interact with the ChatGPT UK API.
16 | """
17 |
18 | def __init__(
19 | self,
20 | is_conversation: bool = True,
21 | max_tokens: int = 600,
22 | temperature: float = 0.9,
23 | presence_penalty: float = 0,
24 | frequency_penalty: float = 0,
25 | top_p: float = 1,
26 | model: str = "google-gemini-pro",
27 | timeout: int = 30,
28 | intro: str = None,
29 | filepath: str = None,
30 | update_file: bool = True,
31 | proxies: dict = {},
32 | history_offset: int = 10250,
33 | act: str = None,
34 | ) -> None:
35 | """
36 | Initializes the ChatGPTUK API with given parameters.
37 |
38 | Args:
39 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
40 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
41 | temperature (float, optional): Degree of randomness in the generated text. Defaults to 0.9.
42 | presence_penalty (float, optional): Penalty applied when a topic is repeated. Defaults to 0.
43 | frequency_penalty (float, optional): Penalty applied when a word is repeated. Defaults to 0.
44 | top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
45 | model (str, optional): LLM model name. Defaults to "google-gemini-pro".
46 | timeout (int, optional): Http request timeout. Defaults to 30.
47 | intro (str, optional): Conversation introductory prompt. Defaults to None.
48 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
49 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
50 | proxies (dict, optional): Http request proxies. Defaults to {}.
51 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
52 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
53 | """
54 | self.session = requests.Session()
55 | self.is_conversation = is_conversation
56 | self.max_tokens_to_sample = max_tokens
57 | self.api_endpoint = "https://free.chatgpt.org.uk/api/openai/v1/chat/completions"
58 | self.stream_chunk_size = 64
59 | self.timeout = timeout
60 | self.last_response = {}
61 | self.model = model
62 | self.temperature = temperature
63 | self.presence_penalty = presence_penalty
64 | self.frequency_penalty = frequency_penalty
65 | self.top_p = top_p
66 | self.headers = {"Content-Type": "application/json"}
67 |
68 | self.__available_optimizers = (
69 | method
70 | for method in dir(Optimizers)
71 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
72 | )
73 | self.session.headers.update(self.headers)
74 | Conversation.intro = (
75 | AwesomePrompts().get_act(
76 | act, raise_not_found=True, default=None, case_insensitive=True
77 | )
78 | if act
79 | else intro or Conversation.intro
80 | )
81 | self.conversation = Conversation(
82 | is_conversation, self.max_tokens_to_sample, filepath, update_file
83 | )
84 | self.conversation.history_offset = history_offset
85 | self.session.proxies = proxies
86 |
87 | def ask(
88 | self,
89 | prompt: str,
90 | stream: bool = False,
91 | raw: bool = False,
92 | optimizer: str = None,
93 | conversationally: bool = False,
94 | ) -> dict:
95 | """Chat with AI
96 |
97 | Args:
98 | prompt (str): Prompt to be sent.
99 | stream (bool, optional): Flag for streaming response. Defaults to False.
100 | raw (bool, optional): Stream back raw response as received. Defaults to False.
101 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
102 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
103 | Returns:
104 | dict : {}
105 | ```json
106 | {
107 | "text" : "How may I assist you today?"
108 | }
109 | ```
110 | """
111 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
112 | if optimizer:
113 | if optimizer in self.__available_optimizers:
114 | conversation_prompt = getattr(Optimizers, optimizer)(
115 | conversation_prompt if conversationally else prompt
116 | )
117 | else:
118 | raise Exception(
119 | f"Optimizer is not one of {self.__available_optimizers}"
120 | )
121 |
122 | self.session.headers.update(self.headers)
123 | payload = {
124 | "messages": [
125 | {"role": "system", "content": "Keep your responses long and detailed"},
126 | {"role": "user", "content": conversation_prompt}
127 | ],
128 | "stream": True,
129 | "model": self.model,
130 | "temperature": self.temperature,
131 | "presence_penalty": self.presence_penalty,
132 | "frequency_penalty": self.frequency_penalty,
133 | "top_p": self.top_p,
134 | "max_tokens": self.max_tokens_to_sample
135 | }
136 |
137 | def for_stream():
138 | response = self.session.post(
139 | self.api_endpoint, json=payload, stream=True, timeout=self.timeout
140 | )
141 | if not response.ok:
142 | raise exceptions.FailedToGenerateResponseError(
143 | f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
144 | )
145 |
146 | streaming_response = ""
147 | for line in response.iter_lines(decode_unicode=True, chunk_size=1):
148 | if line:
149 | modified_line = re.sub("data:", "", line)  # strip the SSE "data:" prefix before parsing JSON
150 | try:
151 | json_data = json.loads(modified_line)
152 | content = json_data['choices'][0]['delta']['content']
153 | streaming_response += content
154 | yield content if raw else dict(text=streaming_response)
155 | except (json.JSONDecodeError, KeyError, IndexError):
156 | continue
157 | self.last_response.update(dict(text=streaming_response))
158 | self.conversation.update_chat_history(
159 | prompt, self.get_message(self.last_response)
160 | )
161 |
162 | def for_non_stream():
163 | for _ in for_stream():
164 | pass
165 | return self.last_response
166 |
167 | return for_stream() if stream else for_non_stream()
168 |
169 | def chat(
170 | self,
171 | prompt: str,
172 | stream: bool = False,
173 | optimizer: str = None,
174 | conversationally: bool = False,
175 | ) -> str:
176 | """Generate response `str`
177 | Args:
178 | prompt (str): Prompt to be sent.
179 | stream (bool, optional): Flag for streaming response. Defaults to False.
180 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
181 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
182 | Returns:
183 | str: Response generated
184 | """
185 |
186 | def for_stream():
187 | for response in self.ask(
188 | prompt, True, optimizer=optimizer, conversationally=conversationally
189 | ):
190 | yield self.get_message(response)
191 |
192 | def for_non_stream():
193 | return self.get_message(
194 | self.ask(
195 | prompt,
196 | False,
197 | optimizer=optimizer,
198 | conversationally=conversationally,
199 | )
200 | )
201 |
202 | return for_stream() if stream else for_non_stream()
203 |
204 | def get_message(self, response: dict) -> str:
205 | """Retrieves message only from response
206 |
207 | Args:
208 | response (dict): Response generated by `self.ask`
209 |
210 | Returns:
211 | str: Message extracted
212 | """
213 | assert isinstance(response, dict), "Response should be of dict data-type only"
214 | return response["text"]
--------------------------------------------------------------------------------
/ai4free/ThinkAnyAI.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from typing import Any, AsyncGenerator, Dict
3 |
4 | from webscout.AIutel import AwesomePrompts, Conversation, Optimizers
5 | from webscout.AIbase import Provider
6 | from webscout import exceptions
7 |
8 | from uuid import uuid4
9 |
10 |
11 | class ThinkAnyAI(Provider):
12 | """A client for interfacing with the ThinkAny AI API for conversational and search-based queries.
13 |
14 | Available Modes:
15 | - claude-3-haiku
16 | - llama-3-8b-instruct
17 | - mistral-7b-instruct
18 | - rwkv-v6
19 | - gemini-pro
20 | - gpt-3.5-turbo
21 | """
22 |
23 | def __init__(
24 | self,
25 | model: str = "claude-3-haiku",
26 | locale: str = "en",
27 | web_search: bool = False,
28 | chunk_size: int = 1,
29 | streaming: bool = True,
30 | is_conversation: bool = True,
31 | max_tokens: int = 600,
32 | timeout: int = 30,
33 | intro: str = None,
34 | filepath: str = None,
35 | update_file: bool = True,
36 | proxies: dict = {},
37 | history_offset: int = 10250,
38 | act: str = None,
39 | ):
40 | """Initializes ThinkAnyAI
41 |
42 | Args:
43 | model (str): The AI model to be used for generating responses. Defaults to "claude-3-haiku".
44 | locale (str): The language locale. Defaults to "en" (English).
45 | web_search (bool): Whether to include web search results in the response. Defaults to False.
46 | chunk_size (int): The size of data chunks when streaming responses. Defaults to 1.
47 | streaming (bool): Whether to stream response data. Defaults to True.
48 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
49 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
50 | timeout (int, optional): Http request timeout. Defaults to 30.
51 | intro (str, optional): Conversation introductory prompt. Defaults to None.
52 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
53 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
54 | proxies (dict, optional): Http request proxies. Defaults to {}.
55 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
56 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
57 | """
58 | self.base_url = "https://thinkany.ai/api"
59 | self.model = model
60 | self.locale = locale
61 | self.web_search = web_search
62 | self.chunk_size = chunk_size
63 | self.streaming = streaming
64 | self.last_response = {}
65 | self.session = requests.Session()
66 | self.session.proxies = proxies
67 |
68 | self.__available_optimizers = (
69 | method
70 | for method in dir(Optimizers)
71 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
72 | )
73 |
74 | Conversation.intro = (
75 | AwesomePrompts().get_act(
76 | act, raise_not_found=True, default=None, case_insensitive=True
77 | )
78 | if act
79 | else intro or Conversation.intro
80 | )
81 | self.conversation = Conversation(
82 | is_conversation, max_tokens, filepath, update_file
83 | )
84 | self.conversation.history_offset = history_offset
85 |
86 | def ask(
87 | self,
88 | prompt: str,
89 | stream: bool = False,
90 | raw: bool = False,
91 | optimizer: str = None,
92 | conversationally: bool = False,
93 | ) -> dict:
94 | """Chat with AI
95 |
96 | Args:
97 | prompt (str): Prompt to be sent.
98 | stream (bool, optional): Flag for streaming response. Defaults to False.
99 | raw (bool, optional): Stream back raw response as received. Defaults to False.
100 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
101 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
102 | Returns:
103 | dict : {}
104 | ```json
105 | {
106 | "text" : "How may I assist you today?",
107 | "links" : []
108 | }
109 | ```
141 | """
142 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
143 | if optimizer:
144 | if optimizer in self.__available_optimizers:
145 | conversation_prompt = getattr(Optimizers, optimizer)(
146 | conversation_prompt if conversationally else prompt
147 | )
148 | else:
149 | raise Exception(
150 | f"Optimizer is not one of {self.__available_optimizers}"
151 | )
152 |
153 | def initiate_conversation(query: str) -> str:
154 | """
155 | Initiates a new conversation with the ThinkAny AI API.
156 |
157 | Args:
158 | query (str): The initial query to start the conversation.
159 |
160 | Returns:
161 | str: The UUID (Unique Identifier) of the conversation.
162 | """
163 | url = f"{self.base_url}/new-conversation"
164 | payload = {
165 | "content": query,
166 | "locale": self.locale,
167 | "mode": "search" if self.web_search else "chat",
168 | "model": self.model,
169 | "source": "all",
170 | }
171 | response = self.session.post(url, json=payload)
172 | return response.json().get("data", {}).get("uuid", "DevsDoCode")
173 |
174 | def RAG_search(uuid: str) -> tuple[bool, list]:
175 | """
176 | Performs a web search using Retrieval-Augmented Generation (RAG).
177 |
178 | Args:
179 | uuid (str): The UUID of the conversation.
180 |
181 | Returns:
182 | tuple: A tuple containing a truthy value indicating the success of the search
183 | (True when web search is disabled, otherwise the API's status message) and a list of search result links.
184 | """
185 | if not self.web_search:
186 | return True, []
187 | url = f"{self.base_url}/rag-search"
188 | payload = {"conv_uuid": uuid}
189 | response = self.session.post(url, json=payload)
190 | links = [source["link"] for source in response.json().get("data", [])]
191 | return response.json().get("message", "").strip(), links
192 |
193 | def for_stream():
194 | conversation_uuid = initiate_conversation(conversation_prompt)
195 | web_search_result, links = RAG_search(conversation_uuid)
196 | if not web_search_result:
197 | print("Failed to generate WEB response. Making normal Query...")
198 |
199 | url = f"{self.base_url}/chat"
200 | payload = {
201 | "role": "user",
202 | "content": prompt,
203 | "conv_uuid": conversation_uuid,
204 | "model": self.model,
205 | }
206 | response = self.session.post(url, json=payload, stream=True)
207 | complete_content = ""
208 | for content in response.iter_content(
209 | decode_unicode=True, chunk_size=self.chunk_size
210 | ):
211 | complete_content += content
212 | yield content if raw else dict(text=complete_content)
213 | self.last_response.update(dict(text=complete_content, links=links))
214 | self.conversation.update_chat_history(
215 | prompt, self.get_message(self.last_response)
216 | )
217 |
218 | def for_non_stream():
219 | for _ in for_stream():
220 | pass
221 | return self.last_response
222 |
223 | return for_stream() if stream else for_non_stream()
224 |
225 | def chat(
226 | self,
227 | prompt: str,
228 | stream: bool = False,
229 | optimizer: str = None,
230 | conversationally: bool = False,
231 | ) -> str:
232 | """Generate response `str`
233 | Args:
234 | prompt (str): Prompt to be sent.
235 | stream (bool, optional): Flag for streaming response. Defaults to False.
236 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
237 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
238 | Returns:
239 | str: Response generated
240 | """
241 |
242 | def for_stream():
243 | for response in self.ask(
244 | prompt, True, optimizer=optimizer, conversationally=conversationally
245 | ):
246 | yield self.get_message(response)
247 |
248 | def for_non_stream():
249 | return self.get_message(
250 | self.ask(
251 | prompt,
252 | False,
253 | optimizer=optimizer,
254 | conversationally=conversationally,
255 | )
256 | )
257 |
258 | return for_stream() if stream else for_non_stream()
259 |
260 | def get_message(self, response: Dict[str, Any]) -> str:
261 | """Retrieves message only from response
262 |
263 | Args:
264 | response (dict): Response generated by `self.ask`
265 |
266 | Returns:
267 | str: Message extracted
268 | """
269 | assert isinstance(response, dict), "Response should be of dict data-type only"
270 | return response["text"]
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
17 |
18 |
19 |
20 | 
21 | 
22 | 
23 | 
24 | 
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 | [](https://discord.gg/ehwfVtsAts)
36 | [](https://twitter.com/anand_sreejan)
37 |
39 |
40 |
41 |
42 | # AI4Free: A Python Library for Free Access to All Available Large Language Models
43 |
44 | AI4Free is a Python library that provides convenient access to a variety of large language models (LLMs) from different providers, most without requiring any API keys or fees (a few, such as Cohere, REKA, and GROQ, require an API key as shown in their examples below). This allows developers and researchers to experiment with various LLMs and explore their capabilities without the barrier of cost.
45 |
46 | ## Crafted with ❤️ by Devs Do Code (Sree)
47 |
48 | > **Disclaimer:** This project is not officially associated with any of the API providers. It is an independent reverse-engineering effort to explore the available APIs.
49 |
50 |
51 | ## Features
52 | - **Multiple LLM Providers:** AI4Free supports a diverse range of LLM providers including:
53 | - **Open-source LLMs:** KoboldAI, LEO (Brave AI)
54 | - **Free-tier access LLMs:** YouChat, OpenGPT, Yep
55 | - **Research/Demo access LLMs:** Phind, Blackbox
56 | - **Conversation Management:** The library helps maintain conversation history with the LLMs, enabling more natural and context-aware interactions.
57 | - **Prompt Optimization:** AI4Free includes built-in prompt optimization techniques to enhance the quality and relevance of generated responses.
58 | - **Streaming Support:** Responses can be streamed in real-time, allowing for immediate feedback and dynamic interactions.
59 | - **Asynchronous Capabilities:** Async versions of several providers are available for efficient handling of multiple requests and improved performance.
60 |
61 | ## Installation
62 | ```bash
63 | pip install -U ai4free
64 | ```
66 |
67 | ## Usage
68 | The basic usage pattern involves creating an instance of the desired LLM provider and then using the `chat()` method to interact with the model.
69 |
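Most providers also accept a `stream=True` flag on `chat()` and an optional prompt `optimizer` (the provider docstrings list `code` and `shell_command`). Below is a minimal sketch using LEO, assuming it follows the same `chat()` signature as the other providers in this repository; note that, depending on the provider, each streamed value may be either the newest delta or the whole response accumulated so far.

```python
from ai4free import LEO

leo = LEO()

# Streaming: iterate over chunks as they arrive.
for chunk in leo.chat("Explain HTTP keep-alive in one short paragraph.", stream=True):
    # Depending on the provider, `chunk` is either the latest delta or the
    # accumulated response so far.
    print(chunk)

# Prompt optimization (optimizer names are taken from the provider docstrings).
print(leo.chat("list all files modified in the last day", optimizer="shell_command"))
```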
70 | ## Example Usage of Available Providers (Synchronous)
71 |
72 | Here's how to use each of the available providers in AI4Free without asynchronous functions:
73 |
74 | ## LEO
75 | ```python
76 | from ai4free import LEO
77 |
78 | leo = LEO()
79 |
80 | while True:
81 | prompt = input("You: ")
82 | response = leo.chat(prompt)
83 | print(f"LEO: {response}")
84 | ```
85 |
86 |
87 | ## KoboldAI
88 | ```python
89 | from ai4free import KOBOLDAI
90 |
91 | koboldai = KOBOLDAI()
92 |
93 | while True:
94 | prompt = input("You: ")
95 | response = koboldai.chat(prompt)
96 | print(f"KoboldAI: {response}")
97 | ```
98 |
99 |
100 | ## Blackbox
101 | ```python
102 | from ai4free import BLACKBOXAI
103 |
104 | ai = BLACKBOXAI(
105 | is_conversation=True,
106 | max_tokens=800,
107 | timeout=30,
108 | intro=None,
109 | filepath=None,
110 | update_file=True,
111 | proxies={},
112 | history_offset=10250,
113 | act=None,
114 | model=None # You can specify a model if needed
115 | )
116 |
117 | # Start an infinite loop for continuous interaction
118 | while True:
119 | # Define a prompt to send to the AI
120 | prompt = input("Enter your prompt: ")
121 |
122 | # Check if the user wants to exit the loop
123 | if prompt.lower() == "exit":
124 | break
125 |
126 | # Use the 'chat' method to send the prompt and receive a response
127 | r = ai.chat(prompt)
128 | print(r)
129 | ```
130 |
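The constructor arguments shown above (`is_conversation`, `filepath`, `update_file`, `history_offset`) control how conversation history is kept. A minimal sketch, again using BLACKBOXAI; the history file name is just an illustration, and the file format itself is managed by the underlying `webscout` Conversation helper:

```python
from ai4free import BLACKBOXAI

ai = BLACKBOXAI(
    is_conversation=True,             # keep prior turns as context for new prompts
    filepath="blackbox_history.txt",  # hypothetical file to read/write history from
    update_file=True,                 # append new prompts and responses to that file
    history_offset=10250,             # cap how much history is replayed per request
)

print(ai.chat("My name is Sree."))
print(ai.chat("What is my name?"))  # answered using the earlier turn as context
```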
131 | ## ThinkAnyAI
132 | ```python
133 | from ai4free import ThinkAnyAI
134 |
135 | thinkany = ThinkAnyAI()
136 |
137 | while True:
138 | prompt = input("Enter your prompt: ")
139 | response_str = thinkany.chat(prompt)
140 | print(response_str)
141 | ```
142 |
143 |
144 | ## Phind
145 | ```python
146 | from ai4free import PhindSearch
147 |
148 | # Create an instance of the PhindSearch class
149 | ph = PhindSearch()
150 |
151 | # Define a prompt to send to the AI
152 | prompt = "write a essay on phind"
153 |
154 | response = ph.chat(prompt)
155 | print(response)
156 | ```
157 |
158 |
159 | ## Yep
160 | ```python
161 | from ai4free import YEPCHAT
162 |
163 | # Instantiate the YEPCHAT class with default parameters
164 | yep_chat = YEPCHAT()  # avoid shadowing the class name with the instance
165 |
166 | # Define a prompt to send to the AI
167 | prompt = "What is the capital of France?"
168 |
169 | # Use the 'chat' method to get a response from the AI
170 | r = yep_chat.chat(prompt)
171 | print(r)
172 | ```
173 |
174 |
175 | ## YouChat
176 | ```python
177 | from ai4free import YouChat
178 |
179 | ai = YouChat(
180 | is_conversation=True,
181 | max_tokens=800,
182 | timeout=30,
183 | intro=None,
184 | filepath=None,
185 | update_file=True,
186 | proxies={},
187 | history_offset=10250,
188 | act=None,
189 | )
190 |
191 | prompt = "what is meaning of life"
192 |
193 | response = ai.ask(prompt)
194 |
195 | # Extract and print the message from the response
196 | message = ai.get_message(response)
197 | print(message)
198 | ```
199 |
200 | ## Cohere
201 | ```python
202 | from ai4free import Cohere
203 |
204 | # Replace 'YOUR_API_KEY' with your Cohere API key
205 | cohere = Cohere(api_key='YOUR_API_KEY')
206 |
207 | while True:
208 | prompt = input("You: ")
209 | response = cohere.chat(prompt)
210 | print(f"Cohere: {response}")
211 | ```
212 |
213 |
214 | ## REKA
215 | ```python
216 | from ai4free import REKA
217 |
218 | # Replace 'YOUR_API_KEY' with your REKA API key
219 | reka = REKA(api_key='YOUR_API_KEY')
220 |
221 | while True:
222 | prompt = input("You: ")
223 | response = reka.chat(prompt)
224 | print(f"REKA: {response}")
225 | ```
226 |
227 |
228 | ## GROQ
229 | ```python
230 | from ai4free import GROQ
231 |
232 | # Replace 'YOUR_API_KEY' with your GROQ API key
233 | groq = GROQ(api_key='YOUR_API_KEY')
234 |
235 | while True:
236 | prompt = input("You: ")
237 | response = groq.chat(prompt)
238 | print(f"GROQ: {response}")
239 | ```
240 | ## VLM
241 | ```python
242 | from ai4free import VLM
243 |
244 |
245 | # Initialize the VLM class
246 | vlm = VLM(model="llava-hf/llava-1.5-7b-hf", system_prompt="You are a helpful and informative AI assistant.")
247 |
248 | # Path to the image and the user message
249 | image_path = r"C:\Users\hp\Desktop\ai4free\WhatsApp Image 2024-05-19 at 19.01.01_47251a0f.jpg"
250 | user_message = "What is shown in this image?"
251 |
252 | # Encode the image to base64
253 | image_base64 = vlm.encode_image_to_base64(image_path)
254 |
255 | # Define the prompt with both image and text
256 | prompt = {
257 | "role": "user",
258 | "content": [
259 | {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"}},
260 | {"type": "text", "text": user_message}
261 | ]
262 | }
263 |
264 | # Get the response
265 | response = vlm.ask(prompt)
266 |
267 | # Extract and print the message from the response
268 | message = vlm.get_message(response)
269 | print(message)
270 | ```
271 | ## Deepinfra
272 | ```python
273 | from ai4free import DeepInfra
274 |
275 | ai = DeepInfra(
276 | model="meta-llama/Meta-Llama-3-70B-Instruct", # DeepInfra models
277 | is_conversation=True,
278 | max_tokens=800,
279 | timeout=30,
280 | intro=None,
281 | filepath=None,
282 | update_file=True,
283 | proxies={},
284 | history_offset=10250,
285 | act=None,
286 | )
287 |
288 | prompt = "what is meaning of life"
289 |
290 | response = ai.ask(prompt)
291 |
292 | # Extract and print the message from the response
293 | message = ai.get_message(response)
294 | print(message)
295 | ```
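## Asynchronous Usage
Async variants of several providers (for example `AsyncKOBOLDAI` and `AsyncBLACKBOXAI`) expose the same `ask()`/`chat()` API as coroutines. A minimal sketch, assuming the async classes are exported at the package level like their synchronous counterparts:

```python
import asyncio

from ai4free import AsyncKOBOLDAI  # assumption: exported from the package root

async def main() -> None:
    ai = AsyncKOBOLDAI()
    # Non-streaming call: await the complete response string.
    response = await ai.chat("What is the capital of France?")
    print(response)

asyncio.run(main())
```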
296 | ## Available Providers
297 | - **Cohere:** Provides access to various text generation models including "command-r-plus" with capabilities like summarization, copywriting, and dialogue.
298 | - **REKA:** Offers several LLM models like "reka-core", "reka-flash", and "reka-edge" for tasks such as question answering, text generation, and summarization.
299 | - **GROQ:** Grants access to models like "mixtral-8x7b-32768" with capabilities for text generation, translation, and question answering.
300 | - **LEO:** Provides access to "llama-2-13b-chat" with abilities for dialogue, text generation, and question answering.
301 | - **KoboldAI:** Offers various open-source LLM models for text generation and creative writing.
302 | - **OpenAI:** Enables interaction with OpenAI models like "gpt-3.5-turbo" for diverse tasks like text generation, translation, and code generation. Requires an API key.
303 | - **OpenGPT:** Provides access to various LLM models for text generation and creative writing.
304 | - **Blackbox:** Grants access to powerful LLMs for various tasks like text generation, translation, and question answering.
305 | - **Phind:** Offers access to advanced LLMs with research and demo capabilities for tasks like text generation, code generation, and question answering.
306 | - **Yep:** Provides access to models like "Mixtral-8x7B-Instruct-v0.1" with capabilities for text generation, translation, and question answering.
307 | - **YouChat:** Offers free-tier access to a powerful LLM with abilities for dialogue, text generation, and question answering.
308 | - **ThinkAnyAI:** Offers access to various LLM models like "claude-3-haiku", "llama-3-8b-instruct", "mistral-7b-instruct", "rwkv-v6", "gemini-pro", and "gpt-3.5-turbo" for tasks like text generation, question answering, and creative writing.
309 |
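Because every provider class exposes the same `chat()` interface, providers can be swapped or combined behind a single helper. A minimal sketch (the choice of providers here is arbitrary):

```python
from ai4free import LEO, KOBOLDAI, ThinkAnyAI

providers = {"leo": LEO(), "koboldai": KOBOLDAI(), "thinkany": ThinkAnyAI()}

def ask_all(prompt: str) -> dict:
    # Every provider shares the same chat() method, so they are interchangeable here.
    return {name: provider.chat(prompt) for name, provider in providers.items()}

print(ask_all("Say hi in five words."))
```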
310 | ## Conclusion
311 | AI4Free opens up exciting possibilities for exploring and utilizing the power of large language models without any cost. With its easy-to-use interface and support for diverse LLM providers, the library provides a valuable tool for developers, researchers, and anyone interested in exploring the cutting-edge of AI language technology.
312 |
313 |
314 |
315 |
316 | 
317 | 
318 | 
319 | 
320 | 
321 |
322 |
323 |
324 |
325 |
326 |
327 |
328 |
329 |
330 | [](https://discord.gg/ehwfVtsAts)
331 | [](https://twitter.com/anand_sreejan)
332 |
334 |
335 |
336 |
--------------------------------------------------------------------------------
/ai4free/KOBOLDAI.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | import requests
4 | from requests import get
5 | from uuid import uuid4
6 | from re import findall
7 | from requests.exceptions import RequestException
8 | from curl_cffi.requests import get, RequestsError
9 | from random import randint
10 | import json
11 | import yaml
12 | from webscout.AIutel import Optimizers
13 | from webscout.AIutel import Conversation
14 | from webscout.AIutel import AwesomePrompts, sanitize_stream
15 | from webscout.AIbase import Provider, AsyncProvider
16 | from Helpingai_T2 import Perplexity
17 | from webscout import exceptions
18 | from typing import Any, AsyncGenerator
19 | import logging
20 | import httpx
21 | class KOBOLDAI(Provider):
22 | def __init__(
23 | self,
24 | is_conversation: bool = True,
25 | max_tokens: int = 600,
26 | temperature: float = 1,
27 | top_p: float = 1,
28 | timeout: int = 30,
29 | intro: str = None,
30 | filepath: str = None,
31 | update_file: bool = True,
32 | proxies: dict = {},
33 | history_offset: int = 10250,
34 | act: str = None,
35 | ):
36 | """Instantiate aiforfree
37 |
38 | Args:
39 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
40 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
41 | temperature (float, optional): Degree of randomness in the generated text. Defaults to 1.
42 | top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
43 | timeout (int, optional): Http request timeout. Defaults to 30.
44 | intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
45 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
46 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
47 | proxies (dict, optional): Http request proxies (socks). Defaults to {}.
48 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
49 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
50 | """
51 | self.session = requests.Session()
52 | self.is_conversation = is_conversation
53 | self.max_tokens_to_sample = max_tokens
54 | self.temperature = temperature
55 | self.top_p = top_p
56 | self.chat_endpoint = (
57 | "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream"
58 | )
59 | self.stream_chunk_size = 64
60 | self.timeout = timeout
61 | self.last_response = {}
62 | self.headers = {
63 | "Content-Type": "application/json",
64 | "Accept": "application/json",
65 | }
66 |
67 | self.__available_optimizers = (
68 | method
69 | for method in dir(Optimizers)
70 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
71 | )
72 | self.session.headers.update(self.headers)
73 | Conversation.intro = (
74 | AwesomePrompts().get_act(
75 | act, raise_not_found=True, default=None, case_insensitive=True
76 | )
77 | if act
78 | else intro or Conversation.intro
79 | )
80 | self.conversation = Conversation(
81 | is_conversation, self.max_tokens_to_sample, filepath, update_file
82 | )
83 | self.conversation.history_offset = history_offset
84 | self.session.proxies = proxies
85 |
86 | def ask(
87 | self,
88 | prompt: str,
89 | stream: bool = False,
90 | raw: bool = False,
91 | optimizer: str = None,
92 | conversationally: bool = False,
93 | ) -> dict:
94 | """Chat with AI
95 |
96 | Args:
97 | prompt (str): Prompt to be sent.
98 | stream (bool, optional): Flag for streaming response. Defaults to False.
99 | raw (bool, optional): Stream back raw response as received. Defaults to False.
100 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
101 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
102 | Returns:
103 | dict : {}
104 | ```json
105 | {
106 | "token" : "How may I assist you today?"
107 | }
108 | ```
109 | """
110 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
111 | if optimizer:
112 | if optimizer in self.__available_optimizers:
113 | conversation_prompt = getattr(Optimizers, optimizer)(
114 | conversation_prompt if conversationally else prompt
115 | )
116 | else:
117 | raise Exception(
118 | f"Optimizer is not one of {self.__available_optimizers}"
119 | )
120 |
121 | self.session.headers.update(self.headers)
122 | payload = {
123 | "prompt": conversation_prompt,
124 | "temperature": self.temperature,
125 | "top_p": self.top_p,
126 | }
127 |
128 | def for_stream():
129 | response = self.session.post(
130 | self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
131 | )
132 | if not response.ok:
133 | raise Exception(
134 | f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
135 | )
136 |
137 | message_load = ""
138 | for value in response.iter_lines(
139 | decode_unicode=True,
140 | delimiter="" if raw else "event: message\ndata:",
141 | chunk_size=self.stream_chunk_size,
142 | ):
143 | try:
144 | resp = json.loads(value)
145 | message_load += self.get_message(resp)
146 | resp["token"] = message_load
147 | self.last_response.update(resp)
148 | yield value if raw else resp
149 | except json.decoder.JSONDecodeError:
150 | pass
151 | self.conversation.update_chat_history(
152 | prompt, self.get_message(self.last_response)
153 | )
154 |
155 | def for_non_stream():
156 | # let's make use of stream
157 | for _ in for_stream():
158 | pass
159 | return self.last_response
160 |
161 | return for_stream() if stream else for_non_stream()
162 |
163 | def chat(
164 | self,
165 | prompt: str,
166 | stream: bool = False,
167 | optimizer: str = None,
168 | conversationally: bool = False,
169 | ) -> str:
170 | """Generate response `str`
171 | Args:
172 | prompt (str): Prompt to be sent.
173 | stream (bool, optional): Flag for streaming response. Defaults to False.
174 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
175 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
176 | Returns:
177 | str: Response generated
178 | """
179 |
180 | def for_stream():
181 | for response in self.ask(
182 | prompt, True, optimizer=optimizer, conversationally=conversationally
183 | ):
184 | yield self.get_message(response)
185 |
186 | def for_non_stream():
187 | return self.get_message(
188 | self.ask(
189 | prompt,
190 | False,
191 | optimizer=optimizer,
192 | conversationally=conversationally,
193 | )
194 | )
195 |
196 | return for_stream() if stream else for_non_stream()
197 |
198 | def get_message(self, response: dict) -> str:
199 | """Retrieves message only from response
200 |
201 | Args:
202 | response (dict): Response generated by `self.ask`
203 |
204 | Returns:
205 | str: Message extracted
206 | """
207 | assert isinstance(response, dict), "Response should be of dict data-type only"
208 | return response.get("token")
209 | class AsyncKOBOLDAI(AsyncProvider):
210 | def __init__(
211 | self,
212 | is_conversation: bool = True,
213 | max_tokens: int = 600,
214 | temperature: float = 1,
215 | top_p: float = 1,
216 | timeout: int = 30,
217 | intro: str = None,
218 | filepath: str = None,
219 | update_file: bool = True,
220 | proxies: dict = {},
221 | history_offset: int = 10250,
222 | act: str = None,
223 | ):
224 | """Instantiate aiforfree
225 |
226 | Args:
227 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
228 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
229 | temperature (float, optional): Degree of randomness in the generated text. Defaults to 1.
230 | top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
231 | timeout (int, optional): Http request timeout. Defaults to 30.
232 | intro (str, optional): Conversation introductory prompt. Defaults to `Conversation.intro`.
233 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
234 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
235 | proxies (dict, optional): Http request proxies (socks). Defaults to {}.
236 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
237 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
238 | """
239 | self.is_conversation = is_conversation
240 | self.max_tokens_to_sample = max_tokens
241 | self.temperature = temperature
242 | self.top_p = top_p
243 | self.chat_endpoint = (
244 | "https://koboldai-koboldcpp-tiefighter.hf.space/api/extra/generate/stream"
245 | )
246 | self.stream_chunk_size = 64
247 | self.timeout = timeout
248 | self.last_response = {}
249 | self.headers = {
250 | "Content-Type": "application/json",
251 | "Accept": "application/json",
252 | }
253 |
254 | self.__available_optimizers = (
255 | method
256 | for method in dir(Optimizers)
257 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
258 | )
259 | Conversation.intro = (
260 | AwesomePrompts().get_act(
261 | act, raise_not_found=True, default=None, case_insensitive=True
262 | )
263 | if act
264 | else intro or Conversation.intro
265 | )
266 | self.conversation = Conversation(
267 | is_conversation, self.max_tokens_to_sample, filepath, update_file
268 | )
269 | self.conversation.history_offset = history_offset
270 | self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
271 |
272 | async def ask(
273 | self,
274 | prompt: str,
275 | stream: bool = False,
276 | raw: bool = False,
277 | optimizer: str = None,
278 | conversationally: bool = False,
279 | ) -> dict | AsyncGenerator:
280 | """Chat with AI asynchronously.
281 |
282 | Args:
283 | prompt (str): Prompt to be sent.
284 | stream (bool, optional): Flag for streaming response. Defaults to False.
285 | raw (bool, optional): Stream back raw response as received. Defaults to False.
286 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
287 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
288 | Returns:
289 | dict|AsyncGenerator : ai content
290 | ```json
291 | {
292 | "token" : "How may I assist you today?"
293 | }
294 | ```
295 | """
296 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
297 | if optimizer:
298 | if optimizer in self.__available_optimizers:
299 | conversation_prompt = getattr(Optimizers, optimizer)(
300 | conversation_prompt if conversationally else prompt
301 | )
302 | else:
303 | raise Exception(
304 | f"Optimizer is not one of {self.__available_optimizers}"
305 | )
306 |
307 | payload = {
308 | "prompt": conversation_prompt,
309 | "temperature": self.temperature,
310 | "top_p": self.top_p,
311 | }
312 |
313 | async def for_stream():
314 | async with self.session.stream(
315 | "POST", self.chat_endpoint, json=payload, timeout=self.timeout
316 | ) as response:
317 | if not response.is_success:
318 | raise exceptions.FailedToGenerateResponseError(
319 | f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
320 | )
321 |
322 | message_load = ""
323 | async for value in response.aiter_lines():
324 | try:
325 | resp = sanitize_stream(value)
326 | message_load += await self.get_message(resp)
327 | resp["token"] = message_load
328 | self.last_response.update(resp)
329 | yield value if raw else resp
330 | except json.decoder.JSONDecodeError:
331 | pass
332 |
333 | self.conversation.update_chat_history(
334 | prompt, await self.get_message(self.last_response)
335 | )
336 |
337 | async def for_non_stream():
338 | # let's make use of stream
339 | async for _ in for_stream():
340 | pass
341 | return self.last_response
342 |
343 | return for_stream() if stream else await for_non_stream()
344 |
345 | async def chat(
346 | self,
347 | prompt: str,
348 | stream: bool = False,
349 | optimizer: str = None,
350 | conversationally: bool = False,
351 | ) -> str | AsyncGenerator:
352 | """Generate response `str` asynchronously.
353 | Args:
354 | prompt (str): Prompt to be sent.
355 | stream (bool, optional): Flag for streaming response. Defaults to False.
356 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
357 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
358 | Returns:
359 | str: Response generated
360 | """
361 |
362 | async def for_stream():
363 | async_ask = await self.ask(
364 | prompt, True, optimizer=optimizer, conversationally=conversationally
365 | )
366 | async for response in async_ask:
367 | yield await self.get_message(response)
368 |
369 | async def for_non_stream():
370 | return await self.get_message(
371 | await self.ask(
372 | prompt,
373 | False,
374 | optimizer=optimizer,
375 | conversationally=conversationally,
376 | )
377 | )
378 |
379 | return for_stream() if stream else await for_non_stream()
380 |
381 | async def get_message(self, response: dict) -> str:
382 | """Retrieves message only from response
383 |
384 | Args:
385 | response (dict): Response generated by `self.ask`
386 |
387 | Returns:
388 | str: Message extracted
389 | """
390 | assert isinstance(response, dict), "Response should be of dict data-type only"
391 | return response.get("token")
--------------------------------------------------------------------------------
/ai4free/Blackbox.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | import requests
4 | from requests import get
5 | from uuid import uuid4
6 | from re import findall
7 | from requests.exceptions import RequestException
8 | from curl_cffi.requests import get, RequestsError
9 | from random import randint
10 | import json
11 | import yaml
12 | from webscout.AIutel import Optimizers
13 | from webscout.AIutel import Conversation
14 | from webscout.AIutel import AwesomePrompts, sanitize_stream
15 | from webscout.AIbase import Provider, AsyncProvider
16 | from Helpingai_T2 import Perplexity
17 | from webscout import exceptions
18 | from typing import Any, AsyncGenerator
19 | import logging
20 | import httpx
21 |
22 | class BLACKBOXAI:
23 | def __init__(
24 | self,
25 | is_conversation: bool = True,
26 | max_tokens: int = 8000,
27 | timeout: int = 30,
28 | intro: str = None,
29 | filepath: str = None,
30 | update_file: bool = True,
31 | proxies: dict = {},
32 | history_offset: int = 10250,
33 | act: str = None,
34 | model: str = None,
35 | ):
36 | """Instantiates BLACKBOXAI
37 |
38 | Args:
39 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
40 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 8000.
41 | timeout (int, optional): Http request timeout. Defaults to 30.
42 | intro (str, optional): Conversation introductory prompt. Defaults to None.
43 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
44 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
45 | proxies (dict, optional): Http request proxies. Defaults to {}.
46 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
47 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
48 | model (str, optional): Model name. Defaults to None.
49 | """
50 | self.session = requests.Session()
51 | self.max_tokens_to_sample = max_tokens
52 | self.is_conversation = is_conversation
53 | self.chat_endpoint = "https://www.blackbox.ai/api/chat"
54 | self.stream_chunk_size = 64
55 | self.timeout = timeout
56 | self.last_response = {}
57 | self.model = model
58 | self.previewToken: str = None
59 | self.userId: str = ""
60 | self.codeModelMode: bool = True
61 | self.id: str = ""
62 | self.agentMode: dict = {}
63 | self.trendingAgentMode: dict = {}
64 | self.isMicMode: bool = False
65 |
66 | self.headers = {
67 | "Content-Type": "application/json",
68 | "User-Agent": "",
69 | "Accept": "*/*",
70 | "Accept-Encoding": "Identity",
71 | }
72 |
73 | self.__available_optimizers = (
74 | method
75 | for method in dir(Optimizers)
76 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
77 | )
78 | self.session.headers.update(self.headers)
79 | Conversation.intro = (
80 | AwesomePrompts().get_act(
81 | act, raise_not_found=True, default=None, case_insensitive=True
82 | )
83 | if act
84 | else intro or Conversation.intro
85 | )
86 | self.conversation = Conversation(
87 | is_conversation, self.max_tokens_to_sample, filepath, update_file
88 | )
89 | self.conversation.history_offset = history_offset
90 | self.session.proxies = proxies
91 |
92 | def ask(
93 | self,
94 | prompt: str,
95 | stream: bool = False,
96 | raw: bool = False,
97 | optimizer: str = None,
98 | conversationally: bool = False,
99 | ) -> dict:
100 | """Chat with AI
101 |
102 | Args:
103 | prompt (str): Prompt to be sent.
104 | stream (bool, optional): Flag for streaming response. Defaults to False.
105 | raw (bool, optional): Stream back raw response as received. Defaults to False.
106 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
107 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
108 | Returns:
109 | dict : {}
110 | ```json
111 | {
112 | "text" : "print('How may I help you today?')"
113 | }
114 | ```
115 | """
116 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
117 | if optimizer:
118 | if optimizer in self.__available_optimizers:
119 | conversation_prompt = getattr(Optimizers, optimizer)(
120 | conversation_prompt if conversationally else prompt
121 | )
122 | else:
123 | raise Exception(
124 | f"Optimizer is not one of {self.__available_optimizers}"
125 | )
126 |
127 | self.session.headers.update(self.headers)
128 | payload = {
129 | "messages": [
130 | # json.loads(prev_messages),
131 | {"content": conversation_prompt, "role": "user"}
132 | ],
133 | "id": self.id,
134 | "previewToken": self.previewToken,
135 | "userId": self.userId,
136 | "codeModelMode": self.codeModelMode,
137 | "agentMode": self.agentMode,
138 | "trendingAgentMode": self.trendingAgentMode,
139 | "isMicMode": self.isMicMode,
140 | }
141 |
142 | def for_stream():
143 | response = self.session.post(
144 | self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
145 | )
146 | if (
147 | not response.ok
148 | or not response.headers.get("Content-Type")
149 | == "text/plain; charset=utf-8"
150 | ):
151 | raise Exception(
152 | f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
153 | )
154 | streaming_text = ""
155 | for value in response.iter_lines(
156 | decode_unicode=True,
157 | chunk_size=self.stream_chunk_size,
158 | delimiter="\n",
159 | ):
160 | try:
161 | if bool(value):
162 | streaming_text += value + ("\n" if stream else "")
163 |
164 | resp = dict(text=streaming_text)
165 | self.last_response.update(resp)
166 | yield value if raw else resp
167 | except json.decoder.JSONDecodeError:
168 | pass
169 | self.conversation.update_chat_history(
170 | prompt, self.get_message(self.last_response)
171 | )
172 |
173 | def for_non_stream():
174 | for _ in for_stream():
175 | pass
176 | return self.last_response
177 |
178 | return for_stream() if stream else for_non_stream()
179 |
180 | def chat(
181 | self,
182 | prompt: str,
183 | stream: bool = False,
184 | optimizer: str = None,
185 | conversationally: bool = False,
186 | ) -> str:
187 | """Generate response `str`
188 | Args:
189 | prompt (str): Prompt to be sent.
190 | stream (bool, optional): Flag for streaming response. Defaults to False.
191 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
192 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
193 | Returns:
194 | str: Response generated
195 | """
196 |
197 | def for_stream():
198 | for response in self.ask(
199 | prompt, True, optimizer=optimizer, conversationally=conversationally
200 | ):
201 | yield self.get_message(response)
202 |
203 | def for_non_stream():
204 | return self.get_message(
205 | self.ask(
206 | prompt,
207 | False,
208 | optimizer=optimizer,
209 | conversationally=conversationally,
210 | )
211 | )
212 |
213 | return for_stream() if stream else for_non_stream()
214 |
215 | def get_message(self, response: dict) -> str:
216 | """Retrieves message only from response
217 |
218 | Args:
219 | response (dict): Response generated by `self.ask`
220 |
221 | Returns:
222 | str: Message extracted
223 | """
224 | assert isinstance(response, dict), "Response should be of dict data-type only"
225 | return response["text"]
226 |
227 | class AsyncBLACKBOXAI(AsyncProvider):
228 | def __init__(
229 | self,
230 | is_conversation: bool = True,
231 | max_tokens: int = 600,
232 | timeout: int = 30,
233 | intro: str = None,
234 | filepath: str = None,
235 | update_file: bool = True,
236 | proxies: dict = {},
237 | history_offset: int = 10250,
238 | act: str = None,
239 | model: str = None,
240 | ):
241 | """Instantiates BLACKBOXAI
242 |
243 | Args:
244 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
245 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
246 | timeout (int, optional): Http request timeout. Defaults to 30.
247 | intro (str, optional): Conversation introductory prompt. Defaults to None.
248 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
249 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
250 | proxies (dict, optional): Http request proxies. Defaults to {}.
251 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
252 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
253 | model (str, optional): Model name. Defaults to None.
254 | """
255 | self.max_tokens_to_sample = max_tokens
256 | self.is_conversation = is_conversation
257 | self.chat_endpoint = "https://www.blackbox.ai/api/chat"
258 | self.stream_chunk_size = 64
259 | self.timeout = timeout
260 | self.last_response = {}
261 | self.model = model
262 | self.previewToken: str = None
263 | self.userId: str = ""
264 | self.codeModelMode: bool = True
265 | self.id: str = ""
266 | self.agentMode: dict = {}
267 | self.trendingAgentMode: dict = {}
268 | self.isMicMode: bool = False
269 |
270 | self.headers = {
271 | "Content-Type": "application/json",
272 | "User-Agent": "",
273 | "Accept": "*/*",
274 | "Accept-Encoding": "Identity",
275 | }
276 |
277 | self.__available_optimizers = (
278 | method
279 | for method in dir(Optimizers)
280 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
281 | )
282 | Conversation.intro = (
283 | AwesomePrompts().get_act(
284 | act, raise_not_found=True, default=None, case_insensitive=True
285 | )
286 | if act
287 | else intro or Conversation.intro
288 | )
289 | self.conversation = Conversation(
290 | is_conversation, self.max_tokens_to_sample, filepath, update_file
291 | )
292 | self.conversation.history_offset = history_offset
293 | self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
294 |
295 | async def ask(
296 | self,
297 | prompt: str,
298 | stream: bool = False,
299 | raw: bool = False,
300 | optimizer: str = None,
301 | conversationally: bool = False,
302 | ) -> dict | AsyncGenerator:
303 | """Chat with AI asynchronously.
304 |
305 | Args:
306 | prompt (str): Prompt to be sent.
307 | stream (bool, optional): Flag for streaming response. Defaults to False.
308 | raw (bool, optional): Stream back raw response as received. Defaults to False.
309 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
310 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
311 | Returns:
312 | dict|AsyncGenerator : ai content
313 | ```json
314 | {
315 | "text" : "print('How may I help you today?')"
316 | }
317 | ```
318 | """
319 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
320 | if optimizer:
321 | if optimizer in self.__available_optimizers:
322 | conversation_prompt = getattr(Optimizers, optimizer)(
323 | conversation_prompt if conversationally else prompt
324 | )
325 | else:
326 | raise Exception(
327 | f"Optimizer is not one of {self.__available_optimizers}"
328 | )
329 |
330 | payload = {
331 | "messages": [
332 | # json.loads(prev_messages),
333 | {"content": conversation_prompt, "role": "user"}
334 | ],
335 | "id": self.id,
336 | "previewToken": self.previewToken,
337 | "userId": self.userId,
338 | "codeModelMode": self.codeModelMode,
339 | "agentMode": self.agentMode,
340 | "trendingAgentMode": self.trendingAgentMode,
341 | "isMicMode": self.isMicMode,
342 | }
343 |
344 | async def for_stream():
345 | async with self.session.stream(
346 | "POST", self.chat_endpoint, json=payload, timeout=self.timeout
347 | ) as response:
348 | if (
349 | not response.is_success
350 | or not response.headers.get("Content-Type")
351 | == "text/plain; charset=utf-8"
352 | ):
353 | raise exceptions.FailedToGenerateResponseError(
354 | f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
355 | )
356 | streaming_text = ""
357 | async for value in response.aiter_lines():
358 | try:
359 | if bool(value):
360 | streaming_text += value + ("\n" if stream else "")
361 | resp = dict(text=streaming_text)
362 | self.last_response.update(resp)
363 | yield value if raw else resp
364 | except json.decoder.JSONDecodeError:
365 | pass
366 | self.conversation.update_chat_history(
367 | prompt, await self.get_message(self.last_response)
368 | )
369 |
370 | async def for_non_stream():
371 | async for _ in for_stream():
372 | pass
373 | return self.last_response
374 |
375 | return for_stream() if stream else await for_non_stream()
376 |
377 | async def chat(
378 | self,
379 | prompt: str,
380 | stream: bool = False,
381 | optimizer: str = None,
382 | conversationally: bool = False,
383 | ) -> str | AsyncGenerator:
384 | """Generate response `str` asynchronously.
385 | Args:
386 | prompt (str): Prompt to be sent.
387 | stream (bool, optional): Flag for streaming response. Defaults to False.
388 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
389 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
390 | Returns:
391 | str|AsyncGenerator: Response generated
392 | """
393 |
394 | async def for_stream():
395 | async_ask = await self.ask(
396 | prompt, True, optimizer=optimizer, conversationally=conversationally
397 | )
398 | async for response in async_ask:
399 | yield await self.get_message(response)
400 |
401 | async def for_non_stream():
402 | return await self.get_message(
403 | await self.ask(
404 | prompt,
405 | False,
406 | optimizer=optimizer,
407 | conversationally=conversationally,
408 | )
409 | )
410 |
411 | return for_stream() if stream else await for_non_stream()
412 |
413 | async def get_message(self, response: dict) -> str:
414 | """Retrieves message only from response
415 |
416 | Args:
417 | response (dict): Response generated by `self.ask`
418 |
419 | Returns:
420 | str: Message extracted
421 | """
422 | assert isinstance(response, dict), "Response should be of dict data-type only"
423 | return response["text"]
--------------------------------------------------------------------------------
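The async provider whose tail appears above follows the ask()/chat()/get_message() contract its docstrings describe: chat() resolves to the reply text, while chat(stream=True) resolves to an async generator of chunks. A minimal, hypothetical usage sketch (the class name AsyncBLACKBOX is assumed from the package's file listing; its class statement is not visible in this excerpt, and the import path assumes a re-export from the package's __init__.py):

```python
# Hypothetical sketch; AsyncBLACKBOX is an assumed name, not confirmed by this excerpt.
import asyncio

from ai4free import AsyncBLACKBOX  # assumed export

async def main() -> None:
    ai = AsyncBLACKBOX(is_conversation=True, timeout=30)

    # Non-streaming: chat() resolves to the full reply string.
    print(await ai.chat("Hello there"))

    # Streaming: chat(stream=True) resolves to an async generator of chunks.
    async for chunk in await ai.chat("Write a haiku about Python", stream=True):
        print(chunk)

asyncio.run(main())
```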
/ai4free/OpenGPT.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | import click
4 | import requests
5 | from requests import get
6 | from uuid import uuid4
7 | from re import findall
8 | from requests.exceptions import RequestException
9 | from curl_cffi.requests import get, RequestsError
10 | import g4f
11 | from random import randint
12 | from PIL import Image
13 | import io
14 | import re
15 | import json
16 | import yaml
17 | from webscout.AIutel import Optimizers
18 | from webscout.AIutel import Conversation
19 | from webscout.AIutel import AwesomePrompts, sanitize_stream
20 | from webscout.AIbase import Provider, AsyncProvider
21 | from webscout import exceptions
22 | from typing import Any, AsyncGenerator
23 | import logging
24 | import httpx
25 | class OPENGPT:
26 | def __init__(
27 | self,
28 | is_conversation: bool = True,
29 | max_tokens: int = 600,
30 | timeout: int = 30,
31 | intro: str = None,
32 | filepath: str = None,
33 | update_file: bool = True,
34 | proxies: dict = {},
35 | history_offset: int = 10250,
36 | act: str = None,
37 | ):
38 | """Instantiates OPENGPT
39 |
40 | Args:
41 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
42 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
43 | timeout (int, optional): Http request timeout. Defaults to 30.
44 | intro (str, optional): Conversation introductory prompt. Defaults to None.
45 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
46 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
47 | proxies (dict, optional): Http request proxies. Defaults to {}.
48 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
49 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
50 | """
51 | self.session = requests.Session()
52 | self.max_tokens_to_sample = max_tokens
53 | self.is_conversation = is_conversation
54 | self.chat_endpoint = (
55 | "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
56 | )
57 | self.stream_chunk_size = 64
58 | self.timeout = timeout
59 | self.last_response = {}
60 | self.assistant_id = "bca37014-6f97-4f2b-8928-81ea8d478d88"
61 | self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app"
62 |
63 | self.headers = {
64 | "authority": self.authority,
65 | "accept": "text/event-stream",
66 | "accept-language": "en-US,en;q=0.7",
67 | "cache-control": "no-cache",
68 | "content-type": "application/json",
69 | "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app",
70 | "pragma": "no-cache",
71 | "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/",
72 | "sec-fetch-site": "same-origin",
73 | "sec-gpc": "1",
74 | "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
75 | }
76 |
77 | self.__available_optimizers = tuple(  # tuple, not a generator, so it survives repeated membership checks
78 | method
79 | for method in dir(Optimizers)
80 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
81 | )
82 | self.session.headers.update(self.headers)
83 | Conversation.intro = (
84 | AwesomePrompts().get_act(
85 | act, raise_not_found=True, default=None, case_insensitive=True
86 | )
87 | if act
88 | else intro or Conversation.intro
89 | )
90 | self.conversation = Conversation(
91 | is_conversation, self.max_tokens_to_sample, filepath, update_file
92 | )
93 | self.conversation.history_offset = history_offset
94 | self.session.proxies = proxies
95 |
96 | def ask(
97 | self,
98 | prompt: str,
99 | stream: bool = False,
100 | raw: bool = False,
101 | optimizer: str = None,
102 | conversationally: bool = False,
103 | ) -> dict:
104 | """Chat with AI
105 |
106 | Args:
107 | prompt (str): Prompt to be sent.
108 | stream (bool, optional): Flag for streaming response. Defaults to False.
109 | raw (bool, optional): Stream back raw response as received. Defaults to False.
110 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
111 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
112 | Returns:
113 | dict : {}
114 | ```json
115 | {
116 | "messages": [
117 | {
118 | "content": "Hello there",
119 | "additional_kwargs": {},
120 | "type": "human",
121 | "example": false
122 | },
123 | {
124 | "content": "Hello! How can I assist you today?",
125 | "additional_kwargs": {
126 | "agent": {
127 | "return_values": {
128 | "output": "Hello! How can I assist you today?"
129 | },
130 | "log": "Hello! How can I assist you today?",
131 | "type": "AgentFinish"
132 | }
133 | },
134 | "type": "ai",
135 | "example": false
136 | }]
137 | }
138 | ```
139 | """
140 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
141 | if optimizer:
142 | if optimizer in self.__available_optimizers:
143 | conversation_prompt = getattr(Optimizers, optimizer)(
144 | conversation_prompt if conversationally else prompt
145 | )
146 | else:
147 | raise Exception(
148 | f"Optimizer is not one of {self.__available_optimizers}"
149 | )
150 |
151 | self.session.headers.update(self.headers)
152 | self.session.headers.update(
153 | dict(
154 | cookie=f"opengpts_user_id={uuid4().__str__()}",
155 | )
156 | )
157 | payload = {
158 | "input": [
159 | {
160 | "content": conversation_prompt,
161 | "additional_kwargs": {},
162 | "type": "human",
163 | "example": False,
164 | },
165 | ],
166 | "assistant_id": self.assistant_id,
167 | "thread_id": "",
168 | }
169 |
170 | def for_stream():
171 | response = self.session.post(
172 | self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
173 | )
174 | if (
175 | not response.ok
176 | or not response.headers.get("Content-Type")
177 | == "text/event-stream; charset=utf-8"
178 | ):
179 | raise Exception(
180 | f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
181 | )
182 |
183 | for value in response.iter_lines(
184 | decode_unicode=True,
185 | chunk_size=self.stream_chunk_size,
186 | ):
187 | try:
188 | modified_value = re.sub("data:", "", value)
189 | resp = json.loads(modified_value)
190 | if len(resp) == 1:
191 | continue
192 | self.last_response.update(resp[1])
193 | yield value if raw else resp[1]
194 | except json.decoder.JSONDecodeError:
195 | pass
196 | self.conversation.update_chat_history(
197 | prompt, self.get_message(self.last_response)
198 | )
199 |
200 | def for_non_stream():
201 | for _ in for_stream():
202 | pass
203 | return self.last_response
204 |
205 | return for_stream() if stream else for_non_stream()
206 |
207 | def chat(
208 | self,
209 | prompt: str,
210 | stream: bool = False,
211 | optimizer: str = None,
212 | conversationally: bool = False,
213 | ) -> str:
214 | """Generate response `str`
215 | Args:
216 | prompt (str): Prompt to be sent.
217 | stream (bool, optional): Flag for streaming response. Defaults to False.
218 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
219 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
220 | Returns:
221 | str: Response generated
222 | """
223 |
224 | def for_stream():
225 | for response in self.ask(
226 | prompt, True, optimizer=optimizer, conversationally=conversationally
227 | ):
228 | yield self.get_message(response)
229 |
230 | def for_non_stream():
231 | return self.get_message(
232 | self.ask(
233 | prompt,
234 | False,
235 | optimizer=optimizer,
236 | conversationally=conversationally,
237 | )
238 | )
239 |
240 | return for_stream() if stream else for_non_stream()
241 |
242 | def get_message(self, response: dict) -> str:
243 | """Retrieves message only from response
244 |
245 | Args:
246 | response (dict): Response generated by `self.ask`
247 |
248 | Returns:
249 | str: Message extracted
250 | """
251 | assert isinstance(response, dict), "Response should be of dict data-type only"
252 | return response["content"]
253 | class AsyncOPENGPT(AsyncProvider):
254 | def __init__(
255 | self,
256 | is_conversation: bool = True,
257 | max_tokens: int = 600,
258 | timeout: int = 30,
259 | intro: str = None,
260 | filepath: str = None,
261 | update_file: bool = True,
262 | proxies: dict = {},
263 | history_offset: int = 10250,
264 | act: str = None,
265 | ):
266 | """Instantiates OPENGPT
267 |
268 | Args:
269 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
270 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
271 | timeout (int, optional): Http request timeout. Defaults to 30.
272 | intro (str, optional): Conversation introductory prompt. Defaults to None.
273 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
274 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
275 | proxies (dict, optional): Http request proxies. Defaults to {}.
276 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
277 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
278 | """
279 | self.max_tokens_to_sample = max_tokens
280 | self.is_conversation = is_conversation
281 | self.chat_endpoint = (
282 | "https://opengpts-example-vz4y4ooboq-uc.a.run.app/runs/stream"
283 | )
284 | self.stream_chunk_size = 64
285 | self.timeout = timeout
286 | self.last_response = {}
287 | self.assistant_id = "bca37014-6f97-4f2b-8928-81ea8d478d88"
288 | self.authority = "opengpts-example-vz4y4ooboq-uc.a.run.app"
289 |
290 | self.headers = {
291 | "authority": self.authority,
292 | "accept": "text/event-stream",
293 | "accept-language": "en-US,en;q=0.7",
294 | "cache-control": "no-cache",
295 | "content-type": "application/json",
296 | "origin": "https://opengpts-example-vz4y4ooboq-uc.a.run.app",
297 | "pragma": "no-cache",
298 | "referer": "https://opengpts-example-vz4y4ooboq-uc.a.run.app/",
299 | "sec-fetch-site": "same-origin",
300 | "sec-gpc": "1",
301 | "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
302 | }
303 |
304 | self.__available_optimizers = tuple(  # tuple, not a generator, so it survives repeated membership checks
305 | method
306 | for method in dir(Optimizers)
307 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
308 | )
309 | Conversation.intro = (
310 | AwesomePrompts().get_act(
311 | act, raise_not_found=True, default=None, case_insensitive=True
312 | )
313 | if act
314 | else intro or Conversation.intro
315 | )
316 | self.conversation = Conversation(
317 | is_conversation, self.max_tokens_to_sample, filepath, update_file
318 | )
319 | self.conversation.history_offset = history_offset
320 | self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
321 |
322 | async def ask(
323 | self,
324 | prompt: str,
325 | stream: bool = False,
326 | raw: bool = False,
327 | optimizer: str = None,
328 | conversationally: bool = False,
329 | ) -> dict | AsyncGenerator:
330 | """Chat with AI asynchronously
331 |
332 | Args:
333 | prompt (str): Prompt to be sent.
334 | stream (bool, optional): Flag for streaming response. Defaults to False.
335 | raw (bool, optional): Stream back raw response as received. Defaults to False.
336 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
337 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
338 | Returns:
339 | dict|AsyncGenerator : ai content.
340 | ```json
341 | {
342 | "messages": [
343 | {
344 | "content": "Hello there",
345 | "additional_kwargs": {},
346 | "type": "human",
347 | "example": false
348 | },
349 | {
350 | "content": "Hello! How can I assist you today?",
351 | "additional_kwargs": {
352 | "agent": {
353 | "return_values": {
354 | "output": "Hello! How can I assist you today?"
355 | },
356 | "log": "Hello! How can I assist you today?",
357 | "type": "AgentFinish"
358 | }
359 | },
360 | "type": "ai",
361 | "example": false
362 | }]
363 | }
364 | ```
365 | """
366 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
367 | if optimizer:
368 | if optimizer in self.__available_optimizers:
369 | conversation_prompt = getattr(Optimizers, optimizer)(
370 | conversation_prompt if conversationally else prompt
371 | )
372 | else:
373 | raise Exception(
374 | f"Optimizer is not one of {self.__available_optimizers}"
375 | )
376 | self.headers.update(
377 | dict(
378 | cookie=f"opengpts_user_id={uuid4().__str__()}",
379 | )
380 | )
381 | payload = {
382 | "input": [
383 | {
384 | "content": conversation_prompt,
385 | "additional_kwargs": {},
386 | "type": "human",
387 | "example": False,
388 | },
389 | ],
390 | "assistant_id": self.assistant_id,
391 | "thread_id": "",
392 | }
393 |
394 | async def for_stream():
395 | async with self.session.stream(
396 | "POST",
397 | self.chat_endpoint,
398 | json=payload,
399 | timeout=self.timeout,
400 | headers=self.headers,
401 | ) as response:
402 | if (
403 | not response.is_success
404 | or not response.headers.get("Content-Type")
405 | == "text/event-stream; charset=utf-8"
406 | ):
407 | raise exceptions.FailedToGenerateResponseError(
408 | f"Failed to generate response - ({response.status_code}, {response.reason_phrase}) - {response.text}"
409 | )
410 |
411 | async for value in response.aiter_lines():
412 | try:
413 | modified_value = re.sub("data:", "", value)
414 | resp = json.loads(modified_value)
415 | if len(resp) == 1:
416 | continue
417 | self.last_response.update(resp[1])
418 | yield value if raw else resp[1]
419 | except json.decoder.JSONDecodeError:
420 | pass
421 |
422 | self.conversation.update_chat_history(
423 | prompt, await self.get_message(self.last_response)
424 | )
425 |
426 | async def for_non_stream():
427 | async for _ in for_stream():
428 | pass
429 | return self.last_response
430 |
431 | return for_stream() if stream else await for_non_stream()
432 |
433 | async def chat(
434 | self,
435 | prompt: str,
436 | stream: bool = False,
437 | optimizer: str = None,
438 | conversationally: bool = False,
439 | ) -> str | AsyncGenerator:
440 | """Generate response `str` asynchronously.
441 | Args:
442 | prompt (str): Prompt to be sent.
443 | stream (bool, optional): Flag for streaming response. Defaults to False.
444 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
445 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
446 | Returns:
447 | str|AsyncGenerator: Response generated
448 | """
449 |
450 | async def for_stream():
451 | async_ask = await self.ask(
452 | prompt, True, optimizer=optimizer, conversationally=conversationally
453 | )
454 | async for response in async_ask:
455 | yield await self.get_message(response)
456 |
457 | async def for_non_stream():
458 | return await self.get_message(
459 | await self.ask(
460 | prompt,
461 | False,
462 | optimizer=optimizer,
463 | conversationally=conversationally,
464 | )
465 | )
466 |
467 | return for_stream() if stream else await for_non_stream()
468 |
469 | async def get_message(self, response: dict) -> str:
470 | """Retrieves message only from response
471 |
472 | Args:
473 | response (dict): Response generated by `self.ask`
474 |
475 | Returns:
476 | str: Message extracted
477 | """
478 | assert isinstance(response, dict), "Response should be of dict data-type only"
479 | return response["content"]
--------------------------------------------------------------------------------
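A minimal usage sketch for the synchronous OPENGPT provider defined in OpenGPT.py above; it exercises only the chat() surface described in the docstrings, and the prompts are placeholders:

```python
# Minimal sketch; importing the class straight from its module file.
from ai4free.OpenGPT import OPENGPT

bot = OPENGPT(is_conversation=True, timeout=30)

# Non-streaming: chat() returns the assistant's reply as a plain string.
print(bot.chat("What is the capital of France?"))

# Streaming: chat(stream=True) yields the assistant message from each stream
# event (typically the reply as accumulated so far).
for chunk in bot.chat("Explain recursion in one short paragraph", stream=True):
    print(chunk)
```

AsyncOPENGPT exposes the same surface with `await` and `async for`, as its docstrings above indicate.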
/ai4free/Deepinfra.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | import click
4 | import requests
5 | from requests import get
6 | from uuid import uuid4
7 | from re import findall
8 | from requests.exceptions import RequestException
9 | from curl_cffi.requests import get, RequestsError
10 | import g4f
11 | from random import randint
12 | from PIL import Image
13 | import io
14 | import re
15 | import json
16 | import yaml
17 | from webscout.AIutel import Optimizers
18 | from webscout.AIutel import Conversation
19 | from webscout.AIutel import AwesomePrompts, sanitize_stream
20 | from webscout.AIbase import Provider, AsyncProvider
21 | from webscout import exceptions
22 | from typing import Any, AsyncGenerator
23 | import logging
24 | import httpx
25 |
26 | class DeepInfra(Provider):
27 | def __init__(
28 | self,
29 | is_conversation: bool = True,
30 | max_tokens: int = 600,
31 | timeout: int = 30,
32 | intro: str = None,
33 | filepath: str = None,
34 | update_file: bool = True,
35 | proxies: dict = {},
36 | history_offset: int = 10250,
37 | act: str = None,
38 | model: str = "meta-llama/Meta-Llama-3-70B-Instruct",
39 | system_prompt: str = "You are a Helpful AI."
40 | ):
41 | """Instantiates DeepInfra
42 |
43 | Args:
44 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
45 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
46 | timeout (int, optional): Http request timeout. Defaults to 30.
47 | intro (str, optional): Conversation introductory prompt. Defaults to None.
48 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
49 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
50 | proxies (dict, optional): Http request proxies. Defaults to {}.
51 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
52 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
53 | model (str, optional): DeepInfra model name. Defaults to "meta-llama/Meta-Llama-3-70B-Instruct".
54 | system_prompt (str, optional): System prompt for DeepInfra. Defaults to "You are a Helpful AI.".
55 | """
56 | self.session = requests.Session()
57 | self.is_conversation = is_conversation
58 | self.max_tokens_to_sample = max_tokens
59 | self.chat_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
60 | self.timeout = timeout
61 | self.last_response = {}
62 | self.model = model
63 | self.system_prompt = system_prompt
64 |
65 | self.headers = {
66 | 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
67 | 'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
68 | 'Cache-Control': 'no-cache',
69 | 'Connection': 'keep-alive',
70 | 'Content-Type': 'application/json',
71 | 'Origin': 'https://deepinfra.com',
72 | 'Pragma': 'no-cache',
73 | 'Referer': 'https://deepinfra.com/',
74 | 'Sec-Fetch-Dest': 'empty',
75 | 'Sec-Fetch-Mode': 'cors',
76 | 'Sec-Fetch-Site': 'same-site',
77 | 'X-Deepinfra-Source': 'web-embed',
78 | 'accept': 'text/event-stream',
79 | 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
80 | 'sec-ch-ua-mobile': '?0',
81 | 'sec-ch-ua-platform': '"macOS"'
82 | }
83 |
84 | self.__available_optimizers = tuple(  # tuple, not a generator, so it survives repeated membership checks
85 | method
86 | for method in dir(Optimizers)
87 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
88 | )
89 | self.session.headers.update(self.headers)
90 | Conversation.intro = (
91 | AwesomePrompts().get_act(
92 | act, raise_not_found=True, default=None, case_insensitive=True
93 | )
94 | if act
95 | else intro or Conversation.intro
96 | )
97 | self.conversation = Conversation(
98 | is_conversation, self.max_tokens_to_sample, filepath, update_file
99 | )
100 | self.conversation.history_offset = history_offset
101 | self.session.proxies = proxies
102 |
103 | def ask(
104 | self,
105 | prompt: str,
106 | raw: bool = False,
107 | optimizer: str = None,
108 | conversationally: bool = False,
109 | ) -> dict:
110 | """Chat with AI
111 |
112 | Args:
113 | prompt (str): Prompt to be sent.
114 | raw (bool, optional): Stream back raw response as received. Defaults to False.
115 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
116 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
117 | Returns:
118 | dict : {}
119 | """
120 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
121 | if optimizer:
122 | if optimizer in self.__available_optimizers:
123 | conversation_prompt = getattr(Optimizers, optimizer)(
124 | conversation_prompt if conversationally else prompt
125 | )
126 | else:
127 | raise Exception(
128 | f"Optimizer is not one of {self.__available_optimizers}"
129 | )
130 | self.session.headers.update(self.headers)
131 | payload = {
132 | 'model': self.model,
133 | 'messages': [
134 | {"role": "system", "content": self.system_prompt},
135 | {"role": "user", "content": conversation_prompt},
136 | ],
137 | 'temperature': 0.7,
138 | 'max_tokens': 8028,
139 | 'stop': []
140 | }
141 |
142 | response = self.session.post(
143 | self.chat_endpoint, json=payload, timeout=self.timeout
144 | )
145 | if not response.ok:
146 | raise Exception(
147 | f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
148 | )
149 |
150 | resp = response.json()
151 | message_load = self.get_message(resp)
152 | self.conversation.update_chat_history(
153 | prompt, message_load
154 | )
155 | return resp
156 |
157 | def chat(
158 | self,
159 | prompt: str,
160 | optimizer: str = None,
161 | conversationally: bool = False,
162 | ) -> str:
163 | """Generate response `str`
164 | Args:
165 | prompt (str): Prompt to be sent.
166 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
167 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
168 | Returns:
169 | str: Response generated
170 | """
171 | return self.get_message(
172 | self.ask(
173 | prompt,
174 | optimizer=optimizer,
175 | conversationally=conversationally,
176 | )
177 | )
178 |
179 | def get_message(self, response: dict) -> str:
180 | """Retrieves message only from response
181 |
182 | Args:
183 | response (dict): Response generated by `self.ask`
184 |
185 | Returns:
186 | str: Message extracted
187 | """
188 | assert isinstance(response, dict), "Response should be of dict data-type only"
189 | try:
190 | return response["choices"][0]["message"]["content"]
191 | except KeyError:
192 | return ""
193 |
194 | class AsyncDeepInfra(AsyncProvider):
195 | def __init__(
196 | self,
197 | is_conversation: bool = True,
198 | max_tokens: int = 600,
199 | timeout: int = 30,
200 | intro: str = None,
201 | filepath: str = None,
202 | update_file: bool = True,
203 | proxies: dict = {},
204 | history_offset: int = 10250,
205 | act: str = None,
206 | model: str = "meta-llama/Meta-Llama-3-70B-Instruct",
207 | system_prompt: str = "You are a Helpful AI."
208 | ):
209 | """Instantiates DeepInfra
210 |
211 | Args:
212 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
213 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
214 | timeout (int, optional): Http request timeout. Defaults to 30.
215 | intro (str, optional): Conversation introductory prompt. Defaults to None.
216 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
217 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
218 | proxies (dict, optional): Http request proxies. Defaults to {}.
219 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
220 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
221 | model (str, optional): DeepInfra model name. Defaults to "meta-llama/Meta-Llama-3-70B-Instruct".
222 | system_prompt (str, optional): System prompt for DeepInfra. Defaults to "You are a Helpful AI.".
223 | """
224 | self.is_conversation = is_conversation
225 | self.max_tokens_to_sample = max_tokens
226 | self.chat_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
227 | self.timeout = timeout
228 | self.last_response = {}
229 | self.model = model
230 | self.system_prompt = system_prompt
231 |
232 | self.headers = {
233 | 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
234 | 'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
235 | 'Cache-Control': 'no-cache',
236 | 'Connection': 'keep-alive',
237 | 'Content-Type': 'application/json',
238 | 'Origin': 'https://deepinfra.com',
239 | 'Pragma': 'no-cache',
240 | 'Referer': 'https://deepinfra.com/',
241 | 'Sec-Fetch-Dest': 'empty',
242 | 'Sec-Fetch-Mode': 'cors',
243 | 'Sec-Fetch-Site': 'same-site',
244 | 'X-Deepinfra-Source': 'web-embed',
245 | 'accept': 'text/event-stream',
246 | 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
247 | 'sec-ch-ua-mobile': '?0',
248 | 'sec-ch-ua-platform': '"macOS"'
249 | }
250 |
251 | self.__available_optimizers = tuple(  # tuple, not a generator, so it survives repeated membership checks
252 | method
253 | for method in dir(Optimizers)
254 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
255 | )
256 | self.client = httpx.AsyncClient(proxies=proxies, headers=self.headers)
257 | Conversation.intro = (
258 | AwesomePrompts().get_act(
259 | act, raise_not_found=True, default=None, case_insensitive=True
260 | )
261 | if act
262 | else intro or Conversation.intro
263 | )
264 | self.conversation = Conversation(
265 | is_conversation, self.max_tokens_to_sample, filepath, update_file
266 | )
267 | self.conversation.history_offset = history_offset
268 |
269 | async def ask(
270 | self,
271 | prompt: str,
272 | raw: bool = False,
273 | optimizer: str = None,
274 | conversationally: bool = False,
275 | ) -> dict:
276 | """Chat with AI
277 |
278 | Args:
279 | prompt (str): Prompt to be sent.
280 | raw (bool, optional): Stream back raw response as received. Defaults to False.
281 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
282 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
283 | Returns:
284 | dict : {}
285 | """
286 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
287 | if optimizer:
288 | if optimizer in self.__available_optimizers:
289 | conversation_prompt = getattr(Optimizers, optimizer)(
290 | conversation_prompt if conversationally else prompt
291 | )
292 | else:
293 | raise Exception(
294 | f"Optimizer is not one of {self.__available_optimizers}"
295 | )
296 | payload = {
297 | 'model': self.model,
298 | 'messages': [
299 | {"role": "system", "content": self.system_prompt},
300 | {"role": "user", "content": conversation_prompt},
301 | ],
302 | 'temperature': 0.7,
303 | 'max_tokens': 8028,
304 | 'stop': []
305 | }
306 |
307 | response = await self.client.post(self.chat_endpoint, json=payload, timeout=self.timeout)
308 | if response.status_code != 200:
309 | raise Exception(
310 | f"Failed to generate response - ({response.status_code}, {response.reason_phrase}) - {response.text}"
311 | )
312 |
313 | resp = response.json()
314 | message_load = self.get_message(resp)
315 | self.conversation.update_chat_history(
316 | prompt, message_load
317 | )
318 | return resp
319 |
320 | async def chat(
321 | self,
322 | prompt: str,
323 | optimizer: str = None,
324 | conversationally: bool = False,
325 | ) -> str:
326 | """Generate response `str`
327 | Args:
328 | prompt (str): Prompt to be sent.
329 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
330 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
331 | Returns:
332 | str: Response generated
333 | """
334 | return self.get_message(
335 | await self.ask(
336 | prompt,
337 | optimizer=optimizer,
338 | conversationally=conversationally,
339 | )
340 | )
341 |
342 | def get_message(self, response: dict) -> str:
343 | """Retrieves message only from response
344 |
345 | Args:
346 | response (dict): Response generated by `self.ask`
347 |
348 | Returns:
349 | str: Message extracted
350 | """
351 | assert isinstance(response, dict), "Response should be of dict data-type only"
352 | try:
353 | return response["choices"][0]["message"]["content"]
354 | except KeyError:
355 | return ""
356 | import requests
357 | import base64
358 | from typing import List, Dict, Union, Any
359 |
360 | class VLM:
361 | def __init__(
362 | self,
363 | model: str,
364 | is_conversation: bool = True,
365 | max_tokens: int = 600,
366 | timeout: int = 30,
367 | system_prompt: str = "You are a Helpful AI.",
368 | proxies: dict = {}
369 | ):
370 | """Instantiates VLM
371 |
372 | Args:
373 | model (str): VLM model name.
374 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
375 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
376 | timeout (int, optional): Http request timeout. Defaults to 30.
377 | system_prompt (str, optional): System prompt for VLM. Defaults to "You are a Helpful AI.".
378 | proxies (dict, optional): Http request proxies. Defaults to {}.
379 | """
380 | self.model = model
381 | self.is_conversation = is_conversation
382 | self.max_tokens_to_sample = max_tokens
383 | self.timeout = timeout
384 | self.system_prompt = system_prompt
385 | self.headers = {
386 | 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
387 | 'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
388 | 'Cache-Control': 'no-cache',
389 | 'Connection': 'keep-alive',
390 | 'Content-Type': 'application/json',
391 | 'Origin': 'https://deepinfra.com',
392 | 'Pragma': 'no-cache',
393 | 'Referer': 'https://deepinfra.com/',
394 | 'Sec-Fetch-Dest': 'empty',
395 | 'Sec-Fetch-Mode': 'cors',
396 | 'Sec-Fetch-Site': 'same-site',
397 | 'X-Deepinfra-Source': 'web-embed',
398 | 'accept': 'text/event-stream',
399 | 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
400 | 'sec-ch-ua-mobile': '?0',
401 | 'sec-ch-ua-platform': '"macOS"'
402 | }
403 |
404 | self.session = requests.Session()
405 | self.session.headers.update(self.headers)
406 | self.session.proxies.update(proxies)
407 |
408 | def encode_image_to_base64(self, image_path: str) -> str:
409 | with open(image_path, "rb") as image_file:
410 | return base64.b64encode(image_file.read()).decode("utf-8")
411 |
412 | def get_message(self, response: dict) -> str:
413 | """Retrieves message only from response
414 |
415 | Args:
416 | response (dict): Response generated by `self.ask`
417 |
418 | Returns:
419 | str: Message extracted
420 | """
421 | assert isinstance(response, dict), "Response should be of dict data-type only"
422 | try:
423 | return response["choices"][0]["message"]["content"]
424 | except KeyError:
425 | return ""
426 |
427 | def ask(
428 | self,
429 | prompt: Union[str, Dict[str, str]],
430 | raw: bool = False
431 | ) -> dict:
432 | """Chat with AI
433 |
434 | Args:
435 | prompt (Union[str, Dict[str, str]]): Prompt to be sent, can be text or a dict with base64 image.
436 | raw (bool, optional): Stream back raw response as received. Defaults to False.
437 |
438 | Returns:
439 | dict: Response from the API
440 | """
441 | messages = [
442 | {"role": "system", "content": self.system_prompt},
443 | {"role": "user", "content": prompt if isinstance(prompt, str) else prompt['content']}
444 | ]
445 |
446 | payload = {
447 | 'model': self.model,
448 | 'messages': messages,
449 | 'temperature': 0.7,
450 | 'max_tokens': self.max_tokens_to_sample,
451 | 'stop': [],
452 | 'stream': False
453 | }
454 |
455 | response = self.session.post(
456 | "https://api.deepinfra.com/v1/openai/chat/completions",
457 | json=payload,
458 | timeout=self.timeout
459 | )
460 | if not response.ok:
461 | raise Exception(
462 | f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
463 | )
464 |
465 | return response.json()
466 |
467 | def chat(
468 | self,
469 | prompt: Union[str, Dict[str, str]]
470 | ) -> str:
471 | """Generate response `str`
472 |
473 | Args:
474 | prompt (Union[str, Dict[str, str]]): Prompt to be sent, can be text or a dict with base64 image.
475 |
476 | Returns:
477 | str: Response generated
478 | """
479 | return self.get_message(self.ask(prompt))
--------------------------------------------------------------------------------
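A short, hedged sketch of how the DeepInfra and VLM classes in Deepinfra.py above are driven. The vision model name, the image filename, and the data-URL way of embedding the image into the prompt are illustrative assumptions; the module itself only forwards whatever string it is given:

```python
# Sketch only: the vision model name and the image-embedding format are assumptions.
from ai4free.Deepinfra import DeepInfra, VLM

# Plain text chat through DeepInfra's OpenAI-compatible /chat/completions endpoint.
ai = DeepInfra(model="meta-llama/Meta-Llama-3-70B-Instruct")
print(ai.chat("Summarise the HTTP request/response cycle in two sentences."))

# VLM.ask()/chat() accept either a plain string or a dict whose "content" key
# already holds the full prompt (e.g. with an embedded base64 image).
vlm = VLM(model="llava-hf/llava-1.5-7b-hf")    # illustrative model name
b64 = vlm.encode_image_to_base64("photo.jpg")  # helper defined above
print(vlm.chat({"content": f"Describe this image: data:image/jpeg;base64,{b64}"}))
```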
/ai4free/Phind.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | import click
4 | import requests
5 | from requests import get
6 | from uuid import uuid4
7 | from re import findall
8 | from requests.exceptions import RequestException
9 | from curl_cffi.requests import get, RequestsError
10 | import g4f
11 | from random import randint
12 | from PIL import Image
13 | import io
14 | import re
15 | import json
16 | import yaml
17 | from webscout.AIutel import Optimizers
18 | from webscout.AIutel import Conversation
19 | from webscout.AIutel import AwesomePrompts, sanitize_stream
20 | from webscout.AIbase import Provider, AsyncProvider
21 | from webscout import exceptions
22 | from typing import Any, AsyncGenerator
23 | import logging
24 | import httpx
25 | class PhindSearch:
26 | # default_model = "Phind Model"
27 | def __init__(
28 | self,
29 | is_conversation: bool = True,
30 | max_tokens: int = 8000,
31 | timeout: int = 30,
32 | intro: str = None,
33 | filepath: str = None,
34 | update_file: bool = True,
35 | proxies: dict = {},
36 | history_offset: int = 10250,
37 | act: str = None,
38 | model: str = "Phind Model",
39 | quiet: bool = False,
40 | ):
41 | """Instantiates PHIND
42 |
43 | Args:
44 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
45 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 8000.
46 | timeout (int, optional): Http request timeout. Defaults to 30.
47 | intro (str, optional): Conversation introductory prompt. Defaults to None.
48 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
49 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
50 | proxies (dict, optional): Http request proxies. Defaults to {}.
51 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
52 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
53 | model (str, optional): Model name. Defaults to "Phind Model".
54 | quiet (bool, optional): Ignore web search results and yield the final response only. Defaults to False.
55 | """
56 | self.session = requests.Session()
57 | self.max_tokens_to_sample = max_tokens
58 | self.is_conversation = is_conversation
59 | self.chat_endpoint = "https://https.extension.phind.com/agent/"
60 | self.stream_chunk_size = 64
61 | self.timeout = timeout
62 | self.last_response = {}
63 | self.model = model
64 | self.quiet = quiet
65 |
66 | self.headers = {
67 | "Content-Type": "application/json",
68 | "User-Agent": "",
69 | "Accept": "*/*",
70 | "Accept-Encoding": "Identity",
71 | }
72 |
73 | self.__available_optimizers = tuple(  # tuple, not a generator, so it survives repeated membership checks
74 | method
75 | for method in dir(Optimizers)
76 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
77 | )
78 | self.session.headers.update(self.headers)
79 | Conversation.intro = (
80 | AwesomePrompts().get_act(
81 | act, raise_not_found=True, default=None, case_insensitive=True
82 | )
83 | if act
84 | else intro or Conversation.intro
85 | )
86 | self.conversation = Conversation(
87 | is_conversation, self.max_tokens_to_sample, filepath, update_file
88 | )
89 | self.conversation.history_offset = history_offset
90 | self.session.proxies = proxies
91 |
92 | def ask(
93 | self,
94 | prompt: str,
95 | stream: bool = False,
96 | raw: bool = False,
97 | optimizer: str = None,
98 | conversationally: bool = False,
99 | ) -> dict:
100 | """Chat with AI
101 |
102 | Args:
103 | prompt (str): Prompt to be sent.
104 | stream (bool, optional): Flag for streaming response. Defaults to False.
105 | raw (bool, optional): Stream back raw response as received. Defaults to False.
106 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
107 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
108 | Returns:
109 | dict : {}
110 | ```json
111 | {
112 | "id": "chatcmpl-r0wujizf2i2xb60mjiwt",
113 | "object": "chat.completion.chunk",
114 | "created": 1706775384,
115 | "model": "trt-llm-phind-model-serving",
116 | "choices": [
117 | {
118 | "index": 0,
119 | "delta": {
120 | "content": "Hello! How can I assist you with your programming today?"
121 | },
122 | "finish_reason": null
123 | }
124 | ]
125 | }
126 | ```
127 | """
128 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
129 | if optimizer:
130 | if optimizer in self.__available_optimizers:
131 | conversation_prompt = getattr(Optimizers, optimizer)(
132 | conversation_prompt if conversationally else prompt
133 | )
134 | else:
135 | raise Exception(
136 | f"Optimizer is not one of {self.__available_optimizers}"
137 | )
138 |
139 | self.session.headers.update(self.headers)
140 | payload = {
141 | "additional_extension_context": "",
142 | "allow_magic_buttons": True,
143 | "is_vscode_extension": True,
144 | "message_history": [
145 | {"content": conversation_prompt, "metadata": {}, "role": "user"}
146 | ],
147 | "requested_model": self.model,
148 | "user_input": prompt,
149 | }
150 |
151 | def for_stream():
152 | response = self.session.post(
153 | self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
154 | )
155 | if (
156 | not response.ok
157 | or not response.headers.get("Content-Type")
158 | == "text/event-stream; charset=utf-8"
159 | ):
160 | raise exceptions.FailedToGenerateResponseError(
161 | f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
162 | )
163 | streaming_text = ""
164 | for value in response.iter_lines(
165 | decode_unicode=True,
166 | chunk_size=self.stream_chunk_size,
167 | ):
168 | try:
169 | modified_value = re.sub("data:", "", value)
170 | json_modified_value = json.loads(modified_value)
171 | retrieved_text = self.get_message(json_modified_value)
172 | if not retrieved_text:
173 | continue
174 | streaming_text += retrieved_text
175 | json_modified_value["choices"][0]["delta"][
176 | "content"
177 | ] = streaming_text
178 | self.last_response.update(json_modified_value)
179 | yield value if raw else json_modified_value
180 | except json.decoder.JSONDecodeError:
181 | pass
182 | self.conversation.update_chat_history(
183 | prompt, self.get_message(self.last_response)
184 | )
185 |
186 | def for_non_stream():
187 | for _ in for_stream():
188 | pass
189 | return self.last_response
190 |
191 | return for_stream() if stream else for_non_stream()
192 |
193 | def chat(
194 | self,
195 | prompt: str,
196 | stream: bool = False,
197 | optimizer: str = None,
198 | conversationally: bool = False,
199 | ) -> str:
200 | """Generate response `str`
201 | Args:
202 | prompt (str): Prompt to be sent.
203 | stream (bool, optional): Flag for streaming response. Defaults to False.
204 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
205 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
206 | Returns:
207 | str: Response generated
208 | """
209 |
210 | def for_stream():
211 | for response in self.ask(
212 | prompt, True, optimizer=optimizer, conversationally=conversationally
213 | ):
214 | yield self.get_message(response)
215 |
216 | def for_non_stream():
217 | return self.get_message(
218 | self.ask(
219 | prompt,
220 | False,
221 | optimizer=optimizer,
222 | conversationally=conversationally,
223 | )
224 | )
225 |
226 | return for_stream() if stream else for_non_stream()
227 |
228 | def get_message(self, response: dict) -> str:
229 | """Retrieves message only from response
230 |
231 | Args:
232 | response (dict): Response generated by `self.ask`
233 |
234 | Returns:
235 | str: Message extracted
236 | """
237 | assert isinstance(response, dict), "Response should be of dict data-type only"
238 | if response.get("type", "") == "metadata":
239 | return
240 |
241 | delta: dict = response["choices"][0]["delta"]
242 |
243 | if not delta:
244 | return ""
245 |
246 | elif delta.get("function_call"):
247 | if self.quiet:
248 | return ""
249 |
250 | function_call: dict = delta["function_call"]
251 | if function_call.get("name"):
252 | return function_call["name"]
253 | elif function_call.get("arguments"):
254 | return function_call.get("arguments")
255 |
256 | elif delta.get("metadata"):
257 | if self.quiet:
258 | return ""
259 | return yaml.dump(delta["metadata"])
260 |
261 | else:
262 | return (
263 | response["choices"][0]["delta"].get("content")
264 | if response["choices"][0].get("finish_reason") is None
265 | else ""
266 | )
267 | class AsyncPhindSearch(AsyncProvider):
268 | def __init__(
269 | self,
270 | is_conversation: bool = True,
271 | max_tokens: int = 600,
272 | timeout: int = 30,
273 | intro: str = None,
274 | filepath: str = None,
275 | update_file: bool = True,
276 | proxies: dict = {},
277 | history_offset: int = 10250,
278 | act: str = None,
279 | model: str = "Phind Model",
280 | quiet: bool = False,
281 | ):
282 | """Instantiates PHIND
283 |
284 | Args:
285 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
286 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
287 | timeout (int, optional): Http request timeout. Defaults to 30.
288 | intro (str, optional): Conversation introductory prompt. Defaults to None.
289 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
290 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
291 | proxies (dict, optional): Http request proxies. Defaults to {}.
292 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
293 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
294 | model (str, optional): Model name. Defaults to "Phind Model".
295 | quiet (bool, optional): Ignore web search results and yield the final response only. Defaults to False.
296 | """
297 | self.max_tokens_to_sample = max_tokens
298 | self.is_conversation = is_conversation
299 | self.chat_endpoint = "https://https.extension.phind.com/agent/"
300 | self.stream_chunk_size = 64
301 | self.timeout = timeout
302 | self.last_response = {}
303 | self.model = model
304 | self.quiet = quiet
305 |
306 | self.headers = {
307 | "Content-Type": "application/json",
308 | "User-Agent": "",
309 | "Accept": "*/*",
310 | "Accept-Encoding": "Identity",
311 | }
312 |
313 | self.__available_optimizers = tuple(  # tuple, not a generator, so it survives repeated membership checks
314 | method
315 | for method in dir(Optimizers)
316 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
317 | )
318 | Conversation.intro = (
319 | AwesomePrompts().get_act(
320 | act, raise_not_found=True, default=None, case_insensitive=True
321 | )
322 | if act
323 | else intro or Conversation.intro
324 | )
325 | self.conversation = Conversation(
326 | is_conversation, self.max_tokens_to_sample, filepath, update_file
327 | )
328 | self.conversation.history_offset = history_offset
329 | self.session = httpx.AsyncClient(headers=self.headers, proxies=proxies)
330 |
331 | async def ask(
332 | self,
333 | prompt: str,
334 | stream: bool = False,
335 | raw: bool = False,
336 | optimizer: str = None,
337 | conversationally: bool = False,
338 | synchronous_generator=False,
339 | ) -> dict | AsyncGenerator:
340 | """Asynchronously Chat with AI
341 |
342 | Args:
343 | prompt (str): Prompt to be sent.
344 | stream (bool, optional): Flag for streaming response. Defaults to False.
345 | raw (bool, optional): Stream back raw response as received. Defaults to False.
346 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
347 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
348 | Returns:
349 | dict|AsyncGenerator : ai content.
350 | ```json
351 | {
352 | "id": "chatcmpl-r0wujizf2i2xb60mjiwt",
353 | "object": "chat.completion.chunk",
354 | "created": 1706775384,
355 | "model": "trt-llm-phind-model-serving",
356 | "choices": [
357 | {
358 | "index": 0,
359 | "delta": {
360 | "content": "Hello! How can I assist you with your programming today?"
361 | },
362 | "finish_reason": null
363 | }
364 | ]
365 | }
366 | ```
367 | """
368 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
369 | if optimizer:
370 | if optimizer in self.__available_optimizers:
371 | conversation_prompt = getattr(Optimizers, optimizer)(
372 | conversation_prompt if conversationally else prompt
373 | )
374 | else:
375 | raise Exception(
376 | f"Optimizer is not one of {self.__available_optimizers}"
377 | )
378 |
379 | payload = {
380 | "additional_extension_context": "",
381 | "allow_magic_buttons": True,
382 | "is_vscode_extension": True,
383 | "message_history": [
384 | {"content": conversation_prompt, "metadata": {}, "role": "user"}
385 | ],
386 | "requested_model": self.model,
387 | "user_input": prompt,
388 | }
389 |
390 | async def for_stream():
391 | async with self.session.stream(
392 | "POST",
393 | self.chat_endpoint,
394 | json=payload,
395 | timeout=self.timeout,
396 | ) as response:
397 | if (
398 | not response.is_success
399 | or not response.headers.get("Content-Type")
400 | == "text/event-stream; charset=utf-8"
401 | ):
402 | raise exceptions.FailedToGenerateResponseError(
403 | f"Failed to generate response - ({response.status_code}, {response.reason_phrase})"
404 | )
405 | streaming_text = ""
406 | async for value in response.aiter_lines():
407 | try:
408 | modified_value = re.sub("data:", "", value)
409 | json_modified_value = json.loads(modified_value)
410 | retrieved_text = await self.get_message(json_modified_value)
411 | if not retrieved_text:
412 | continue
413 | streaming_text += retrieved_text
414 | json_modified_value["choices"][0]["delta"][
415 | "content"
416 | ] = streaming_text
417 | self.last_response.update(json_modified_value)
418 | yield value if raw else json_modified_value
419 | except json.decoder.JSONDecodeError:
420 | pass
421 | self.conversation.update_chat_history(
422 | prompt, await self.get_message(self.last_response)
423 | )
424 |
425 | async def for_non_stream():
426 | async for _ in for_stream():
427 | pass
428 | return self.last_response
429 |
430 | return (
431 | for_stream()
432 | if stream and not synchronous_generator
433 | else await for_non_stream()
434 | )
435 |
436 | async def chat(
437 | self,
438 | prompt: str,
439 | stream: bool = False,
440 | optimizer: str = None,
441 | conversationally: bool = False,
442 | ) -> str | AsyncGenerator:
443 | """Generate response `str`
444 | Args:
445 | prompt (str): Prompt to be sent.
446 | stream (bool, optional): Flag for streaming response. Defaults to False.
447 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
448 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
449 | Returns:
450 | str|AsyncGenerator: Response generated
451 | """
452 |
453 | async def for_stream():
454 | ask_resp = await self.ask(
455 | prompt, True, optimizer=optimizer, conversationally=conversationally
456 | )
457 | async for response in ask_resp:
458 | yield await self.get_message(response)
459 |
460 | async def for_non_stream():
461 | return await self.get_message(
462 | await self.ask(
463 | prompt,
464 | False,
465 | optimizer=optimizer,
466 | conversationally=conversationally,
467 | )
468 | )
469 |
470 | return for_stream() if stream else await for_non_stream()
471 |
472 | async def get_message(self, response: dict) -> str:
473 | """Retrieves message only from response
474 |
475 | Args:
476 | response (dict): Response generated by `self.ask`
477 |
478 | Returns:
479 | str: Message extracted
480 | """
481 | assert isinstance(response, dict), "Response should be of dict data-type only"
482 | if response.get("type", "") == "metadata":
483 | return
484 |
485 | delta: dict = response["choices"][0]["delta"]
486 |
487 | if not delta:
488 | return ""
489 |
490 | elif delta.get("function_call"):
491 | if self.quiet:
492 | return ""
493 |
494 | function_call: dict = delta["function_call"]
495 | if function_call.get("name"):
496 | return function_call["name"]
497 | elif function_call.get("arguments"):
498 | return function_call.get("arguments")
499 |
500 | elif delta.get("metadata"):
501 | if self.quiet:
502 | return ""
503 | return yaml.dump(delta["metadata"])
504 |
505 | else:
506 | return (
507 | response["choices"][0]["delta"].get("content")
508 | if response["choices"][0].get("finish_reason") is None
509 | else ""
510 | )
--------------------------------------------------------------------------------
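A brief usage sketch for PhindSearch from Phind.py above. With quiet=True the web-search metadata that Phind streams alongside the answer is suppressed, so only answer text is returned; each streamed chunk carries the answer accumulated so far, because get_message() folds every delta into a cumulative string:

```python
# Minimal sketch of the synchronous Phind provider defined above.
from ai4free.Phind import PhindSearch

ph = PhindSearch(quiet=True)  # quiet=True drops web-search metadata from the output

# Non-streaming call returns the final answer text.
print(ph.chat("How do I reverse a list in Python?"))

# Streaming: each chunk is the answer text accumulated so far.
for chunk in ph.chat("What is a Python decorator?", stream=True):
    print(chunk)
```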
/ai4free/yep.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | import click
4 | import requests
5 | from requests import get
6 | from uuid import uuid4
7 | from re import findall
8 | from requests.exceptions import RequestException
9 | from curl_cffi.requests import get, RequestsError
10 | import g4f
11 | from random import randint
12 | from PIL import Image
13 | import io
14 | import re
15 | import json
16 | import yaml
17 | from webscout.AIutel import Optimizers
18 | from webscout.AIutel import Conversation
19 | from webscout.AIutel import AwesomePrompts, sanitize_stream
20 | from webscout.AIbase import Provider, AsyncProvider
21 | from webscout import exceptions
22 | from typing import Any, AsyncGenerator
23 | import logging
24 | import httpx
25 | class YEPCHAT(Provider):
26 | def __init__(
27 | self,
28 | is_conversation: bool = True,
29 | max_tokens: int = 600,
30 | temperature: float = 0.6,
31 | presence_penalty: int = 0,
32 | frequency_penalty: int = 0,
33 | top_p: float = 0.7,
34 | model: str = "Mixtral-8x7B-Instruct-v0.1",
35 | timeout: int = 30,
36 | intro: str = None,
37 | filepath: str = None,
38 | update_file: bool = True,
39 | proxies: dict = {},
40 | history_offset: int = 10250,
41 | act: str = None,
42 | ):
43 | """Instantiates YEPCHAT
44 |
45 | Args:
46 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
47 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
48 | temperature (float, optional): Degree of randomness in the generated text. Defaults to 0.6.
49 | presence_penalty (int, optional): Penalty that discourages the model from repeating topics it has already mentioned. Defaults to 0.
50 | frequency_penalty (int, optional): Penalty that discourages the model from repeating the same words. Defaults to 0.
51 | top_p (float, optional): Sampling threshold during inference time. Defaults to 0.7.
52 | model (str, optional): LLM model name. Defaults to "Mixtral-8x7B-Instruct-v0.1".
53 | timeout (int, optional): Http request timeout. Defaults to 30.
54 | intro (str, optional): Conversation introductory prompt. Defaults to None.
55 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
56 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
57 | proxies (dict, optional): Http request proxies. Defaults to {}.
58 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
59 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
60 | """
61 | self.session = requests.Session()
62 | self.is_conversation = is_conversation
63 | self.max_tokens_to_sample = max_tokens
64 | self.model = model
65 | self.temperature = temperature
66 | self.presence_penalty = presence_penalty
67 | self.frequency_penalty = frequency_penalty
68 | self.top_p = top_p
69 | self.chat_endpoint = "https://api.yep.com/v1/chat/completions"
70 | self.stream_chunk_size = 64
71 | self.timeout = timeout
72 | self.last_response = {}
73 | self.headers = {
74 | "Accept": "*/*",
75 | "Accept-Encoding": "gzip, deflate",
76 | "Accept-Language": "en-US,en;q=0.9",
77 | "Content-Type": "application/json; charset=utf-8",
78 | "Origin": "https://yep.com",
79 | "Referer": "https://yep.com/",
80 | "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
81 | }
82 |
83 | self.__available_optimizers = tuple(  # tuple, not a generator, so it survives repeated membership checks
84 | method
85 | for method in dir(Optimizers)
86 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
87 | )
88 | self.session.headers.update(self.headers)
89 | Conversation.intro = (
90 | AwesomePrompts().get_act(
91 | act, raise_not_found=True, default=None, case_insensitive=True
92 | )
93 | if act
94 | else intro or Conversation.intro
95 | )
96 | self.conversation = Conversation(
97 | is_conversation, self.max_tokens_to_sample, filepath, update_file
98 | )
99 | self.conversation.history_offset = history_offset
100 | self.session.proxies = proxies
101 |
102 | def ask(
103 | self,
104 | prompt: str,
105 | stream: bool = False,
106 | raw: bool = False,
107 | optimizer: str = None,
108 | conversationally: bool = False,
109 | ) -> dict:
110 | """Chat with AI
111 |
112 | Args:
113 | prompt (str): Prompt to be sent.
114 | stream (bool, optional): Flag for streaming response. Defaults to False.
115 | raw (bool, optional): Stream back raw response as received. Defaults to False.
116 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
117 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
118 | Returns:
119 | dict : {}
120 | ```json
121 | {
122 | "id": "cmpl-c61c1c88de4e4ad3a79134775d17ea0c",
123 | "object": "chat.completion.chunk",
124 | "created": 1713876886,
125 | "model": "Mixtral-8x7B-Instruct-v0.1",
126 | "choices": [
127 | {
128 | "index": 0,
129 | "delta": {
130 | "role": null,
131 | "content": " Sure, I can help with that. Are you looking for information on how to start coding, or do you need help with a specific coding problem? We can discuss various programming languages like Python, JavaScript, Java, C++, or others. Please provide more details so I can assist you better."
132 | },
133 | "finish_reason": null
134 | }
135 | ]
136 | }
137 | ```
138 | """
139 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
140 | if optimizer:
141 | if optimizer in self.__available_optimizers:
142 | conversation_prompt = getattr(Optimizers, optimizer)(
143 | conversation_prompt if conversationally else prompt
144 | )
145 | else:
146 | raise Exception(
147 | f"Optimizer is not one of {self.__available_optimizers}"
148 | )
149 | self.session.headers.update(self.headers)
150 | payload = {
151 | "stream": True,
152 | "max_tokens": 1280,
153 | "top_p": self.top_p,
154 | "temperature": self.temperature,
155 | "messages": [{"content": conversation_prompt, "role": "user"}],
156 | "model": self.model,
157 | }
158 |
159 | def for_stream():
160 | response = self.session.post(
161 | self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
162 | )
163 | if not response.ok:
164 | raise exceptions.FailedToGenerateResponseError(
165 | f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
166 | )
167 |
168 | message_load = ""
169 | for value in response.iter_lines(
170 | decode_unicode=True,
171 | delimiter="" if raw else "data:",
172 | chunk_size=self.stream_chunk_size,
173 | ):
174 | try:
175 | resp = json.loads(value)
176 | incomplete_message = self.get_message(resp)
177 | if incomplete_message:
178 | message_load += incomplete_message
179 | resp["choices"][0]["delta"]["content"] = message_load
180 | self.last_response.update(resp)
181 | yield value if raw else resp
182 | elif raw:
183 | yield value
184 | except json.decoder.JSONDecodeError:
185 | pass
186 | self.conversation.update_chat_history(
187 | prompt, self.get_message(self.last_response)
188 | )
189 |
190 | def for_non_stream():
191 | for _ in for_stream():
192 | pass
193 | return self.last_response
194 |
195 | return for_stream() if stream else for_non_stream()
196 |
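# Hypothetical sketch of consuming `ask` directly (not part of the provider API; assumes
# an instance named `bot`). Each streamed chunk is a dict whose delta content holds the
# text accumulated so far, so the last chunk carries the complete reply:
#
#     bot = YEPCHAT()
#     reply = ""
#     for chunk in bot.ask("Explain recursion briefly", stream=True):
#         reply = chunk["choices"][0]["delta"]["content"]
#     print(reply)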
197 | def chat(
198 | self,
199 | prompt: str,
200 | stream: bool = False,
201 | optimizer: str = None,
202 | conversationally: bool = False,
203 | ) -> str:
204 | """Generate response `str`
205 | Args:
206 | prompt (str): Prompt to be sent.
207 | stream (bool, optional): Flag for streaming response. Defaults to False.
208 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
209 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
210 | Returns:
211 | str: Response generated
212 | """
213 |
214 | def for_stream():
215 | for response in self.ask(
216 | prompt, True, optimizer=optimizer, conversationally=conversationally
217 | ):
218 | yield self.get_message(response)
219 |
220 | def for_non_stream():
221 | return self.get_message(
222 | self.ask(
223 | prompt,
224 | False,
225 | optimizer=optimizer,
226 | conversationally=conversationally,
227 | )
228 | )
229 |
230 | return for_stream() if stream else for_non_stream()
231 |
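# Illustrative sketch of `chat` usage (assumes an instance named `bot`; not part of the
# class). With stream=False it returns the reply as one string; with stream=True it yields
# progressively longer strings, each holding the text generated so far:
#
#     bot = YEPCHAT()
#     print(bot.chat("Write a one-line haiku"))
#     for partial in bot.chat("Write a one-line haiku", stream=True):
#         latest = partial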
232 | def get_message(self, response: dict) -> str:
233 | """Retrieves message only from response
234 |
235 | Args:
236 | response (dict): Response generated by `self.ask`
237 |
238 | Returns:
239 | str: Message extracted
240 | """
241 | assert isinstance(response, dict), "Response should be of dict data-type only"
242 | try:
243 | if response["choices"][0].get("delta"):
244 | return response["choices"][0]["delta"]["content"]
245 | return response["choices"][0]["message"]["content"]
246 | except KeyError:
247 | return ""
248 | class AsyncYEPCHAT(AsyncProvider):
249 | def __init__(
250 | self,
251 | is_conversation: bool = True,
252 | max_tokens: int = 600,
253 | temperature: float = 0.6,
254 | presence_penalty: int = 0,
255 | frequency_penalty: int = 0,
256 | top_p: float = 0.7,
257 | model: str = "Mixtral-8x7B-Instruct-v0.1",
258 | timeout: int = 30,
259 | intro: str = None,
260 | filepath: str = None,
261 | update_file: bool = True,
262 | proxies: dict = {},
263 | history_offset: int = 10250,
264 | act: str = None,
265 | ):
266 | """Instantiates YEPCHAT
267 |
268 | Args:
269 | is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
270 | max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
271 | temperature (float, optional): Degree of randomness in the generated text. Defaults to 0.6.
272 | presence_penalty (int, optional): Penalty that discourages repeating topics. Defaults to 0.
273 | frequency_penalty (int, optional): Penalty that discourages repeating words. Defaults to 0.
274 | top_p (float, optional): Sampling threshold during inference time. Defaults to 0.7.
275 | model (str, optional): LLM model name. Defaults to "Mixtral-8x7B-Instruct-v0.1".
276 | timeout (int, optional): HTTP request timeout. Defaults to 30.
277 | intro (str, optional): Conversation introductory prompt. Defaults to None.
278 | filepath (str, optional): Path to file containing conversation history. Defaults to None.
279 | update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
280 | proxies (dict, optional): HTTP request proxies. Defaults to {}.
281 | history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
282 | act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
283 | """
284 | self.session = requests.Session()
285 | self.is_conversation = is_conversation
286 | self.max_tokens_to_sample = max_tokens
287 | self.model = model
288 | self.temperature = temperature
289 | self.presence_penalty = presence_penalty
290 | self.frequency_penalty = frequency_penalty
291 | self.top_p = top_p
292 | self.chat_endpoint = "https://api.yep.com/v1/chat/completions"
293 | self.stream_chunk_size = 64
294 | self.timeout = timeout
295 | self.last_response = {}
296 | self.headers = {
297 | "Accept": "*/*",
298 | "Accept-Encoding": "gzip, deflate",
299 | "Accept-Language": "en-US,en;q=0.9",
300 | "Content-Type": "application/json; charset=utf-8",
301 | "Origin": "https://yep.com",
302 | "Referer": "https://yep.com/",
303 | "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
304 | }
305 |
306 | self.__available_optimizers = tuple(
307 | method
308 | for method in dir(Optimizers)
309 | if callable(getattr(Optimizers, method)) and not method.startswith("__")
310 | )
311 | Conversation.intro = (
312 | AwesomePrompts().get_act(
313 | act, raise_not_found=True, default=None, case_insensitive=True
314 | )
315 | if act
316 | else intro or Conversation.intro
317 | )
318 | self.conversation = Conversation(
319 | is_conversation, self.max_tokens_to_sample, filepath, update_file
320 | )
321 | self.conversation.history_offset = history_offset
322 | self.session = httpx.AsyncClient(
323 | headers=self.headers,
324 | proxies=proxies,
325 | )
326 |
327 | async def ask(
328 | self,
329 | prompt: str,
330 | stream: bool = False,
331 | raw: bool = False,
332 | optimizer: str = None,
333 | conversationally: bool = False,
334 | ) -> dict:
335 | """Chat with AI asynchronously.
336 |
337 | Args:
338 | prompt (str): Prompt to be sent.
339 | stream (bool, optional): Flag for streaming response. Defaults to False.
340 | raw (bool, optional): Stream back raw response as received. Defaults to False.
341 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
342 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
343 | Returns:
344 | dict : {}
345 | ```json
346 | {
347 | "id": "cmpl-c61c1c88de4e4ad3a79134775d17ea0c",
348 | "object": "chat.completion.chunk",
349 | "created": 1713876886,
350 | "model": "Mixtral-8x7B-Instruct-v0.1",
351 | "choices": [
352 | {
353 | "index": 0,
354 | "delta": {
355 | "role": null,
356 | "content": " Sure, I can help with that. Are you looking for information on how to start coding, or do you need help with a specific coding problem? We can discuss various programming languages like Python, JavaScript, Java, C++, or others. Please provide more details so I can assist you better."
357 | },
358 | "finish_reason": null
359 | }
360 | ]
361 | }
362 | ```
363 | """
364 | conversation_prompt = self.conversation.gen_complete_prompt(prompt)
365 | if optimizer:
366 | if optimizer in self.__available_optimizers:
367 | conversation_prompt = getattr(Optimizers, optimizer)(
368 | conversation_prompt if conversationally else prompt
369 | )
370 | else:
371 | raise Exception(
372 | f"Optimizer is not one of {self.__available_optimizers}"
373 | )
374 | payload = {
375 | "stream": True,
376 | "max_tokens": 1280,
377 | "top_p": self.top_p,
378 | "temperature": self.temperature,
379 | "messages": [{"content": conversation_prompt, "role": "user"}],
380 | "model": self.model,
381 | }
382 |
383 | async def for_stream():
384 | async with self.session.stream(
385 | "POST", self.chat_endpoint, json=payload, timeout=self.timeout
386 | ) as response:
387 | if not response.is_success:
388 | raise exceptions.FailedToGenerateResponseError(
389 | f"Failed to generate response - ({response.status_code}, {response.reason_phrase}) - {response.text}"
390 | )
391 |
392 | message_load = ""
393 | async for value in response.aiter_lines():
394 | try:
395 | resp = sanitize_stream(value)
396 | incomplete_message = await self.get_message(resp)
397 | if incomplete_message:
398 | message_load += incomplete_message
399 | resp["choices"][0]["delta"]["content"] = message_load
400 | self.last_response.update(resp)
401 | yield value if raw else resp
402 | elif raw:
403 | yield value
404 | except json.decoder.JSONDecodeError:
405 | pass
406 |
407 | self.conversation.update_chat_history(
408 | prompt, await self.get_message(self.last_response)
409 | )
410 |
411 | async def for_non_stream():
412 | async for _ in for_stream():
413 | pass
414 | return self.last_response
415 |
416 | return for_stream() if stream else await for_non_stream()
417 |
418 | async def chat(
419 | self,
420 | prompt: str,
421 | stream: bool = False,
422 | optimizer: str = None,
423 | conversationally: bool = False,
424 | ) -> str:
425 | """Generate response `str` asynchronously.
426 | Args:
427 | prompt (str): Prompt to be sent.
428 | stream (bool, optional): Flag for streaming response. Defaults to False.
429 | optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
430 | conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
431 | Returns:
432 | str: Response generated
433 | """
434 |
435 | async def for_stream():
436 | async_ask = await self.ask(
437 | prompt, True, optimizer=optimizer, conversationally=conversationally
438 | )
439 |
440 | async for response in async_ask:
441 | yield await self.get_message(response)
442 |
443 | async def for_non_stream():
444 | return await self.get_message(
445 | await self.ask(
446 | prompt,
447 | False,
448 | optimizer=optimizer,
449 | conversationally=conversationally,
450 | )
451 | )
452 |
453 | return for_stream() if stream else await for_non_stream()
454 |
455 | async def get_message(self, response: dict) -> str:
456 | """Retrieves message only from response
457 |
458 | Args:
459 | response (dict): Response generated by `self.ask`
460 |
461 | Returns:
462 | str: Message extracted
463 | """
464 | assert isinstance(response, dict), "Response should be of dict data-type only"
465 | try:
466 | if response["choices"][0].get("delta"):
467 | return response["choices"][0]["delta"]["content"]
468 | return response["choices"][0]["message"]["content"]
469 | except KeyError:
470 | return ""
471 |
472 |
473 | if __name__ == "__main__":
474 | yepchat = YEPCHAT(is_conversation=True, filepath="conversation_history.txt") # Keep conversation history
475 |
476 | while True:
477 | prompt = input("You: ")
478 | print("YepChat :", end="", flush=True)
479 | printed = ""
480 | for response in yepchat.chat(
481 | prompt, stream=True, optimizer="code", conversationally=True
482 | ): # Use optimizer with conversation history
483 | # each streamed `response` holds the full text so far, so print only the new part
484 | print(response[len(printed):], end="", flush=True)
485 | printed = response
486 | print()
--------------------------------------------------------------------------------