├── gui
├── __init__.py
├── image1.png
├── image2.png
├── pywebio-gui
│ ├── README.md
│ └── pywebio-usesless.py
├── streamlit_app.py
├── README.md
├── query_methods.py
└── streamlit_chat_app.py
├── unfinished
├── vercelai
│ ├── token.py
│ ├── vercelai_test.py
│ ├── test.js
│ ├── __init__.py
│ └── test.py
├── bard
│ ├── README.md
│ ├── typings.py
│ └── __init__.py
├── bing
│ ├── README.md
│ └── __ini__.py
├── gptbz
│ ├── README.md
│ └── __init__.py
├── openprompt
│ ├── README.md
│ ├── test.py
│ ├── main.py
│ ├── create.py
│ └── mail.py
├── t3nsor
│ ├── README.md
│ └── __init__.py
├── chatpdf
│ └── __init__.py
└── test.py
├── gpt4free
├── quora
│ ├── graphql
│ │ ├── __init__.py
│ │ ├── ChatAddedSubscription.graphql
│ │ ├── SummarizePlainPostQuery.graphql
│ │ ├── ChatFragment.graphql
│ │ ├── BioFragment.graphql
│ │ ├── HandleFragment.graphql
│ │ ├── DeleteMessageMutation.graphql
│ │ ├── SettingsDeleteAccountButton_deleteAccountMutation_Mutation.graphql
│ │ ├── SummarizeQuotePostQuery.graphql
│ │ ├── MessageDeletedSubscription.graphql
│ │ ├── ChatViewQuery.graphql
│ │ ├── MessageRemoveVoteMutation.graphql
│ │ ├── StaleChatUpdateMutation.graphql
│ │ ├── DeleteHumanMessagesMutation.graphql
│ │ ├── SubscriptionsMutation.graphql
│ │ ├── SummarizeSharePostQuery.graphql
│ │ ├── AutoSubscriptionMutation.graphql
│ │ ├── MessageSetVoteMutation.graphql
│ │ ├── MessageFragment.graphql
│ │ ├── ShareMessagesMutation.graphql
│ │ ├── SendVerificationCodeForLoginMutation.graphql
│ │ ├── LoginWithVerificationCodeMutation.graphql
│ │ ├── SignupWithVerificationCodeMutation.graphql
│ │ ├── UserSnippetFragment.graphql
│ │ ├── AddMessageBreakMutation.graphql
│ │ ├── ViewerInfoQuery.graphql
│ │ ├── ChatPaginationQuery.graphql
│ │ ├── ViewerStateUpdatedSubscription.graphql
│ │ ├── SendMessageMutation.graphql
│ │ ├── PoeBotEditMutation.graphql
│ │ ├── ViewerStateFragment.graphql
│ │ ├── AddHumanMessageMutation.graphql
│ │ ├── PoeBotCreateMutation.graphql
│ │ ├── MessageAddedSubscription.graphql
│ │ └── ChatListPaginationQuery.graphql
│ ├── cookies.txt
│ ├── backup-mail.py
│ ├── README.md
│ ├── mail.py
│ ├── api.py
│ └── __init__.py
├── theb
│ ├── theb_test.py
│ ├── README.md
│ └── __init__.py
├── forefront
│ ├── README.md
│ ├── typing.py
│ └── __init__.py
├── italygpt
│ ├── README.md
│ └── __init__.py
├── usesless
│ ├── README.md
│ └── __init__.py
├── cocalc
│ ├── readme.md
│ └── __init__.py
├── you
│ ├── README.md
│ └── __init__.py
├── __init__.py
└── README.md
├── .github
├── FUNDING.yml
└── workflows
│ └── ci.yml
├── .dockerignore
├── testing
├── theb_test.py
├── t3nsor_test.py
├── sqlchat_test.py
├── forefront_test.py
├── poe_test.py
├── usesless_test.py
├── quora_test_2.py
├── openaihosted_test.py
├── you_test.py
├── useless_test.py
├── test_main.py
├── writesonic_test.py
└── poe_account_create_test.py
├── requirements.txt
├── Singularity
└── gpt4free.sif
├── docker-compose.yaml
├── .gitignore
├── pyproject.toml
├── Dockerfile
└── README.md
/gui/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/unfinished/vercelai/token.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/unfinished/bard/README.md:
--------------------------------------------------------------------------------
1 | to do:
2 | - code refactoring
--------------------------------------------------------------------------------
/unfinished/bing/README.md:
--------------------------------------------------------------------------------
1 | to do:
2 | - code refactoring
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | github: [onlp]
2 | patreon: xtekky
3 | ko_fi: xtekky
4 |
--------------------------------------------------------------------------------
/gui/image1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/release/gpt4free/main/gui/image1.png
--------------------------------------------------------------------------------
/gui/image2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/release/gpt4free/main/gui/image2.png
--------------------------------------------------------------------------------
/unfinished/gptbz/README.md:
--------------------------------------------------------------------------------
1 | https://chat.gpt.bz
2 |
3 | to do:
4 | - code refactoring
--------------------------------------------------------------------------------
/unfinished/openprompt/README.md:
--------------------------------------------------------------------------------
1 | https://openprompt.co/
2 |
3 | to do:
4 | - finish integrating email client
5 | - code refactoring
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | # Development
2 | .dockerignore
3 | .git
4 | .gitignore
5 | .github
6 | .idea
7 |
8 | # Application
9 | venv/
10 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/ChatAddedSubscription.graphql:
--------------------------------------------------------------------------------
1 | subscription ChatAddedSubscription {
2 | chatAdded {
3 | ...ChatFragment
4 | }
5 | }
6 |
--------------------------------------------------------------------------------
/gpt4free/theb/theb_test.py:
--------------------------------------------------------------------------------
import theb

# Stream the completion and echo each chunk to stdout as it arrives.
for chunk in theb.Completion.create('hello world'):
    print(chunk, end='', flush=True)
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/SummarizePlainPostQuery.graphql:
--------------------------------------------------------------------------------
1 | query SummarizePlainPostQuery($comment: String!) {
2 | summarizePlainPost(comment: $comment)
3 | }
4 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/ChatFragment.graphql:
--------------------------------------------------------------------------------
1 | fragment ChatFragment on Chat {
2 | id
3 | chatId
4 | defaultBotNickname
5 | shouldShowDisclaimer
6 | }
7 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/BioFragment.graphql:
--------------------------------------------------------------------------------
1 | fragment BioFragment on Viewer {
2 | id
3 | poeUser {
4 | id
5 | uid
6 | bio
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/testing/theb_test.py:
--------------------------------------------------------------------------------
from gpt4free import theb

# Stream the completion token-by-token, echoing each chunk as it arrives.
for token in theb.Completion.create('hello world'):
    print(token, end='', flush=True)
# Terminate the streamed line (replaces a leftover debug print of 'asdsos').
print()
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/HandleFragment.graphql:
--------------------------------------------------------------------------------
1 | fragment HandleFragment on Viewer {
2 | id
3 | poeUser {
4 | id
5 | uid
6 | handle
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/unfinished/vercelai/vercelai_test.py:
--------------------------------------------------------------------------------
import vercelai

# Ask for a summary and print the streamed tokens without newlines.
prompt = 'summarize the gnu gpl 1.0'
for piece in vercelai.Completion.create(prompt):
    print(piece, end='', flush=True)
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/DeleteMessageMutation.graphql:
--------------------------------------------------------------------------------
1 | mutation deleteMessageMutation(
2 | $messageIds: [BigInt!]!
3 | ) {
4 | messagesDelete(messageIds: $messageIds) {
5 | edgeIds
6 | }
7 | }
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/SettingsDeleteAccountButton_deleteAccountMutation_Mutation.graphql:
--------------------------------------------------------------------------------
1 | mutation SettingsDeleteAccountButton_deleteAccountMutation_Mutation{ deleteAccount { viewer { uid id } }}
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/SummarizeQuotePostQuery.graphql:
--------------------------------------------------------------------------------
1 | query SummarizeQuotePostQuery($comment: String, $quotedPostId: BigInt!) {
2 | summarizeQuotePost(comment: $comment, quotedPostId: $quotedPostId)
3 | }
4 |
--------------------------------------------------------------------------------
/testing/t3nsor_test.py:
--------------------------------------------------------------------------------
import t3nsor

# Stream a completion and print the first choice's text for every event.
stream = t3nsor.StreamCompletion.create(prompt='write python code to reverse a string', messages=[])
for event in stream:
    print(event.completion.choices[0].text)
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/MessageDeletedSubscription.graphql:
--------------------------------------------------------------------------------
1 | subscription MessageDeletedSubscription($chatId: BigInt!) {
2 | messageDeleted(chatId: $chatId) {
3 | id
4 | messageId
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/testing/sqlchat_test.py:
--------------------------------------------------------------------------------
import sqlchat

# Stream a completion and print the first choice's text without newlines.
stream = sqlchat.StreamCompletion.create(prompt='write python code to reverse a string', messages=[])
for event in stream:
    print(event.completion.choices[0].text, end='')
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/ChatViewQuery.graphql:
--------------------------------------------------------------------------------
1 | query ChatViewQuery($bot: String!) {
2 | chatOfBot(bot: $bot) {
3 | id
4 | chatId
5 | defaultBotNickname
6 | shouldShowDisclaimer
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/MessageRemoveVoteMutation.graphql:
--------------------------------------------------------------------------------
1 | mutation MessageRemoveVoteMutation($messageId: BigInt!) {
2 | messageRemoveVote(messageId: $messageId) {
3 | message {
4 | ...MessageFragment
5 | }
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/StaleChatUpdateMutation.graphql:
--------------------------------------------------------------------------------
1 | mutation StaleChatUpdateMutation($chatId: BigInt!) {
2 | staleChatUpdate(chatId: $chatId) {
3 | message {
4 | ...MessageFragment
5 | }
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/DeleteHumanMessagesMutation.graphql:
--------------------------------------------------------------------------------
1 | mutation DeleteHumanMessagesMutation($messageIds: [BigInt!]!) {
2 | messagesDelete(messageIds: $messageIds) {
3 | viewer {
4 | id
5 | }
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/SubscriptionsMutation.graphql:
--------------------------------------------------------------------------------
1 | mutation subscriptionsMutation(
2 | $subscriptions: [AutoSubscriptionQuery!]!
3 | ) {
4 | autoSubscribe(subscriptions: $subscriptions) {
5 | viewer {
6 | id
7 | }
8 | }
9 | }
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/SummarizeSharePostQuery.graphql:
--------------------------------------------------------------------------------
1 | query SummarizeSharePostQuery($comment: String!, $chatId: BigInt!, $messageIds: [BigInt!]!) {
2 | summarizeSharePost(comment: $comment, chatId: $chatId, messageIds: $messageIds)
3 | }
4 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/AutoSubscriptionMutation.graphql:
--------------------------------------------------------------------------------
1 | mutation AutoSubscriptionMutation($subscriptions: [AutoSubscriptionQuery!]!) {
2 | autoSubscribe(subscriptions: $subscriptions) {
3 | viewer {
4 | id
5 | }
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/MessageSetVoteMutation.graphql:
--------------------------------------------------------------------------------
1 | mutation MessageSetVoteMutation($messageId: BigInt!, $voteType: VoteType!, $reason: String) {
2 | messageSetVote(messageId: $messageId, voteType: $voteType, reason: $reason) {
3 | message {
4 | ...MessageFragment
5 | }
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/MessageFragment.graphql:
--------------------------------------------------------------------------------
1 | fragment MessageFragment on Message {
2 | id
3 | __typename
4 | messageId
5 | text
6 | linkifiedText
7 | authorNickname
8 | state
9 | vote
10 | voteReason
11 | creationTime
12 | suggestedReplies
13 | }
14 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/ShareMessagesMutation.graphql:
--------------------------------------------------------------------------------
1 | mutation ShareMessagesMutation(
2 | $chatId: BigInt!
3 | $messageIds: [BigInt!]!
4 | $comment: String
5 | ) {
6 | messagesShare(chatId: $chatId, messageIds: $messageIds, comment: $comment) {
7 | shareCode
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | websocket-client
2 | requests
3 | tls-client
4 | pypasser
5 | names
6 | colorama
7 | curl_cffi
8 | streamlit==1.21.0
9 | selenium
10 | fake-useragent
11 | twocaptcha
12 | https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip
13 | pydantic
14 | pymailtm
15 | Levenshtein
16 |
--------------------------------------------------------------------------------
/testing/forefront_test.py:
--------------------------------------------------------------------------------
from gpt4free import forefront

# Provision a throwaway account and show the token we got back.
token = forefront.Account.create(logging=True)
print(token)

# Stream a GPT-4 completion using that token, printing chunks inline.
for chunk in forefront.StreamingCompletion.create(token=token, prompt='hello world', model='gpt-4'):
    print(chunk.text, end='')
--------------------------------------------------------------------------------
/gpt4free/theb/README.md:
--------------------------------------------------------------------------------
1 | ### Example: `theb` (use like openai pypi package)
2 |
3 | ```python
4 | # import library
5 | from gpt4free import theb
6 |
7 | # simple streaming completion
8 |
9 | while True:
10 | x = input()
11 | for token in theb.Completion.create(x):
12 | print(token, end='', flush=True)
13 | print("")
14 | ```
15 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/SendVerificationCodeForLoginMutation.graphql:
--------------------------------------------------------------------------------
1 | mutation SendVerificationCodeForLoginMutation(
2 | $emailAddress: String
3 | $phoneNumber: String
4 | ) {
5 | sendVerificationCode(
6 | verificationReason: login
7 | emailAddress: $emailAddress
8 | phoneNumber: $phoneNumber
9 | ) {
10 | status
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/testing/poe_test.py:
--------------------------------------------------------------------------------
from time import sleep

from gpt4free import quora

# Create a fresh account and report its token.
token = quora.Account.create(proxy=None, logging=True)
print('token', token)

# Brief pause before the first completion request.
sleep(2)

# Stream a ChatGPT completion, then tear the account down again.
for chunk in quora.StreamingCompletion.create(model='ChatGPT', prompt='hello world', token=token):
    print(chunk.text, flush=True)

quora.Account.delete(token)
--------------------------------------------------------------------------------
/Singularity/gpt4free.sif:
--------------------------------------------------------------------------------
1 | Bootstrap: docker
2 | From: python:3.10-slim
3 |
4 | %post
5 | apt-get update && apt-get install -y git
6 | git clone https://github.com/xtekky/gpt4free.git
7 | cd gpt4free
8 | pip install --no-cache-dir -r requirements.txt
9 | cp gui/streamlit_app.py .
10 |
11 | %expose
12 | 8501
13 |
14 | %startscript
15 | exec streamlit run streamlit_app.py
16 |
--------------------------------------------------------------------------------
/testing/usesless_test.py:
--------------------------------------------------------------------------------
import usesless

# First turn of a two-turn conversation.
first_question = "Who won the world series in 2020?"
first = usesless.Completion.create(prompt=first_question)
first_answer = first["text"]
# NOTE(review): other callers in this repo read the follow-up id from
# req["id"] rather than req["parentMessageId"] — confirm which key is canonical.
message_id = first["parentMessageId"]

# Follow-up turn, threaded onto the first via parentMessageId.
second_question = "Where was it played?"
second = usesless.Completion.create(prompt=second_question, parentMessageId=message_id)
second_answer = second["text"]

print(first_answer)
print(second_answer)
14 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/LoginWithVerificationCodeMutation.graphql:
--------------------------------------------------------------------------------
1 | mutation LoginWithVerificationCodeMutation(
2 | $verificationCode: String!
3 | $emailAddress: String
4 | $phoneNumber: String
5 | ) {
6 | loginWithVerificationCode(
7 | verificationCode: $verificationCode
8 | emailAddress: $emailAddress
9 | phoneNumber: $phoneNumber
10 | ) {
11 | status
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/SignupWithVerificationCodeMutation.graphql:
--------------------------------------------------------------------------------
1 | mutation SignupWithVerificationCodeMutation(
2 | $verificationCode: String!
3 | $emailAddress: String
4 | $phoneNumber: String
5 | ) {
6 | signupWithVerificationCode(
7 | verificationCode: $verificationCode
8 | emailAddress: $emailAddress
9 | phoneNumber: $phoneNumber
10 | ) {
11 | status
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 |
3 | services:
4 | gpt4free:
5 | build:
6 | context: ./
7 | dockerfile: Dockerfile
8 | container_name: dc_gpt4free
9 | # environment:
10 | # - http_proxy=http://127.0.0.1:1080 # modify this for your proxy
11 | # - https_proxy=http://127.0.0.1:1080 # modify this for your proxy
12 | image: img_gpt4free
13 | ports:
14 | - 8501:8501
15 | restart: always
--------------------------------------------------------------------------------
/testing/quora_test_2.py:
--------------------------------------------------------------------------------
from gpt4free import quora

# Account with bot-creation rights is required for custom models.
token = quora.Account.create(logging=True, enable_bot_creation=True)

# Build a custom bot on top of ChatGPT (claude-instant-v1.0 also works here).
bot = quora.Model.create(
    token=token,
    model='ChatGPT',
    system_prompt='you are ChatGPT a large language model ...',
)

print(bot.name)

# Stream a completion through the freshly created custom bot.
for chunk in quora.StreamingCompletion.create(custom_model=bot.name, prompt='hello world', token=token):
    print(chunk.text)
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/UserSnippetFragment.graphql:
--------------------------------------------------------------------------------
1 | fragment UserSnippetFragment on PoeUser {
2 | id
3 | uid
4 | bio
5 | handle
6 | fullName
7 | viewerIsFollowing
8 | isPoeOnlyUser
9 | profilePhotoURLTiny: profilePhotoUrl(size: tiny)
10 | profilePhotoURLSmall: profilePhotoUrl(size: small)
11 | profilePhotoURLMedium: profilePhotoUrl(size: medium)
12 | profilePhotoURLLarge: profilePhotoUrl(size: large)
13 | isFollowable
14 | }
15 |
--------------------------------------------------------------------------------
/gpt4free/forefront/README.md:
--------------------------------------------------------------------------------
1 | ### Example: `forefront` (use like openai pypi package)
2 |
3 | ```python
4 | from gpt4free import forefront
5 | # create an account
6 | token = forefront.Account.create(logging=False)
7 | print(token)
8 | # get a response
9 | for response in forefront.StreamingCompletion.create(
10 | token=token,
11 | prompt='hello world',
12 | model='gpt-4'
13 | ):
14 | print(response.choices[0].text, end='')
15 | print("")
16 | ```
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/AddMessageBreakMutation.graphql:
--------------------------------------------------------------------------------
1 | mutation AddMessageBreakMutation($chatId: BigInt!) {
2 | messageBreakCreate(chatId: $chatId) {
3 | message {
4 | id
5 | __typename
6 | messageId
7 | text
8 | linkifiedText
9 | authorNickname
10 | state
11 | vote
12 | voteReason
13 | creationTime
14 | suggestedReplies
15 | }
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/gpt4free/italygpt/README.md:
--------------------------------------------------------------------------------
1 | ### Example: `italygpt`
2 |
3 | ```python
4 | # create an instance
5 | from gpt4free import italygpt
6 | italygpt = italygpt.Completion()
7 |
8 | # initialize api
9 | italygpt.init()
10 |
11 | # get an answer
12 | italygpt.create(prompt="What is the meaning of life?")
13 | print(italygpt.answer) # html formatted
14 |
15 | # keep the old conversation
16 | italygpt.create(prompt="Are you a human?", messages=italygpt.messages)
17 | print(italygpt.answer)
18 | ```
--------------------------------------------------------------------------------
/testing/openaihosted_test.py:
--------------------------------------------------------------------------------
import openaihosted

# Interactive chat loop: keep appending turns until the user types "!stop".
messages = [{"role": "system", "content": "You are a helpful assistant."}]
while True:
    question = input("Question: ")
    if question == "!stop":
        break

    messages.append({"role": "user", "content": question})
    result = openaihosted.Completion.create(messages=messages)

    reply = result["responses"]
    messages.append({"role": "assistant", "content": reply})
    print(f"Answer: {reply}")
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/ViewerInfoQuery.graphql:
--------------------------------------------------------------------------------
1 | query ViewerInfoQuery {
2 | viewer {
3 | id
4 | uid
5 | ...ViewerStateFragment
6 | ...BioFragment
7 | ...HandleFragment
8 | hasCompletedMultiplayerNux
9 | poeUser {
10 | id
11 | ...UserSnippetFragment
12 | }
13 | messageLimit{
14 | canSend
15 | numMessagesRemaining
16 | resetTime
17 | shouldShowReminder
18 | }
19 | }
20 | }
21 |
22 |
--------------------------------------------------------------------------------
/gpt4free/forefront/typing.py:
--------------------------------------------------------------------------------
from typing import Any, List

from pydantic import BaseModel


class Choice(BaseModel):
    """One completion choice in a forefront response."""

    text: str
    index: int
    # Opaque log-probability payload; passed through as-is.
    logprobs: Any
    finish_reason: str


class Usage(BaseModel):
    """Token accounting for a single request/response pair."""

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class ForeFrontResponse(BaseModel):
    """Top-level forefront completion response (OpenAI-like schema)."""

    id: str
    object: str
    created: int
    model: str
    choices: List[Choice]
    usage: Usage
    # Full generated text — presumably a convenience copy of the choices'
    # text; confirm against the client that populates this model.
    text: str
--------------------------------------------------------------------------------
/gpt4free/usesless/README.md:
--------------------------------------------------------------------------------
1 | ai.usesless.com
2 |
3 | to do:
4 |
5 | - use random user agent in header
6 | - make the code better I guess (?)
7 |
8 | ### Example: `usesless`
9 |
10 | ```python
11 | import usesless
12 |
13 | message_id = ""
14 | while True:
15 | prompt = input("Question: ")
16 | if prompt == "!stop":
17 | break
18 |
19 | req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id)
20 |
21 | print(f"Answer: {req['text']}")
22 | message_id = req["id"]
23 | ```
24 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
10 | # Ignore local python virtual environment
11 | venv/
12 |
13 | # Ignore streamlit_chat_app.py conversations pickle
14 | conversations.pkl
15 | *.pkl
16 |
17 | # Ignore accounts created by api's
18 | accounts.txt
19 |
20 | .idea/
21 |
22 | **/__pycache__/
23 |
24 | __pycache__/
25 |
26 | *.log
27 |
28 | cookie.json
29 |
30 | *.pyc
31 |
32 | dist/
33 |
--------------------------------------------------------------------------------
/gui/pywebio-gui/README.md:
--------------------------------------------------------------------------------
1 | # GUI with PyWebIO
2 | Simple, fast, and with fewer errors
3 | Only requires
4 | ```bash
5 | pip install gpt4free
6 | pip install pywebio
7 | ```
8 | clicking on 'pywebio-usesless.py' will run it
9 |
10 | PS: Currently, only 'usesless' is implemented, and the GUI is expected to be updated infrequently, with a focus on stability.
11 |
12 | ↓ Here is the introduction in zh-Hans-CN below.
13 |
14 | # 使用pywebio实现的极简GUI
15 | 简单,快捷,报错少
16 | 只需要
17 | ```bash
18 | pip install gpt4free
19 | pip install pywebio
20 | ```
21 |
22 | 双击pywebio-usesless.py即可运行
23 |
24 | ps:目前仅实现usesless,这个gui更新频率应该会比较少,目的是追求稳定
25 |
--------------------------------------------------------------------------------
/gpt4free/cocalc/readme.md:
--------------------------------------------------------------------------------
1 | ### Example: `cocalc`
2 |
3 | ```python
4 | # import library
5 | from gpt4free import cocalc
6 |
7 | cocalc.Completion.create(prompt="How are you!", cookie_input="cookieinput") ## Tutorial
8 | ```
9 |
10 | ### How to grab cookie input
11 | ```js
12 | // input this into ur developer tools console and the exact response u get from this u put into ur cookieInput!
13 | var cookies = document.cookie.split("; ");
14 | var cookieString = "";
15 | for (var i = 0; i < cookies.length; i++) {
16 | cookieString += cookies[i] + "; ";
17 | }
18 | console.log(cookieString);
19 | ```
20 |
--------------------------------------------------------------------------------
/testing/you_test.py:
--------------------------------------------------------------------------------
from gpt4free import you

# One-shot request asking for links and extra details.
result = you.Completion.create(prompt="hello world", detailed=True, include_links=True)

print(result)

# Expected shape:
# {
#   "response": "...",
#   "links": [...],
#   "extra": {...},
#   "slots": {...}
# }

# Interactive chatbot loop.
history = []

while True:
    user_prompt = input("You: ")

    reply = you.Completion.create(prompt=user_prompt, chat=history)

    print("Bot:", reply.text)

    history.append({"question": user_prompt, "answer": reply.text})
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "openai-rev"
3 | version = "0.1.0"
4 | description = ""
5 | authors = []
6 | license = "GPL-3.0"
7 | readme = "README.md"
8 | packages = [{ include = "gpt4free" }]
9 | exclude = ["**/*.txt"]
10 |
11 | [tool.poetry.dependencies]
12 | python = "^3.7"
13 | websocket-client = "^1.5.1"
14 | requests = "2.29.0"
15 | tls-client = "^0.2"
16 | pypasser = "^0.0.5"
17 | names = "^0.3.0"
18 | colorama = "^0.4.6"
19 | curl-cffi = "^0.5.5"
20 | selenium = "^4.9.0"
21 | fake-useragent = "^1.1.3"
22 | twocaptcha = "^0.0.1"
23 | pydantic = "^1.10.7"
24 |
25 |
26 | [build-system]
27 | requires = ["poetry-core"]
28 | build-backend = "poetry.core.masonry.api"
29 |
--------------------------------------------------------------------------------
/testing/useless_test.py:
--------------------------------------------------------------------------------
from gpt4free import usesless

import gpt4free  # hoisted from mid-file to the top per PEP 8

# First pass: exercise the provider module directly.
message_id = ""
while True:
    prompt = input("Question: ")
    if prompt == "!stop":
        break

    req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id)

    print(f"Answer: {req['text']}")
    message_id = req["id"]

# Second pass: the same conversation flow through the gpt4free facade.
message_id = ""
while True:
    prompt = input("Question: ")
    if prompt == "!stop":
        break

    req = gpt4free.Completion.create(provider=gpt4free.Provider.UseLess, prompt=prompt, parentMessageId=message_id)

    print(f"Answer: {req['text']}")
    message_id = req["id"]
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.11 as builder

WORKDIR /usr/app
ENV PATH="/usr/app/venv/bin:$PATH"

RUN apt-get update && apt-get install -y git
RUN mkdir -p /usr/app
RUN python -m venv ./venv

COPY requirements.txt .

RUN pip install -r requirements.txt

# RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple/
# RUN pip config set global.trusted-host mirrors.aliyun.com

# Runtime must be glibc-based: the venv built in the Debian-based builder
# contains compiled wheels linked against glibc, which fail to load on
# musl-based Alpine. python:3.11-slim keeps the image small and compatible.
FROM python:3.11-slim

WORKDIR /usr/app
ENV PATH="/usr/app/venv/bin:$PATH"

COPY --from=builder /usr/app/venv ./venv
COPY . .

RUN cp ./gui/streamlit_app.py .

CMD ["streamlit", "run", "streamlit_app.py"]

EXPOSE 8501
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/ChatPaginationQuery.graphql:
--------------------------------------------------------------------------------
1 | query ChatPaginationQuery($bot: String!, $before: String, $last: Int! = 10) {
2 | chatOfBot(bot: $bot) {
3 | id
4 | __typename
5 | messagesConnection(before: $before, last: $last) {
6 | pageInfo {
7 | hasPreviousPage
8 | }
9 | edges {
10 | node {
11 | id
12 | __typename
13 | messageId
14 | text
15 | linkifiedText
16 | authorNickname
17 | state
18 | vote
19 | voteReason
20 | creationTime
21 | suggestedReplies
22 | }
23 | }
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/gpt4free/you/README.md:
--------------------------------------------------------------------------------
1 | ### Example: `you` (use like openai pypi package)
2 |
3 | ```python
4 |
5 | from gpt4free import you
6 |
7 | # simple request with links and details
8 | response = you.Completion.create(
9 | prompt="hello world",
10 | detailed=True,
11 | include_links=True, )
12 |
13 | print(response.dict())
14 |
# {
#   "response": "...",
#   "links": [...],
#   "extra": {...},
#   "slots": {...}
# }
22 |
23 | # chatbot
24 |
25 | chat = []
26 |
27 | while True:
28 | prompt = input("You: ")
29 | if prompt == 'q':
30 | break
31 | response = you.Completion.create(
32 | prompt=prompt,
33 | chat=chat)
34 |
35 | print("Bot:", response.text)
36 |
37 | chat.append({"question": prompt, "answer": response.text})
38 | ```
39 |
--------------------------------------------------------------------------------
/gpt4free/quora/cookies.txt:
--------------------------------------------------------------------------------
1 | SmPiNXZI9hBTuf3viz74PA==
2 | zw7RoKQfeEehiaelYMRWeA==
3 | NEttgJ_rRQdO05Tppx6hFw==
4 | 3OnmC0r9njYdNWhWszdQJg==
5 | 8hZKR7MxwUTEHvO45TEViw==
6 | Eea6BqK0AmosTKzoI3AAow==
7 | pUEbtxobN_QUSpLIR8RGww==
8 | 9_dUWxKkHHhpQRSvCvBk2Q==
9 | UV45rvGwUwi2qV9QdIbMcw==
10 | cVIN0pK1Wx-F7zCdUxlYqA==
11 | UP2wQVds17VFHh6IfCQFrA==
12 | 18eKr0ME2Tzifdfqat38Aw==
13 | FNgKEpc2r-XqWe0rHBfYpg==
14 | juCAh6kB0sUpXHvKik2woA==
15 | nBvuNYRLaE4xE4HuzBPiIQ==
16 | oyae3iClomSrk6RJywZ4iw==
17 | 1Z27Ul8BTdNOhncT5H6wdg==
18 | wfUfJIlwQwUss8l-3kDt3w==
19 | f6Jw_Nr0PietpNCtOCXJTw==
20 | 6Jc3yCs7XhDRNHa4ZML09g==
21 | 3vy44sIy-ZlTMofFiFDttw==
22 | p9FbMGGiK1rShKgL3YWkDg==
23 | pw6LI5Op84lf4HOY7fn91A==
24 | QemKm6aothMvqcEgeKFDlQ==
25 | cceZzucA-CEHR0Gt6VLYLQ==
26 | JRRObMp2RHVn5u4730DPvQ==
27 | XNt0wLTjX7Z-EsRR3TJMIQ==
28 | csjjirAUKtT5HT1KZUq1kg==
29 | 8qZdCatCPQZyS7jsO4hkdQ==
30 | esnUxcBhvH1DmCJTeld0qw==
31 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/ViewerStateUpdatedSubscription.graphql:
--------------------------------------------------------------------------------
# Pushes viewer-state changes (the viewer's available bots, their daily
# message limits, and the user-created-bots feature gate) to the client.
subscription viewerStateUpdated {
  viewerStateUpdated {
    id
    ...ChatPageBotSwitcher_viewer
  }
}

fragment BotHeader_bot on Bot {
  displayName
  messageLimit {
    dailyLimit
  }
  ...BotImage_bot
}

fragment BotImage_bot on Bot {
  image {
    __typename
    ... on LocalBotImage {
      localName
    }
    ... on UrlBotImage {
      url
    }
  }
  displayName
}

fragment BotLink_bot on Bot {
  displayName
}

fragment ChatPageBotSwitcher_viewer on Viewer {
  availableBots {
    id
    messageLimit {
      dailyLimit
    }
    ...BotLink_bot
    ...BotHeader_bot
  }
  allowUserCreatedBots: booleanGate(gateName: "enable_user_created_bots")
}
44 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/SendMessageMutation.graphql:
--------------------------------------------------------------------------------
# Sends a user query to a bot (optionally preceded by a chat break) and
# returns the created chat-break and message edges, including the chat's
# disclaimer flag.
mutation chatHelpers_sendMessageMutation_Mutation(
  $chatId: BigInt!
  $bot: String!
  $query: String!
  $source: MessageSource
  $withChatBreak: Boolean!
) {
  messageEdgeCreate(chatId: $chatId, bot: $bot, query: $query, source: $source, withChatBreak: $withChatBreak) {
    chatBreak {
      cursor
      node {
        id
        messageId
        text
        author
        suggestedReplies
        creationTime
        state
      }
      id
    }
    message {
      cursor
      node {
        id
        messageId
        text
        author
        suggestedReplies
        creationTime
        state
        chat {
          shouldShowDisclaimer
          id
        }
      }
      id
    }
  }
}
41 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/PoeBotEditMutation.graphql:
--------------------------------------------------------------------------------
# Edits an existing user-created Poe bot. Note the client/server name
# mapping: $baseBot -> model, $prompt -> promptPlaintext,
# $profilePictureUrl -> profilePicture.
mutation EditBotMain_poeBotEdit_Mutation(
  $botId: BigInt!
  $handle: String!
  $description: String!
  $introduction: String!
  $isPromptPublic: Boolean!
  $baseBot: String!
  $profilePictureUrl: String
  $prompt: String!
  $apiUrl: String
  $apiKey: String
  $hasLinkification: Boolean
  $hasMarkdownRendering: Boolean
  $hasSuggestedReplies: Boolean
  $isPrivateBot: Boolean
) {
  poeBotEdit(botId: $botId, handle: $handle, description: $description, introduction: $introduction, isPromptPublic: $isPromptPublic, model: $baseBot, promptPlaintext: $prompt, profilePicture: $profilePictureUrl, apiUrl: $apiUrl, apiKey: $apiKey, hasLinkification: $hasLinkification, hasMarkdownRendering: $hasMarkdownRendering, hasSuggestedReplies: $hasSuggestedReplies, isPrivateBot: $isPrivateBot) {
    status
    bot {
      handle
      id
    }
  }
}
--------------------------------------------------------------------------------
/testing/test_main.py:
--------------------------------------------------------------------------------
"""Manual smoke test: exercises each gpt4free provider with the same prompt.

Every call below performs live network requests (and creates throwaway
accounts for Poe/forefront); run interactively, not in CI.
"""
import gpt4free
from gpt4free import Provider, quora, forefront

# usage You
response = gpt4free.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi')
print(response)

# usage Poe — requires a fresh account token
token = quora.Account.create(logging=False)
response = gpt4free.Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT')
print(response)

# usage forefront — requires a fresh account token
token = forefront.Account.create(logging=False)
response = gpt4free.Completion.create(
    Provider.ForeFront, prompt='Write a poem on Lionel Messi', model='gpt-4', token=token
)
print(response)
print('END')  # fixed: was print(f'END'), an f-string with no placeholders

# usage theb
response = gpt4free.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi')
print(response)

# usage cocalc
response = gpt4free.Completion.create(Provider.CoCalc, prompt='Write a poem on Lionel Messi', cookie_input='')
print(response)
28 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
# Builds the multi-arch (amd64 + arm64) gpt4free docker image.
# The image is pushed to Docker Hub only for pushes to `main`;
# pull requests and manual runs build without pushing.
name: Build and push `gpt4free` docker image

on:
  workflow_dispatch:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      # qemu + buildx enable cross-building the arm64 variant on the amd64 runner.
      - name: Set up qemu
        uses: docker/setup-qemu-action@v2

      - name: Set up docker buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to docker hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Build and push docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          # push only from main; PR builds are validation-only
          push: ${{ github.ref == 'refs/heads/main' }}
          tags: |
            ${{ secrets.DOCKER_USERNAME }}/gpt4free:latest
38 |
--------------------------------------------------------------------------------
/unfinished/t3nsor/README.md:
--------------------------------------------------------------------------------
1 | ### note: currently patched
2 |
3 | ### Example: `t3nsor` (use like openai pypi package)
4 |
5 | ```python
6 | # Import t3nsor
7 | import t3nsor
8 |
9 | # t3nsor.Completion.create
10 | # t3nsor.StreamCompletion.create
11 |
12 | [...]
13 |
14 | ```
15 |
16 | #### Example Chatbot
17 | ```python
18 | messages = []
19 |
20 | while True:
21 | user = input('you: ')
22 |
23 | t3nsor_cmpl = t3nsor.Completion.create(
24 | prompt = user,
25 | messages = messages
26 | )
27 |
28 | print('gpt:', t3nsor_cmpl.completion.choices[0].text)
29 |
30 | messages.extend([
31 | {'role': 'user', 'content': user },
32 | {'role': 'assistant', 'content': t3nsor_cmpl.completion.choices[0].text}
33 | ])
34 | ```
35 |
36 | #### Streaming Response:
37 |
38 | ```python
39 | for response in t3nsor.StreamCompletion.create(
40 | prompt = 'write python code to reverse a string',
41 | messages = []):
42 |
43 | print(response.completion.choices[0].text)
44 | ```
45 |
--------------------------------------------------------------------------------
/unfinished/vercelai/test.js:
--------------------------------------------------------------------------------
// Fetches the Vercel AI playground anti-bot challenge, solves it, and prints
// the resulting bot-prevention token for use in the `custom-encoding` header.
(async () => {

// Despite the .jpeg name, this endpoint returns base64-encoded JSON:
// { c: <js function source>, a: <argument>, t: <timestamp> }.
let response = await fetch("https://play.vercel.ai/openai.jpeg", {
    "headers": {
        "accept": "*/*",
        "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
        "sec-ch-ua": "\"Chromium\";v=\"112\", \"Google Chrome\";v=\"112\", \"Not:A-Brand\";v=\"99\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"macOS\"",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin"
    },
    "referrer": "https://play.vercel.ai/",
    "referrerPolicy": "strict-origin-when-cross-origin",
    "body": null,
    "method": "GET",
    "mode": "cors",
    "credentials": "omit"
});


let data = JSON.parse(atob(await response.text()))
// HACK: the server ships arbitrary JS (data.c) which must be executed with
// data.a to produce the challenge answer — eval of remote code is inherent
// to this scheme; only ever run against this trusted-by-choice endpoint.
let ret = eval("(".concat(data.c, ")(data.a)"));

// Token = base64({ r: challenge answer, t: server timestamp }).
botPreventionToken = btoa(JSON.stringify({
    r: ret,
    t: data.t
}))

console.log(botPreventionToken);

})()
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/ViewerStateFragment.graphql:
--------------------------------------------------------------------------------
1 | fragment ViewerStateFragment on Viewer {
2 | id
3 | __typename
4 | iosMinSupportedVersion: integerGate(gateName: "poe_ios_min_supported_version")
5 | iosMinEncouragedVersion: integerGate(
6 | gateName: "poe_ios_min_encouraged_version"
7 | )
8 | macosMinSupportedVersion: integerGate(
9 | gateName: "poe_macos_min_supported_version"
10 | )
11 | macosMinEncouragedVersion: integerGate(
12 | gateName: "poe_macos_min_encouraged_version"
13 | )
14 | showPoeDebugPanel: booleanGate(gateName: "poe_show_debug_panel")
15 | enableCommunityFeed: booleanGate(gateName: "enable_poe_shares_feed")
16 | linkifyText: booleanGate(gateName: "poe_linkify_response")
17 | enableSuggestedReplies: booleanGate(gateName: "poe_suggested_replies")
18 | removeInviteLimit: booleanGate(gateName: "poe_remove_invite_limit")
19 | enableInAppPurchases: booleanGate(gateName: "poe_enable_in_app_purchases")
20 | availableBots {
21 | nickname
22 | displayName
23 | profilePicture
24 | isDown
25 | disclaimer
26 | subtitle
27 | poweredBy
28 | }
29 | }
30 |
31 |
--------------------------------------------------------------------------------
/gpt4free/italygpt/__init__.py:
--------------------------------------------------------------------------------
1 | import requests, time, ast, json
2 | from bs4 import BeautifulSoup
3 | from hashlib import sha256
4 |
class Completion:
    """Client for the italygpt.it question endpoint.

    NOTE: answers are returned with HTML formatting.
    """

    next_id = None   # anti-bot token scraped from the landing page
    messages = []    # last conversation state echoed back by the server
    answer = None    # last answer text (HTML)

    def init(self):
        """Fetch the landing page and extract the `next_id` anti-bot token."""
        r = requests.get("https://italygpt.it")
        soup = BeautifulSoup(r.text, "html.parser")
        self.next_id = soup.find("input", {"name": "next_id"})["value"]

    def create(self, prompt: str, messages: list = None):
        """Ask `prompt` (with optional prior `messages`) and store the answer.

        Retries automatically after a pause when rate limited (the server
        allows ~17 requests per minute). Returns self, with `answer`,
        `messages` and `next_id` updated.

        Fixes vs. original: no mutable default argument, and only ONE
        request per attempt (the original re-sent the request just to read
        the body as text when JSON decoding failed).
        """
        if messages is None:
            messages = []
        params = {
            "hash": sha256(self.next_id.encode()).hexdigest(),
            "prompt": prompt,
            "raw_messages": json.dumps(messages),
        }
        response = requests.get("https://italygpt.it/question", params=params)
        try:
            r = response.json()
        except ValueError:
            body = response.text
            if "too many requests" in body.lower():
                # rate limit is 17 requests per 1 minute
                time.sleep(20)
                return self.create(prompt, messages)
            # Non-JSON, non-rate-limit reply: fail loudly instead of the
            # original's TypeError on string subscripting.
            raise RuntimeError(f"italygpt.it returned an unexpected response: {body[:200]}")
        self.next_id = r["next_id"]
        self.messages = ast.literal_eval(r["raw_messages"])
        self.answer = r["response"]
        return self
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/AddHumanMessageMutation.graphql:
--------------------------------------------------------------------------------
# Creates a human message (optionally preceded by a chat break) and returns
# the message, the sender's remaining daily quota, and any chat break node.
mutation AddHumanMessageMutation(
  $chatId: BigInt!
  $bot: String!
  $query: String!
  $source: MessageSource
  $withChatBreak: Boolean! = false
) {
  messageCreateWithStatus(
    chatId: $chatId
    bot: $bot
    query: $query
    source: $source
    withChatBreak: $withChatBreak
  ) {
    message {
      id
      __typename
      messageId
      text
      linkifiedText
      authorNickname
      state
      vote
      voteReason
      creationTime
      suggestedReplies
      chat {
        id
        shouldShowDisclaimer
      }
    }
    messageLimit{
      canSend
      numMessagesRemaining
      resetTime
      shouldShowReminder
    }
    chatBreak {
      id
      __typename
      messageId
      text
      linkifiedText
      authorNickname
      state
      vote
      voteReason
      creationTime
      suggestedReplies
    }
  }
}
53 |
--------------------------------------------------------------------------------
/testing/writesonic_test.py:
--------------------------------------------------------------------------------
# Manual smoke test for the writesonic provider: account creation, a simple
# completion, a memory-backed conversation, and a Google-augmented query.
# Every call performs live network requests.
import writesonic

# create account (3-4s)
account = writesonic.Account.create(logging=True)

# with logging:
# 2023-04-06 21:50:25 INFO __main__ -> register success : '{"id":"51aa0809-3053-44f7-922a...' (2s)
# 2023-04-06 21:50:25 INFO __main__ -> id : '51aa0809-3053-44f7-922a-2b85d8d07edf'
# 2023-04-06 21:50:25 INFO __main__ -> token : 'eyJhbGciOiJIUzI1NiIsInR5cCI6Ik...'
# 2023-04-06 21:50:28 INFO __main__ -> got key : '194158c4-d249-4be0-82c6-5049e869533c' (2s)

# simple completion
response = writesonic.Completion.create(api_key=account.key, prompt='hello world')

print(response.completion.choices[0].text)  # Hello! How may I assist you today?

# conversation: enable_memory with seeded history lets the bot recall facts

response = writesonic.Completion.create(
    api_key=account.key,
    prompt='what is my name ?',
    enable_memory=True,
    history_data=[{'is_sent': True, 'message': 'my name is Tekky'}, {'is_sent': False, 'message': 'hello Tekky'}],
)

print(response.completion.choices[0].text)  # Your name is Tekky.

# enable internet: augments the answer with live Google results

response = writesonic.Completion.create(
    api_key=account.key, prompt='who won the quatar world cup ?', enable_google_results=True
)

print(response.completion.choices[0].text)  # Argentina won the 2022 FIFA World Cup tournament held in Qatar ...
36 |
--------------------------------------------------------------------------------
/gpt4free/quora/backup-mail.py:
--------------------------------------------------------------------------------
1 | from json import loads
2 | from re import findall
3 | from time import sleep
4 |
5 | from requests import Session
6 |
7 |
class Mail:
    """Minimal client for the etempmail.com disposable-mailbox API.

    Typical flow: get_mail() -> trigger a verification email elsewhere ->
    get_message() -> get_verification_code().
    """

    def __init__(self) -> None:
        self.client = Session()
        # The initial POST establishes the ci_session cookie the API requires.
        self.client.post("https://etempmail.com/")
        self.cookies = {'acceptcookie': 'true'}
        self.cookies["ci_session"] = self.client.cookies.get_dict()["ci_session"]
        self.email = None

    def get_mail(self):
        """Request a throwaway address and return it as a string."""
        response = self.client.post("https://etempmail.com/getEmailAddress")
        # Parse the JSON body with loads() — the original used eval(), which
        # both executes untrusted server output and chokes on JSON literals
        # like true/false/null.
        data = loads(response.text)
        self.cookies["lisansimo"] = data["recover_key"]
        self.email = data["address"]
        return self.email

    def get_message(self):
        """Poll the inbox every 5s until one message arrives; return its HTML body."""
        print("Waiting for message...")
        while True:
            sleep(5)
            response = self.client.post("https://etempmail.com/getInbox")
            inbox = loads(response.text)
            print(self.client.cookies.get_dict())
            if len(inbox) == 1:
                break

        params = {
            'id': '1',
        }
        response = self.client.post("https://etempmail.com/getInbox", params=params)
        self.mail_context = loads(response.text)[0]["body"]
        return self.mail_context

    def get_verification_code(self):
        """Extract the 6-7 digit verification code from the fetched message body."""
        message = self.mail_context
        code = findall(r';">(\d{6,7})', message)[0]
        print(f"Verification code: {code}")
        return code
46 |
--------------------------------------------------------------------------------
/unfinished/gptbz/__init__.py:
--------------------------------------------------------------------------------
1 | from json import dumps, loads
2 |
3 | import websockets
4 |
5 |
6 | # Define the asynchronous function to test the WebSocket connection
7 |
8 |
async def test():
    """Open a WebSocket chat session, send one prompt, and stream the
    server's replies to stdout until the conversation ends or the
    connection is closed."""
    url = 'wss://chatgpt.func.icu/conversation+ws'
    async with websockets.connect(url) as wss:

        # Single-turn request understood by the server.
        request = {
            'content_type': 'text',
            'engine': 'chat-gpt',
            'parts': ['hello world'],
            'options': {},
        }
        await wss.send(dumps(obj=request, separators=(',', ':')))

        # The server marks the final frame with an 'eof' field.
        finished = None
        while not finished:
            try:
                message = loads(await wss.recv())

                # Dump the raw frame for debugging.
                print(message)

                finished = message.get('eof')
                if not finished:
                    # Intermediate frames carry the partial answer text.
                    print(message['content']['parts'][0])

            except websockets.ConnectionClosed:
                # Server hung up — treat as end of conversation.
                break
47 |
--------------------------------------------------------------------------------
/gui/streamlit_app.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
5 |
6 | import streamlit as st
7 | from gpt4free import you
8 |
9 |
def get_answer(question: str) -> str:
    """Forward *question* to the you.com completion backend and return the
    answer text, or a human-readable error message on any failure."""
    try:
        completion = you.Completion.create(prompt=question)
        return completion.text
    except Exception as e:
        # Surface the failure to the UI instead of crashing the app.
        return (
            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
        )
22 |
23 |
# Set page configuration and add header
st.set_page_config(
    page_title="gpt4freeGUI",
    initial_sidebar_state="expanded",
    page_icon="🧠",
    menu_items={
        'Get Help': 'https://github.com/xtekky/gpt4free/blob/main/README.md',
        'Report a bug': "https://github.com/xtekky/gpt4free/issues",
        'About': "### gptfree GUI",
    },
)
st.header('GPT4free GUI')

# Add text area for user input and button to get answer
question_text_area = st.text_area('🤖 Ask Any Question :', placeholder='Explain quantum computing in 50 words')
if st.button('🧠 Think'):
    answer = get_answer(question_text_area)
    # Un-escape any literal \n / \uXXXX sequences in the raw answer.
    escaped = answer.encode('utf-8').decode('unicode-escape')
    # Display answer
    st.caption("Answer :")
    st.markdown(escaped)

# Hide Streamlit footer
# NOTE(review): the original <style> payload was corrupted during extraction;
# the standard Streamlit hide-chrome snippet is restored here — confirm
# against the upstream file.
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
53 |
--------------------------------------------------------------------------------
/unfinished/openprompt/test.py:
--------------------------------------------------------------------------------
# NOTE(review): the values below are hard-coded Supabase JWTs and session
# material for a throwaway account — they are leaked credentials and should
# be rotated/removed before any release; presumably scratch data for manual
# testing.
access_token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV'
supabase_auth_token = '%5B%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8%22%2C%22_Zp8uXIA2InTDKYgo8TCqA%22%2Cnull%2Cnull%2Cnull%5D'

idk = [
    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8",
    "_Zp8uXIA2InTDKYgo8TCqA", None, None, None]
7 |
--------------------------------------------------------------------------------
/unfinished/vercelai/__init__.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
class Completion:
    """Streaming client for play.vercel.ai's /api/generate endpoint."""

    @staticmethod
    def create(prompt: str,
               model: str = 'openai:gpt-3.5-turbo',
               temperature: float = 0.7,
               max_tokens: int = 200,
               top_p: float = 1,
               top_k: int = 1,
               frequency_penalty: float = 1,
               presence_penalty: float = 1,
               stopSequences: list = None):
        """Yield raw response chunks (bytes lines) for *prompt*.

        First fetches the anti-bot `custom-encoding` token, then streams the
        generation line by line.

        Fixes vs. original: top_k/top_p were swapped in the request payload,
        stopSequences had a shared mutable default, and a stray debug print
        leaked the token to stdout.
        """
        if stopSequences is None:
            stopSequences = []

        # Despite the name, /openai.jpeg returns an anti-bot token; trailing
        # '=' padding must be stripped before it is echoed back.
        token = requests.get('https://play.vercel.ai/openai.jpeg', headers={
            'authority': 'play.vercel.ai',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'referer': 'https://play.vercel.ai/',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'}).text.replace('=', '')

        headers = {
            'authority': 'play.vercel.ai',
            'custom-encoding': token,
            'origin': 'https://play.vercel.ai',
            'referer': 'https://play.vercel.ai/',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
        }

        payload = {
            'prompt': prompt,
            'model': model,
            'temperature': temperature,
            'maxTokens': max_tokens,
            'topK': top_k,   # BUG FIX: was 'topK': top_p
            'topP': top_p,   # BUG FIX: was 'topP': top_k
            'frequencyPenalty': frequency_penalty,
            'presencePenalty': presence_penalty,
            'stopSequences': stopSequences,
        }

        for chunk in requests.post('https://play.vercel.ai/api/generate',
                                   headers=headers, stream=True, json=payload).iter_lines():
            yield chunk
--------------------------------------------------------------------------------
/gpt4free/usesless/__init__.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import requests
4 |
5 |
class Completion:
    """Client for the ai.usesless.com chat-process endpoint."""

    # Browser-mimicking headers required by the endpoint.
    headers = {
        "authority": "ai.usesless.com",
        "accept": "application/json, text/plain, */*",
        "accept-language": "en-US,en;q=0.5",
        "cache-control": "no-cache",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/112.0",
    }

    @staticmethod
    def create(
        systemMessage: str = "You are a helpful assistant",
        prompt: str = "",
        parentMessageId: str = "",
        presence_penalty: float = 1,
        temperature: float = 1,
        model: str = "gpt-3.5-turbo",
    ) -> dict:
        """POST a chat-process request and return the final response event.

        :param systemMessage: system prompt for the conversation
        :param prompt: user message
        :param parentMessageId: id of the previous message for continuity
        :return: decoded JSON of the last streamed event (contains 'text', 'id')
        """
        # (removed a stray debug print of parentMessageId/prompt)
        json_data = {
            "openaiKey": "",
            "prompt": prompt,
            "options": {
                "parentMessageId": parentMessageId,
                "systemMessage": systemMessage,
                "completionParams": {
                    "presence_penalty": presence_penalty,
                    "temperature": temperature,
                    "model": model,
                },
            },
        }

        url = "https://ai.usesless.com/api/chat-process"
        request = requests.post(url, headers=Completion.headers, json=json_data)

        return Completion.__response_to_json(request.content)

    @classmethod
    def __response_to_json(cls, text) -> dict:
        """Decode the raw streamed body and parse its final JSON line.

        The endpoint streams one JSON object per line; only the last line
        holds the completed answer.
        """
        decoded = str(text.decode("utf-8"))
        # [-1] instead of [1]: also works when the body is a single line
        # (the original raised IndexError when no newline was present).
        last_line = decoded.rsplit("\n", 1)[-1]
        return json.loads(last_line)
57 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/PoeBotCreateMutation.graphql:
--------------------------------------------------------------------------------
# Creates a user-defined Poe bot. Note the client/server name mapping:
# $prompt -> promptPlaintext, $profilePictureUrl -> profilePicture.
mutation CreateBotMain_poeBotCreate_Mutation(
  $model: String!
  $handle: String!
  $prompt: String!
  $isPromptPublic: Boolean!
  $introduction: String!
  $description: String!
  $profilePictureUrl: String
  $apiUrl: String
  $apiKey: String
  $isApiBot: Boolean
  $hasLinkification: Boolean
  $hasMarkdownRendering: Boolean
  $hasSuggestedReplies: Boolean
  $isPrivateBot: Boolean
) {
  poeBotCreate(model: $model, handle: $handle, promptPlaintext: $prompt, isPromptPublic: $isPromptPublic, introduction: $introduction, description: $description, profilePicture: $profilePictureUrl, apiUrl: $apiUrl, apiKey: $apiKey, isApiBot: $isApiBot, hasLinkification: $hasLinkification, hasMarkdownRendering: $hasMarkdownRendering, hasSuggestedReplies: $hasSuggestedReplies, isPrivateBot: $isPrivateBot) {
    status
    bot {
      id
      ...BotHeader_bot
    }
  }
}

fragment BotHeader_bot on Bot {
  displayName
  messageLimit {
    dailyLimit
  }
  ...BotImage_bot
  ...BotLink_bot
  ...IdAnnotation_node
  ...botHelpers_useViewerCanAccessPrivateBot
  ...botHelpers_useDeletion_bot
}

fragment BotImage_bot on Bot {
  displayName
  ...botHelpers_useDeletion_bot
  ...BotImage_useProfileImage_bot
}

fragment BotImage_useProfileImage_bot on Bot {
  image {
    __typename
    ... on LocalBotImage {
      localName
    }
    ... on UrlBotImage {
      url
    }
  }
  ...botHelpers_useDeletion_bot
}

fragment BotLink_bot on Bot {
  displayName
}

fragment IdAnnotation_node on Node {
  __isNode: __typename
  id
}

fragment botHelpers_useDeletion_bot on Bot {
  deletionState
}

fragment botHelpers_useViewerCanAccessPrivateBot on Bot {
  isPrivateBot
  viewerIsCreator
}
--------------------------------------------------------------------------------
/gui/pywebio-gui/pywebio-usesless.py:
--------------------------------------------------------------------------------
1 | from gpt4free import usesless
2 | import time
3 | from pywebio import start_server,config
4 | from pywebio.input import *
5 | from pywebio.output import *
6 | from pywebio.session import local
7 | message_id = ""
def status():
    """Smoke-test the usesless backend once at startup and report the
    result (answer or error banner) in the 'body' scope."""
    try:
        req = usesless.Completion.create(prompt="hello", parentMessageId=message_id)
        print(f"Answer: {req['text']}")
        put_success(f"Answer: {req['text']}", scope="body")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any backend failure shows an error banner.
        put_error("Program Error", scope="body")
15 |
def ask(prompt):
    """Send `prompt` to the usesless backend, update the per-session
    message id for conversation continuity, append the exchange to the
    session history, and return the assistant's reply text."""
    req = usesless.Completion.create(prompt=prompt, parentMessageId=local.message_id)
    rp=req['text']
    # Remember the reply id so the next turn continues this conversation.
    local.message_id=req["id"]
    print("AI:\n"+rp)
    local.conversation.extend([
        {"role": "user", "content": prompt},
        {"role": "assistant", "content": rp}
    ])
    print(local.conversation)
    return rp
27 |
def msg():
    """Main chat loop: read a question from an input group, echo it to the
    'body' scope, show a loading spinner in 'foot' while waiting for the
    answer, then render the reply. Cancelling the input group exits."""
    while True:
        # Placeholder text is Chinese for "please enter a question".
        text= input_group("You:",[textarea('You:',name='text',rows=3, placeholder='请输入问题')])
        if not(bool(text)):
            # Input group was cancelled — leave the loop (and the app).
            break
        if not(bool(text["text"])):
            # Empty question — prompt again.
            continue
        time.sleep(0.5)
        put_code("You:"+text["text"],scope="body")
        print("Question:"+text["text"])
        with use_scope('foot'):
            put_loading(color="info")
            rp= ask(text["text"])
        clear(scope="foot")
        time.sleep(0.5)
        put_markdown("Bot:\n"+rp,scope="body")
        time.sleep(0.7)
45 |
@config(title="AIchat", theme="dark")
def main():
    """Build the page scopes (heads/body/foot), run the startup status
    check, initialise per-session state, and enter the chat loop."""
    put_scope("heads")
    with use_scope('heads'):
        # NOTE(review): the original HTML literal was corrupted during
        # extraction; a minimal heading is restored here — confirm the
        # markup against the upstream file.
        put_html("<h1>AI Chat</h1>")
    put_scope("body")
    put_scope("foot")
    status()
    # Per-session conversation state.
    local.conversation = []
    local.message_id = ""
    msg()
57 |
58 | print("Click link to chat page")
59 | start_server(main, port=8099,allowed_origins="*",auto_open_webbrowser=True,debug=True)
60 |
--------------------------------------------------------------------------------
/unfinished/bard/typings.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Union
2 |
3 |
class BardResponse:
    """Thin wrapper around the decoded JSON payload of a Bard reply."""

    def __init__(self, json_dict: Dict[str, Union[str, List]]) -> None:
        """Build a BardResponse from the decoded JSON payload.

        :param json_dict: A dictionary containing the JSON response data.
        """
        # Keep the raw payload around for anything not surfaced below.
        self.json = json_dict

        self.content = json_dict.get('content')
        self.conversation_id = json_dict.get('conversation_id')
        self.response_id = json_dict.get('response_id')
        self.factuality_queries = json_dict.get('factualityQueries', [])
        self.text_query = json_dict.get('textQuery', [])
        raw_choices = json_dict.get('choices', [])
        self.choices = [self.BardChoice(raw) for raw in raw_choices]

    def __repr__(self) -> str:
        """Return a debug-friendly summary of this response."""
        return f"BardResponse(conversation_id={self.conversation_id}, response_id={self.response_id}, content={self.content})"

    def filter_choices(self, keyword: str) -> List['BardChoice']:
        """Return the choices whose content contains *keyword*
        (case-insensitive).

        :param keyword: The keyword to filter choices by.
        :return: A list of matching BardChoice objects.
        """
        needle = keyword.lower()
        matches = []
        for candidate in self.choices:
            if needle in candidate.content.lower():
                matches.append(candidate)
        return matches

    class BardChoice:
        """A single answer candidate inside a Bard response."""

        def __init__(self, choice_dict: Dict[str, str]) -> None:
            """Build a choice from its raw dictionary form.

            :param choice_dict: A dictionary containing the choice data.
            """
            self.id = choice_dict.get('id')
            # Bard wraps the choice text in a single-element list.
            self.content = choice_dict.get('content')[0]

        def __repr__(self) -> str:
            """Return a debug-friendly summary of this choice."""
            return f"BardChoice(id={self.id}, content={self.content})"
--------------------------------------------------------------------------------
/gpt4free/quora/README.md:
--------------------------------------------------------------------------------
1 |
2 | > ⚠ Warning !!!
3 | poe.com added security and can detect if you are making automated requests. You may get your account banned if you are using this api.
4 | The normal non-driver api is also currently not very stable
5 |
6 |
7 | ### Example: `quora (poe)` (use like openai pypi package) - GPT-4
8 |
9 | ```python
10 | # quora model names: (use left key as argument)
11 | models = {
12 | 'sage' : 'capybara',
13 | 'gpt-4' : 'beaver',
14 | 'claude-v1.2' : 'a2_2',
15 | 'claude-instant-v1.0' : 'a2',
16 | 'gpt-3.5-turbo' : 'chinchilla'
17 | }
18 | ```
19 |
20 | ### New: bot creation
21 |
22 | ```python
23 | # import quora (poe) package
24 | from gpt4free import quora
25 |
26 | # create account
27 | # make sure to set enable_bot_creation to True
28 | token = quora.Account.create(logging=True, enable_bot_creation=True)
29 |
30 | model = quora.Model.create(
31 | token=token,
32 | model='gpt-3.5-turbo', # or claude-instant-v1.0
33 | system_prompt='you are ChatGPT a large language model ...'
34 | )
35 |
36 | print(model.name) # gptx....
37 |
38 | # streaming response
39 | for response in quora.StreamingCompletion.create(
40 | custom_model=model.name,
41 | prompt='hello world',
42 | token=token):
43 | print(response.completion.choices[0].text)
44 | ```
45 |
46 | ### Normal Response:
47 | ```python
48 |
49 | response = quora.Completion.create(model = 'gpt-4',
50 | prompt = 'hello world',
51 | token = token)
52 |
53 | print(response.completion.choices[0].text)
54 | ```
55 |
56 | ### Update Use This For Poe
57 | ```python
58 | from gpt4free.quora import Poe
59 |
60 | # available models: ['Sage', 'GPT-4', 'Claude+', 'Claude-instant', 'ChatGPT', 'Dragonfly', 'NeevaAI']
61 |
62 | poe = Poe(model='ChatGPT', driver='firefox', cookie_path='cookie.json', driver_path='path_of_driver')
63 | poe.chat('who won the football world cup most?')
64 |
65 | # new bot creation
66 | poe.create_bot('new_bot_name', prompt='You are new test bot', base_model='gpt-3.5-turbo')
67 |
68 | # delete account
69 | poe.delete_account()
70 | ```
71 |
72 | ### Deleting the Poe Account
73 | ```python
74 | from gpt4free import quora
75 |
76 | quora.Account.delete(token='')
77 | ```
78 |
--------------------------------------------------------------------------------
/unfinished/vercelai/test.py:
--------------------------------------------------------------------------------
import requests

from base64 import b64decode, b64encode
from json import dumps, loads

# Browser-like headers used for the token request (fix: `requests` was
# imported twice in this script; all imports are now at the top).
headers = {
    'Accept': '*/*',
    'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',
    'Connection': 'keep-alive',
    'Referer': 'https://play.vercel.ai/',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'same-origin',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
    'sec-ch-ua': '"Chromium";v="110", "Google Chrome";v="110", "Not:A-Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
}

# Step 1: /openai.jpeg returns a base64-encoded JSON payload (not an image).
response = requests.get('https://play.vercel.ai/openai.jpeg', headers=headers)

token_data = loads(b64decode(response.text))
print(token_data)

# Step 2: derive the challenge value. The `* .1 * .2` transform presumably
# mirrors the site's client-side JavaScript — TODO confirm against the JS.
raw_token = {
    'a': token_data['a'] * .1 * .2,
    't': token_data['t']
}

print(raw_token)

# Step 3: compact-encode and base64 the token for the `custom-encoding` header.
new_token = b64encode(dumps(raw_token, separators=(',', ':')).encode()).decode()
print(new_token)

headers = {
    'authority': 'play.vercel.ai',
    'accept': '*/*',
    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'content-type': 'application/json',
    'custom-encoding': new_token,
    'origin': 'https://play.vercel.ai',
    'referer': 'https://play.vercel.ai/',
    'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}

json_data = {
    'prompt': 'hello\n',
    'model': 'openai:gpt-3.5-turbo',
    'temperature': 0.7,
    'maxTokens': 200,
    'topK': 1,
    'topP': 1,
    'frequencyPenalty': 1,
    'presencePenalty': 1,
    'stopSequences': [],
}

# Step 4: request a completion using the freshly minted token.
response = requests.post('https://play.vercel.ai/api/generate', headers=headers, json=json_data)
print(response.text)
--------------------------------------------------------------------------------
/gpt4free/theb/__init__.py:
--------------------------------------------------------------------------------
1 | from json import loads
2 | from queue import Queue, Empty
3 | from re import findall
4 | from threading import Thread
5 | from typing import Generator, Optional
6 |
7 | from curl_cffi import requests
8 | from fake_useragent import UserAgent
9 |
10 |
class Completion:
    """Streaming completion client for chatbot.theb.ai.

    Fix: the blank line that separated ``@staticmethod`` from ``def create``
    has been removed (PEP 8: a decorator belongs directly above its function),
    and the loop variable that shadowed ``message`` has been renamed.
    """

    # experimental: the stream is parsed by regex-matching chunks that look
    # like OpenAI chat-completion delta objects.
    part1 = '{"role":"assistant","id":"chatcmpl'
    part2 = '"},"index":0,"finish_reason":null}]}}'
    regex = rf'{part1}(.*){part2}'

    timer = None  # currently unused; kept so external references keep working
    message_queue = Queue()
    stream_completed = False
    last_msg_id = None

    @staticmethod
    def request(prompt: str, proxy: Optional[str] = None) -> None:
        """Send the chat request; chunks arrive via ``handle_stream_response``.

        :param prompt: user input forwarded to the API
        :param proxy: optional ``host:port`` HTTP proxy
        """
        headers = {
            'authority': 'chatbot.theb.ai',
            'content-type': 'application/json',
            'origin': 'https://chatbot.theb.ai',
            'user-agent': UserAgent().random,
        }

        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None

        # Continue the previous conversation when a parent message id is known.
        options = {}
        if Completion.last_msg_id:
            options['parentMessageId'] = Completion.last_msg_id

        requests.post(
            'https://chatbot.theb.ai/api/chat-process',
            headers=headers,
            proxies=proxies,
            content_callback=Completion.handle_stream_response,
            json={'prompt': prompt, 'options': options},
        )

        Completion.stream_completed = True

    @staticmethod
    def create(prompt: str, proxy: Optional[str] = None) -> Generator[str, None, None]:
        """Yield text deltas for *prompt* as they stream in.

        :param prompt: user input
        :param proxy: optional ``host:port`` HTTP proxy
        :return: generator of incremental response strings
        """
        Completion.stream_completed = False

        Thread(target=Completion.request, args=[prompt, proxy]).start()

        # Drain the queue until the request thread signals completion AND the
        # queue is empty, so no trailing chunks are lost.
        while not Completion.stream_completed or not Completion.message_queue.empty():
            try:
                chunk = Completion.message_queue.get(timeout=0.01)
                for match in findall(Completion.regex, chunk):
                    message_json = loads(Completion.part1 + match + Completion.part2)
                    Completion.last_msg_id = message_json['id']
                    yield message_json['delta']
            except Empty:
                pass

    @staticmethod
    def handle_stream_response(response) -> None:
        """curl_cffi content callback: enqueue each raw byte chunk as text."""
        Completion.message_queue.put(response.decode())
68 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/MessageAddedSubscription.graphql:
--------------------------------------------------------------------------------
# Live subscription: Poe pushes every message added to the chat identified by
# $chatId, together with the fragments the chat UI needs to render it.
subscription messageAdded (
    $chatId: BigInt!
) {
  messageAdded(chatId: $chatId) {
    id
    messageId
    creationTime
    state
    ...ChatMessage_message
    ...chatHelpers_isBotMessage
  }
}

# --- UI fragments composed into ChatMessage_message below -------------------

fragment ChatMessageDownvotedButton_message on Message {
  ...MessageFeedbackReasonModal_message
  ...MessageFeedbackOtherModal_message
}

fragment ChatMessageDropdownMenu_message on Message {
  id
  messageId
  vote
  text
  linkifiedText
  ...chatHelpers_isBotMessage
}

fragment ChatMessageFeedbackButtons_message on Message {
  id
  messageId
  vote
  voteReason
  ...ChatMessageDownvotedButton_message
}

fragment ChatMessageOverflowButton_message on Message {
  text
  ...ChatMessageDropdownMenu_message
  ...chatHelpers_isBotMessage
}

fragment ChatMessageSuggestedReplies_SuggestedReplyButton_message on Message {
  messageId
}

fragment ChatMessageSuggestedReplies_message on Message {
  suggestedReplies
  ...ChatMessageSuggestedReplies_SuggestedReplyButton_message
}

# Top-level fragment describing everything needed to render one chat message.
fragment ChatMessage_message on Message {
  id
  messageId
  text
  author
  linkifiedText
  state
  ...ChatMessageSuggestedReplies_message
  ...ChatMessageFeedbackButtons_message
  ...ChatMessageOverflowButton_message
  ...chatHelpers_isHumanMessage
  ...chatHelpers_isBotMessage
  ...chatHelpers_isChatBreak
  ...chatHelpers_useTimeoutLevel
  ...MarkdownLinkInner_message
}

fragment MarkdownLinkInner_message on Message {
  messageId
}

fragment MessageFeedbackOtherModal_message on Message {
  id
  messageId
}

fragment MessageFeedbackReasonModal_message on Message {
  id
  messageId
}

# --- helper fragments: message-kind predicates select on the `author` field --

fragment chatHelpers_isBotMessage on Message {
  ...chatHelpers_isHumanMessage
  ...chatHelpers_isChatBreak
}

fragment chatHelpers_isChatBreak on Message {
  author
}

fragment chatHelpers_isHumanMessage on Message {
  author
}

fragment chatHelpers_useTimeoutLevel on Message {
  id
  state
  text
  messageId
}
--------------------------------------------------------------------------------
/unfinished/openprompt/main.py:
--------------------------------------------------------------------------------
# Ad-hoc script: sends one chat message to openprompt.co and streams the reply.
import requests

# NOTE(review): hard-coded Supabase session cookie — it is account-specific
# and carries an `exp` claim, so it will stop working; regenerate via the
# signup/login flow. Avoid committing real credentials.
cookies = {
    'supabase-auth-token': '["eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk1NzQyLCJzdWIiOiJlOGExOTdiNS03YTAxLTQ3MmEtODQ5My1mNGUzNTNjMzIwNWUiLCJlbWFpbCI6InFlY3RncHZhamlibGNjQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTA5NDJ9XSwic2Vzc2lvbl9pZCI6IjIwNTg5MmE5LWU5YTAtNDk2Yi1hN2FjLWEyMWVkMTkwZDA4NCJ9.o7UgHpiJMfa6W-UKCSCnAncIfeOeiHz-51sBmokg0MA","RtPKeb7KMMC9Dn2fZOfiHA",null,null,null]',
}

# Browser-mimicking headers; the commented-out 'cookie' entry is superseded by
# the `cookies=` argument below.
headers = {
    'authority': 'openprompt.co',
    'accept': '*/*',
    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'content-type': 'application/json',
    # 'cookie': 'supabase-auth-token=%5B%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjkzMjQ4LCJzdWIiOiJlODQwNTZkNC0xZWJhLTQwZDktOWU1Mi1jMTc4MTUwN2VmNzgiLCJlbWFpbCI6InNia2didGJnZHB2bHB0ZUBidWdmb28uY29tIiwicGhvbmUiOiIiLCJhcHBfbWV0YWRhdGEiOnsicHJvdmlkZXIiOiJlbWFpbCIsInByb3ZpZGVycyI6WyJlbWFpbCJdfSwidXNlcl9tZXRhZGF0YSI6e30sInJvbGUiOiJhdXRoZW50aWNhdGVkIiwiYWFsIjoiYWFsMSIsImFtciI6W3sibWV0aG9kIjoib3RwIiwidGltZXN0YW1wIjoxNjgxNjg4NDQ4fV0sInNlc3Npb25faWQiOiJiNDhlMmU3NS04NzlhLTQxZmEtYjQ4MS01OWY0OTgxMzg3YWQifQ.5-3E7WvMMVkXewD1qA26Rv4OFSTT82wYUBXNGcYaYfQ%22%2C%22u5TGGMMeT3zZA0agm5HGuA%22%2Cnull%2Cnull%2Cnull%5D',
    'origin': 'https://openprompt.co',
    'referer': 'https://openprompt.co/ChatGPT',
    'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}

# OpenAI-style message list; presumably only the last user turn matters to
# this endpoint — TODO confirm.
json_data = {
    'messages': [
        {
            'role': 'user',
            'content': 'hello world',
        },
    ],
}

# Stream the answer and print each raw chunk as it arrives.
response = requests.post('https://openprompt.co/api/chat2', cookies=cookies, headers=headers, json=json_data,
                         stream=True)
for chunk in response.iter_content(chunk_size=1024):
    print(chunk)
37 |
--------------------------------------------------------------------------------
/gpt4free/quora/mail.py:
--------------------------------------------------------------------------------
1 | from json import loads
2 | from re import findall
3 | from time import sleep
4 |
5 | from fake_useragent import UserAgent
6 | from requests import Session
7 |
8 |
class Emailnator:
    """Disposable-mailbox helper backed by emailnator.com.

    Fix: ``__del__`` previously performed an unguarded network request;
    exceptions raised there during interpreter shutdown (sockets and modules
    may already be torn down) cannot be handled by callers, so cleanup is now
    best-effort.
    """

    def __init__(self) -> None:
        self.client = Session()
        # Prime cookies (including the XSRF token) with an initial page load.
        self.client.get("https://www.emailnator.com/", timeout=6)
        self.cookies = self.client.cookies.get_dict()

        self.client.headers = {
            "authority": "www.emailnator.com",
            "origin": "https://www.emailnator.com",
            "referer": "https://www.emailnator.com/",
            "user-agent": UserAgent().random,
            # The cookie value is URL-encoded; the trailing '%3D' is replaced
            # with the '=' it encodes.
            "x-xsrf-token": self.client.cookies.get("XSRF-TOKEN")[:-3] + "=",
        }

        self.email = None  # current throwaway address, set by get_mail()

    def get_mail(self):
        """Generate a fresh throwaway address and return it."""
        response = self.client.post(
            "https://www.emailnator.com/generate-email",
            json={
                "email": [
                    "domain",
                    "plusGmail",
                    "dotGmail",
                ]
            },
        )

        self.email = loads(response.text)["email"][0]
        return self.email

    def get_message(self):
        """Poll the inbox every 2 s until a second message arrives, then return its body.

        :return: raw message-list response text for the newest message
        """
        print("Waiting for message...")

        while True:
            sleep(2)
            mail_token = self.client.post("https://www.emailnator.com/message-list", json={"email": self.email})

            mail_token = loads(mail_token.text)["messageData"]

            # Two entries presumably means the awaited mail has landed on top
            # of the initial welcome mail — TODO confirm against the service.
            if len(mail_token) == 2:
                print("Message received!")
                print(mail_token[1]["messageID"])
                break

        mail_context = self.client.post(
            "https://www.emailnator.com/message-list",
            json={
                "email": self.email,
                "messageID": mail_token[1]["messageID"],
            },
        )

        return mail_context.text

    def get_verification_code(self):
        """Extract and return the 6-7 digit verification code from the newest message."""
        message = self.get_message()
        code = findall(r';">(\d{6,7})', message)[0]
        print(f"Verification code: {code}")
        return code

    def clear_inbox(self):
        """Delete every message in the current mailbox."""
        print("Clearing inbox...")
        self.client.post(
            "https://www.emailnator.com/delete-all",
            json={"email": self.email},
        )
        print("Inbox cleared!")

    def __del__(self):
        # Best-effort cleanup: never let destructor exceptions escape.
        if self.email:
            try:
                self.clear_inbox()
            except Exception:
                pass
81 |
--------------------------------------------------------------------------------
/unfinished/openprompt/create.py:
--------------------------------------------------------------------------------
# Unfinished signup flow for openprompt.co using a developermail.com
# throwaway address. NOTE(review): the script calls quit() midway, so the
# second half (token extraction + chat request) never runs as-is.
from json import dumps
# from mail import MailClient
from re import findall

from requests import post, get

# Scrape a disposable address from the developermail landing page.
html = get('https://developermail.com/mail/')
print(html.cookies.get('mailboxId'))
email = findall(r'mailto:(.*)">', html.text)[0]

headers = {
    'apikey': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InVzanNtdWZ1emRjcnJjZXVobnlqIiwicm9sZSI6ImFub24iLCJpYXQiOjE2NzgyODYyMzYsImV4cCI6MTk5Mzg2MjIzNn0.2MQ9Lkh-gPqQwV08inIgqozfbYm5jdYWtf-rn-wfQ7U',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
    'x-client-info': '@supabase/auth-helpers-nextjs@0.5.6',
}

# NOTE(review): hard-coded password for the generated account — fine for a
# throwaway signup, but do not reuse for anything real.
json_data = {
    'email': email,
    'password': 'T4xyt4Yn6WWQ4NC',
    'data': {},
    'gotrue_meta_security': {},
}

# Create the Supabase account; a confirmation mail is sent to `email`.
response = post('https://usjsmufuzdcrrceuhnyj.supabase.co/auth/v1/signup', headers=headers, json=json_data)
print(response.json())

# email_link = None
# while not email_link:
#     sleep(1)

# mails = mailbox.getmails()
# print(mails)


# NOTE(review): everything below is dead code until this quit() is removed;
# it expects the confirmation URL to be pasted in manually.
quit()

url = input("Enter the url: ")
response = get(url, allow_redirects=False)

# https://openprompt.co/#access_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8&expires_in=604800&refresh_token=_Zp8uXIA2InTDKYgo8TCqA&token_type=bearer&type=signup

# NOTE(review): fragile parsing — assumes the fragment's parameter order is
# exactly access_token, expires_in, refresh_token; verify before relying on it.
redirect = response.headers.get('location')
access_token = redirect.split('&')[0].split('=')[1]
refresh_token = redirect.split('&')[2].split('=')[1]

# Re-pack the tokens into the cookie format openprompt.co expects.
supabase_auth_token = dumps([access_token, refresh_token, None, None, None], separators=(',', ':'))
print(supabase_auth_token)

cookies = {
    'supabase-auth-token': supabase_auth_token
}

json_data = {
    'messages': [
        {
            'role': 'user',
            'content': 'how do I reverse a string in python?'
        }
    ]
}

# Stream the chat answer and print each raw chunk.
response = post('https://openprompt.co/api/chat2', cookies=cookies, json=json_data, stream=True)
for chunk in response.iter_content(chunk_size=1024):
    print(chunk)
65 |
--------------------------------------------------------------------------------
/gpt4free/cocalc/__init__.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from fake_useragent import UserAgent
3 | from pydantic import BaseModel
4 |
5 |
class CoCalcResponse(BaseModel):
    """Structured result of a CoCalc chatbot completion request."""

    # Completion text; empty string when the request failed.
    text: str
    # True when the API call succeeded, False otherwise.
    status: bool
9 |
10 |
class Completion:
    """A class for generating text completions using CoCalc's GPT-based chatbot."""

    API_ENDPOINT = "https://cocalc.com/api/v2/openai/chatgpt"
    DEFAULT_SYSTEM_PROMPT = "ASSUME I HAVE FULL ACCESS TO COCALC. "

    @staticmethod
    def create(prompt: str, cookie_input: str) -> CoCalcResponse:
        """
        Generate a text completion for the given prompt using CoCalc's GPT-based chatbot.

        Args:
            prompt: The text prompt to complete.
            cookie_input: The cookie required to authenticate the chatbot API request.

        Returns:
            A CoCalcResponse object containing the text completion and a boolean indicating
            whether the request was successful.
        """

        # Initialize a session with custom headers
        session = Completion._initialize_session(cookie_input)

        # Set the data that will be submitted
        payload = Completion._create_payload(prompt, Completion.DEFAULT_SYSTEM_PROMPT)

        try:
            # Submit the request and return the results
            response = session.post(Completion.API_ENDPOINT, json=payload).json()
            return CoCalcResponse(text=response['output'], status=response['success'])
        except (requests.exceptions.RequestException, ValueError, KeyError) as e:
            # Fix: previously only RequestException was caught, so a non-JSON
            # body (ValueError) or an unexpected response schema (KeyError)
            # crashed despite this method's contract of returning status=False
            # on failure.
            print(f"Error: {e}")
            return CoCalcResponse(text="", status=False)

    @classmethod
    def _initialize_session(cls, conversation_cookie: str) -> requests.Session:
        """Initialize a session with custom headers for the request."""

        session = requests.Session()
        headers = {
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.5",
            "Origin": "https://cocalc.com",
            "Referer": "https://cocalc.com/api/v2/openai/chatgpt",
            "Cookie": conversation_cookie,
            "User-Agent": UserAgent().random,
        }
        session.headers.update(headers)

        return session

    @staticmethod
    def _create_payload(prompt: str, system_prompt: str) -> dict:
        """Create the payload for the API request."""

        return {"input": prompt, "system": system_prompt, "tag": "next:index"}
68 |
--------------------------------------------------------------------------------
/gpt4free/__init__.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 | from gpt4free import cocalc
4 | from gpt4free import forefront
5 | from gpt4free import quora
6 | from gpt4free import theb
7 | from gpt4free import usesless
8 | from gpt4free import you
9 |
10 |
class Provider(Enum):
    """An enum representing different providers."""

    # Each member's value is the internal key for the corresponding backend
    # module dispatched by Completion.create().
    You = 'you'
    Poe = 'poe'
    ForeFront = 'fore_front'
    Theb = 'theb'
    CoCalc = 'cocalc'
    UseLess = 'useless'
20 |
21 |
class Completion:
    """This class will be used for invoking the given provider"""

    @staticmethod
    def create(provider: Provider, prompt: str, **kwargs) -> str:
        """
        Invokes the given provider with given prompt and addition arguments and returns the string response

        :param provider: an enum representing the provider to use while invoking
        :param prompt: input provided by the user
        :param kwargs: Additional keyword arguments to pass to the provider while invoking
        :return: A string representing the response from the provider
        :raises ValueError: if ``provider`` is not a known ``Provider`` member
        """
        if provider == Provider.Poe:
            return Completion.__poe_service(prompt, **kwargs)
        elif provider == Provider.You:
            return Completion.__you_service(prompt, **kwargs)
        elif provider == Provider.ForeFront:
            return Completion.__fore_front_service(prompt, **kwargs)
        elif provider == Provider.Theb:
            return Completion.__theb_service(prompt, **kwargs)
        elif provider == Provider.CoCalc:
            return Completion.__cocalc_service(prompt, **kwargs)
        elif provider == Provider.UseLess:
            return Completion.__useless_service(prompt, **kwargs)
        else:
            # Fix: a bare `Exception` with an ungrammatical message was raised
            # here. ValueError is still caught by existing `except Exception`
            # callers while being more precise.
            raise ValueError('Provider does not exist, please try again')

    @staticmethod
    def __useless_service(prompt: str, **kwargs) -> str:
        return usesless.Completion.create(prompt=prompt, **kwargs)

    @staticmethod
    def __you_service(prompt: str, **kwargs) -> str:
        return you.Completion.create(prompt, **kwargs).text

    @staticmethod
    def __poe_service(prompt: str, **kwargs) -> str:
        return quora.Completion.create(prompt=prompt, **kwargs).text

    @staticmethod
    def __fore_front_service(prompt: str, **kwargs) -> str:
        return forefront.Completion.create(prompt=prompt, **kwargs).text

    @staticmethod
    def __theb_service(prompt: str, **kwargs) -> str:
        # theb streams its answer in chunks; join them into one string.
        return ''.join(theb.Completion.create(prompt=prompt))

    @staticmethod
    def __cocalc_service(prompt: str, **kwargs) -> str:
        return cocalc.Completion.create(prompt, cookie_input=kwargs.get('cookie_input', '')).text
73 |
--------------------------------------------------------------------------------
/gui/README.md:
--------------------------------------------------------------------------------
1 | # gpt4free gui
2 |
3 | This code provides a Graphical User Interface (GUI) for gpt4free. Users can ask questions and get answers from GPT-4 API's, utilizing multiple API implementations. The project contains two different Streamlit applications: `streamlit_app.py` and `streamlit_chat_app.py`.
4 |
5 | In addition, a new GUI script specifically implemented using PyWebIO has been added and can be found in the pywebio-gui folder. If there are errors with the Streamlit version, you can try using the PyWebIO version instead
6 |
7 | Installation
8 | ------------
9 |
10 | 1. Clone the repository.
11 | 2. Install the required dependencies with: `pip install -r requirements.txt`.
12 | 3. To use `streamlit_chat_app.py`, note that it depends on a pull request (PR #24) from the https://github.com/AI-Yash/st-chat/ repository, which may change in the future. The current dependency library can be found at https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip.
13 |
14 | Usage
15 | -----
16 |
17 | Choose one of the Streamlit applications to run:
18 |
19 | ### streamlit\_app.py
20 |
21 | This application provides a simple interface for asking GPT-4 questions and receiving answers.
22 |
23 | To run the application:
24 |
25 | run:
26 | ```arduino
27 | streamlit run gui/streamlit_app.py
28 | ```
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 | preview:
37 |
38 |
39 |
40 |
41 | ### streamlit\_chat\_app.py
42 |
43 | This application provides a chat-like interface for asking GPT-4 questions and receiving answers. It supports multiple query methods, and users can select the desired API for their queries. The application also maintains a conversation history.
44 |
45 | To run the application:
46 |
47 | ```arduino
48 | streamlit run streamlit_chat_app.py
49 | ```
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 | preview:
59 |
60 |
61 |
62 | Contributing
63 | ------------
64 |
65 | Feel free to submit pull requests, report bugs, or request new features by opening issues on the GitHub repository.
66 |
67 | Bug
68 | ----
There is a bug in `streamlit_chat_app.py` that has not been pinpointed yet (it is probably simple, but there has been no time to track it down). Whenever you open a new conversation or access an old conversation, prompt-answering only starts after the second time you submit text input; other than that, everything else seems to work correctly.
70 |
71 | License
72 | -------
73 |
74 | This project is licensed under the MIT License.
75 |
--------------------------------------------------------------------------------
/unfinished/chatpdf/__init__.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import json
3 |
4 | from queue import Queue, Empty
5 | from threading import Thread
6 | from json import loads
7 | from re import findall
8 |
9 |
class Completion:
    # NOTE(review): this class is unfinished. `create()` references
    # Completion.stream_completed, Completion.message_queue, Completion.regex,
    # Completion.part1 and Completion.part2, none of which are defined on this
    # class, so calling create() raises AttributeError. The streaming plumbing
    # appears to have been copied from the `theb` provider — confirm the
    # intended design before use.

    # NOTE(review): missing @staticmethod decorator — works when invoked via
    # the class object (as the Thread target below does), but would misbehave
    # if called on an instance.
    def request(prompt: str):
        '''TODO: some sort of authentication + upload PDF from URL or local file
        Then you should get the atoken and chat ID
        '''

        # Placeholder credentials — must be obtained from the (not yet
        # implemented) authentication/upload step above.
        token = "your_token_here"
        chat_id = "your_chat_id_here"

        url = "https://chat-pr4yueoqha-ue.a.run.app/"

        # Chat history: an empty bot message followed by the user's prompt.
        payload = json.dumps({
            "v": 2,
            "chatSession": {
                "type": "join",
                "chatId": chat_id
            },
            "history": [
                {
                    "id": "VNsSyJIq_0",
                    "author": "p_if2GPSfyN8hjDoA7unYe",
                    "msg": "",
                    "time": 1682672009270
                },
                {
                    "id": "Zk8DRUtx_6",
                    "author": "uplaceholder",
                    "msg": prompt,
                    "time": 1682672181339
                }
            ]
        })

        # TODO: fix headers, use random user-agent, streaming response, etc
        headers = {
            'authority': 'chat-pr4yueoqha-ue.a.run.app',
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'atoken': token,
            'content-type': 'application/json',
            'origin': 'https://www.chatpdf.com',
            'referer': 'https://www.chatpdf.com/',
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'cross-site',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
        }

        # NOTE(review): blocking request (no streaming); the return value is
        # never consumed because create() only reads message_queue.
        response = requests.request(
            "POST", url, headers=headers, data=payload).text
        Completion.stream_completed = True
        return {'response': response}

    @staticmethod
    def create(prompt: str):
        # Runs the request in the background and drains message_queue until
        # the request thread signals completion.
        Thread(target=Completion.request, args=[prompt]).start()

        # NOTE(review): stream_completed is never reset to False before the
        # loop and message_queue/regex/part1/part2 are undefined — see class
        # note above.
        while Completion.stream_completed != True or not Completion.message_queue.empty():
            try:
                message = Completion.message_queue.get(timeout=0.01)
                for message in findall(Completion.regex, message):
                    yield loads(Completion.part1 + message + Completion.part2)['delta']

            except Empty:
                pass

    @staticmethod
    def handle_stream_response(response):
        # Intended curl-style content callback; nothing currently wires it up.
        Completion.message_queue.put(response.decode())
83 |
--------------------------------------------------------------------------------
/gpt4free/README.md:
--------------------------------------------------------------------------------
1 | # gpt4free package
2 |
3 | ### What is it?
4 |
5 | gpt4free is a python package that provides some language model api's
6 |
7 | ### Main Features
8 |
9 | - It's free to use
10 | - Easy access
11 |
12 | ### Installation:
13 |
14 | ```bash
15 | pip install gpt4free
16 | ```
17 |
18 | #### Usage:
19 |
20 | ```python
21 | import gpt4free
22 | from gpt4free import Provider, quora, forefront
23 |
24 | # usage You
25 | response = gpt4free.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi')
26 | print(response)
27 |
28 | # usage Poe
29 | token = quora.Account.create(logging=False)
30 | response = gpt4free.Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT')
31 | print(response)
32 |
33 | # usage forefront
34 | token = forefront.Account.create(logging=False)
35 | response = gpt4free.Completion.create(
36 | Provider.ForeFront, prompt='Write a poem on Lionel Messi', model='gpt-4', token=token
37 | )
38 | print(response)
39 | print(f'END')
40 |
41 | # usage theb
42 | response = gpt4free.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi')
43 | print(response)
44 |
45 | # usage cocalc
46 | response = gpt4free.Completion.create(Provider.CoCalc, prompt='Write a poem on Lionel Messi', cookie_input='')
47 | print(response)
48 |
49 | ```
50 |
51 | ### Invocation Arguments
52 |
53 | `gpt4free.Completion.create()` method has two required arguments
54 |
1. Provider: This is an enum representing different providers
56 | 2. prompt: This is the user input
57 |
58 | #### Keyword Arguments
59 |
60 | Some of the keyword arguments are optional, while others are required.
61 |
62 | - You:
63 | - `safe_search`: boolean - default value is `False`
64 | - `include_links`: boolean - default value is `False`
65 | - `detailed`: boolean - default value is `False`
66 | - Quora:
67 | - `token`: str - this needs to be provided by the user
68 | - `model`: str - default value is `gpt-4`.
69 |
70 | (Available models: `['Sage', 'GPT-4', 'Claude+', 'Claude-instant', 'ChatGPT', 'Dragonfly', 'NeevaAI']`)
71 | - ForeFront:
  - `token`: str - this needs to be provided by the user
73 |
74 | - Theb:
75 | (no keyword arguments required)
76 | - CoCalc:
77 | - `cookie_input`: str - this needs to be provided by user
78 |
79 | #### Token generation of quora
80 | ```python
81 | from gpt4free import quora
82 |
83 | token = quora.Account.create(logging=False)
84 | ```
85 |
86 | ### Token generation of ForeFront
87 | ```python
88 | from gpt4free import forefront
89 |
90 | token = forefront.Account.create(logging=False)
91 | ```
92 |
93 | ## Copyright:
94 |
95 | This program is licensed under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.txt)
96 |
97 | ### Copyright Notice:
98 |
99 | ```
100 | xtekky/gpt4free: multiple reverse engineered language-model api's to decentralise the ai industry.
101 | Copyright (C) 2023 xtekky
102 |
103 | This program is free software: you can redistribute it and/or modify
104 | it under the terms of the GNU General Public License as published by
105 | the Free Software Foundation, either version 3 of the License, or
106 | (at your option) any later version.
107 |
108 | This program is distributed in the hope that it will be useful,
109 | but WITHOUT ANY WARRANTY; without even the implied warranty of
110 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
111 | GNU General Public License for more details.
112 |
113 | You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
115 | ```
116 |
--------------------------------------------------------------------------------
/unfinished/openprompt/mail.py:
--------------------------------------------------------------------------------
1 | import email
2 |
3 | import requests
4 |
5 |
class MailClient:
    """Minimal client for the developermail.com disposable-mailbox REST API."""

    def __init__(self):
        self.username = None  # mailbox name assigned by the API
        self.token = None     # per-mailbox auth token (sent as X-MailboxToken)
        self.raw = None
        self.mailids = None   # last fetched list of message ids
        self.mails = None     # last fetched list of message bodies
        self.mail = None      # last fetched single message

    def create(self, force=False):
        """Create a mailbox if none exists yet and return its credentials.

        :param force: accepted for interface compatibility; currently unused.
        :return: dict with the mailbox ``username`` and ``token``
        """
        headers = {
            'accept': 'application/json',
        }

        if not self.username:
            self.response = requests.put(
                'https://www.developermail.com/api/v1/mailbox', headers=headers)
            self.response = self.response.json()
            self.username = self.response['result']['name']
            self.token = self.response['result']['token']

        return {'username': self.username, 'token': self.token}

    def destroy(self):
        """Delete the mailbox and forget its credentials."""
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }
        self.response = requests.delete(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}', headers=headers)
        self.response = self.response.json()
        self.username = None
        self.token = None
        return self.response

    def newtoken(self):
        """Rotate the mailbox token and return the refreshed credentials."""
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }
        self.response = requests.put(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}/token', headers=headers)
        self.response = self.response.json()
        self.token = self.response['result']['token']
        return {'username': self.username, 'token': self.token}

    def getmailids(self):
        """Return the ids of all messages currently in the mailbox."""
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }

        self.response = requests.get(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}', headers=headers)
        self.response = self.response.json()
        self.mailids = self.response['result']
        return self.mailids

    def getmails(self, mailids: list = None):
        """Fetch message bodies for *mailids* (defaults to the last fetched id list)."""
        from json import dumps  # function-scope import keeps module deps unchanged

        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
            'Content-Type': 'application/json',
        }

        if mailids is None:
            mailids = self.mailids

        # Bug fix: the endpoint expects a JSON array, but str(mailids)
        # produced a Python repr with single quotes, which is not valid JSON.
        data = dumps(mailids)

        self.response = requests.post(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}/messages', headers=headers, data=data)
        self.response = self.response.json()
        self.mails = self.response['result']
        return self.mails

    def getmail(self, mailid: str, raw=False):
        """Fetch one message; parse it into an ``email.message.Message`` unless *raw*."""
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }
        self.response = requests.get(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}/messages/{mailid}', headers=headers)
        self.response = self.response.json()
        self.mail = self.response['result']
        if raw is False:
            self.mail = email.message_from_string(self.mail)
        return self.mail

    def delmail(self, mailid: str):
        """Delete a single message by id."""
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }
        self.response = requests.delete(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}/messages/{mailid}', headers=headers)
        self.response = self.response.json()
        return self.response
107 |
108 |
if __name__ == '__main__':
    # Demo usage. Previously this ran unconditionally on import and called
    # newtoken() on a client with no mailbox (username/token still None),
    # which could never succeed. Create the mailbox and fetch ids first.
    client = MailClient()
    client.create()
    client.newtoken()
    client.getmailids()
    print(client.getmails())
112 |
--------------------------------------------------------------------------------
/unfinished/bing/__ini__.py:
--------------------------------------------------------------------------------
1 | # Import necessary libraries
2 | import asyncio
3 | from json import dumps, loads
4 | from ssl import create_default_context
5 |
6 | import websockets
7 | from browser_cookie3 import edge
8 | from certifi import where
9 | from requests import get
10 |
# Set up SSL context for the Sydney WebSocket connection.
# certifi's CA bundle is added on top of the defaults so verification
# works even where the system trust store is incomplete.
ssl_context = create_default_context()
ssl_context.load_verify_locations(where())
14 |
15 |
def format(msg: dict) -> str:
    """Serialize *msg* to JSON and append Bing's 0x1e record separator.

    Note: the name shadows the builtin ``format``; kept for compatibility
    with existing callers in this module.
    """
    return ''.join((dumps(msg), '\x1e'))
19 |
20 |
def get_token():
    """Return the '_U' auth cookie for bing.com from the local Edge profile."""
    jar = {cookie.name: cookie.value for cookie in edge(domain_name='bing.com')}
    return jar['_U']
25 |
26 |
class AsyncCompletion:
    async def create(
            prompt: str = 'hello world',
            optionSets: list = None,
            token: str = None):
        """Stream a Bing AI chat reply for *prompt*, yielding text deltas.

        Bug fixes: *optionSets* previously used a shared mutable list
        default, and *token* defaulted to ``get_token()`` evaluated at
        class-definition time — reading browser cookies on import even when
        a token was supplied. Both now default lazily.
        """
        if optionSets is None:
            optionSets = [
                'deepleo',
                'enable_debug_commands',
                'disable_emoji_spoken_text',
                'enablemm',
                'h3relaxedimg',
            ]
        if token is None:
            token = get_token()

        # Ask Bing for a fresh conversation bound to this account cookie
        create = get('https://edgeservices.bing.com/edgesvc/turing/conversation/create',
                     headers={
                         'host': 'edgeservices.bing.com',
                         'authority': 'edgeservices.bing.com',
                         'cookie': f'_U={token}',
                         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
                     }
                     )

        # Extract conversation data (parse the JSON body once, not three times)
        conversation = create.json()
        conversationId = conversation['conversationId']
        clientId = conversation['clientId']
        conversationSignature = conversation['conversationSignature']

        # Connect to WebSocket
        wss = await websockets.connect('wss://sydney.bing.com/sydney/ChatHub', max_size=None, ssl=ssl_context,
                                       extra_headers={
                                           # Add necessary headers
                                       }
                                       )

        # Handshake: announce the JSON protocol version
        await wss.send(format({'protocol': 'json', 'version': 1}))
        await wss.recv()

        # Define message structure (still a stub in this unfinished module)
        struct = {
            # Add necessary message structure
        }

        # Send message
        await wss.send(format(struct))

        # Stream responses: type 1 carries text updates, type 2 ends the turn
        base_string = ''
        final = False
        while not final:
            objects = str(await wss.recv()).split('\x1e')
            for obj in objects:
                if not obj:
                    continue

                response = loads(obj)
                if response.get('type') == 1 and response['arguments'][0].get('messages'):
                    response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get(
                        'text')

                    # Yield only the newly appended portion of the running text
                    yield (response_text.replace(base_string, ''))
                    base_string = response_text

                elif response.get('type') == 2:
                    final = True

        await wss.close()
95 |
96 |
async def run():
    """Drive AsyncCompletion.create with a demo prompt and print the stream."""
    stream = AsyncCompletion.create(
        prompt='summarize cinderella with each word beginning with a consecutive letter of the alphabet, a-z',
        optionSets=[
            "galileo",
        ],
    )
    async for chunk in stream:
        print(chunk, end='', flush=True)
106 |
107 |
if __name__ == '__main__':
    # Guard so importing this module no longer kicks off a network session.
    asyncio.run(run())
109 |
--------------------------------------------------------------------------------
/unfinished/bard/__init__.py:
--------------------------------------------------------------------------------
1 | from json import dumps, loads
2 | from os import getenv
3 | from random import randint
4 | from re import search
5 | from urllib.parse import urlencode
6 |
7 | from bard.typings import BardResponse
8 | from dotenv import load_dotenv
9 | from requests import Session
10 |
# Load .env so the Bard session cookie and optional proxy can be configured
# without hard-coding secrets in the source.
load_dotenv()
token = getenv('1psid')  # value of the __Secure-1PSID cookie from bard.google.com
proxy = getenv('proxy')  # optional "host:port" (or "user:pass@host:port") HTTP proxy
14 |
# Human-readable creativity instructions injected into the prompt for each
# supported temperature value. These keys are the only accepted values for
# Completion.create's temperature argument (anything else raises KeyError).
temperatures = {
    0: "Generate text strictly following known patterns, with no creativity.",
    0.1: "Produce text adhering closely to established patterns, allowing minimal creativity.",
    0.2: "Create text with modest deviations from familiar patterns, injecting a slight creative touch.",
    0.3: "Craft text with a mild level of creativity, deviating somewhat from common patterns.",
    0.4: "Formulate text balancing creativity and recognizable patterns for coherent results.",
    0.5: "Generate text with a moderate level of creativity, allowing for a mix of familiarity and novelty.",
    0.6: "Compose text with an increased emphasis on creativity, while partially maintaining familiar patterns.",
    0.7: "Produce text favoring creativity over typical patterns for more original results.",
    0.8: "Create text heavily focused on creativity, with limited concern for familiar patterns.",
    0.9: "Craft text with a strong emphasis on unique and inventive ideas, largely ignoring established patterns.",
    1: "Generate text with maximum creativity, disregarding any constraints of known patterns or structures."
}
28 |
29 |
class Completion:
    """Minimal client for Bard's internal StreamGenerate endpoint."""

    @staticmethod
    def create(
            prompt: str = 'hello world',
            temperature: float = None,
            conversation_id: str = '',
            response_id: str = '',
            choice_id: str = '') -> BardResponse:
        """Send *prompt* to Bard and return the parsed BardResponse.

        *temperature*, when given, must be a key of the module-level
        ``temperatures`` table; its description is prepended to the prompt.
        conversation_id/response_id/choice_id continue a prior exchange.
        """
        if temperature:
            prompt = f'''settings: follow these settings for your response: [temperature: {temperature} - {temperatures[temperature]}] | prompt : {prompt}'''

        client = Session()
        # Route both schemes through the same HTTP proxy when configured.
        client.proxies = {
            'http': f'http://{proxy}',
            'https': f'http://{proxy}'} if proxy else None

        client.headers = {
            'authority': 'bard.google.com',
            'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
            'origin': 'https://bard.google.com',
            'referer': 'https://bard.google.com/',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
            'x-same-domain': '1',
            'cookie': f'__Secure-1PSID={token}'
        }

        # SNlM0e scraped from the homepage acts as the CSRF token ('at' below).
        snlm0e = search(r'SNlM0e\":\"(.*?)\"',
                        client.get('https://bard.google.com/').text).group(1)

        params = urlencode({
            'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
            '_reqid': randint(1111, 9999),
            'rt': 'c',
        })

        response = client.post(
            f'https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate?{params}',
            data={
                'at': snlm0e,
                'f.req': dumps([None, dumps([
                    [prompt],
                    None,
                    [conversation_id, response_id, choice_id],
                ])])
            }
        )

        chat_data = loads(response.content.splitlines()[3])[0][2]
        if not chat_data:
            print('error, retrying')
            # Bug fix: propagate the retried result. Previously the recursive
            # call's return value was discarded and execution fell through to
            # loads(chat_data) on empty data, raising an exception.
            # NOTE(review): retries are unbounded — consider adding a cap.
            return Completion.create(prompt, temperature,
                                     conversation_id, response_id, choice_id)

        json_chat_data = loads(chat_data)
        results = {
            'content': json_chat_data[0][0],
            'conversation_id': json_chat_data[1][0],
            'response_id': json_chat_data[1][1],
            'factualityQueries': json_chat_data[3],
            'textQuery': json_chat_data[2][0] if json_chat_data[2] is not None else '',
            'choices': [{'id': i[0], 'content': i[1]} for i in json_chat_data[4]],
        }

        return BardResponse(results)
94 |
--------------------------------------------------------------------------------
/gui/query_methods.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from typing import Optional
4 |
5 | sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
6 |
7 | from gpt4free import quora, forefront, theb, you
8 | import random
9 |
10 |
def query_forefront(question: str, proxy: Optional[str] = None) -> str:
    """Answer *question* via forefront's GPT-4 backend.

    Creates a throwaway account per call. Returns the completion text, or a
    human-readable error string on failure.
    """
    # create an account
    token = forefront.Account.create(logging=False, proxy=proxy)

    try:
        # Bug fix: forward the user's question — the hard-coded prompt
        # 'hello world' was previously sent instead. (The dead `response`
        # local was also removed.)
        return forefront.Completion.create(token=token, prompt=question, model='gpt-4', proxy=proxy).text
    except Exception as e:
        # Return error message if an exception occurs
        return (
            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
        )
24 |
25 |
def query_quora(question: str, proxy: Optional[str] = None) -> str:
    """Answer *question* through quora (Poe) with a freshly created account."""
    account_token = quora.Account.create(logging=False, enable_bot_creation=True, proxy=proxy)
    completion = quora.Completion.create(model='gpt-4', prompt=question, token=account_token, proxy=proxy)
    return completion.text
29 |
30 |
def query_theb(question: str, proxy: Optional[str] = None) -> str:
    """Answer *question* via theb's streaming completion, joined into one string.

    Returns the completion text, or a human-readable error string on failure.
    (The dead ``response = ""`` local was removed.)
    """
    try:
        return ''.join(theb.Completion.create(prompt=question, proxy=proxy))

    except Exception as e:
        # Return error message if an exception occurs
        return (
            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
        )
42 |
43 |
def query_you(question: str, proxy: Optional[str] = None) -> str:
    """Answer *question* via you.com; returns the text or an error string."""
    try:
        return you.Completion.create(prompt=question, proxy=proxy).text
    except Exception as e:
        # Surface the failure as a readable message rather than raising.
        return (
            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
        )
55 |
56 |
# Define a dictionary containing all query methods.
# Maps a display name (shown in the GUI) to its backend query function;
# query() draws from these values when "Random" is selected. The commented
# entries are backends that are currently disabled/unavailable.
avail_query_methods = {
    "Forefront": query_forefront,
    "Poe": query_quora,
    "Theb": query_theb,
    "You": query_you,
    # "Writesonic": query_writesonic,
    # "T3nsor": query_t3nsor,
    # "Phind": query_phind,
    # "Ora": query_ora,
}
68 |
69 |
def query(user_input: str, selected_method: str = "Random", proxy: Optional[str] = None) -> str:
    """Answer *user_input* with the chosen backend, or try random backends.

    With a specific *selected_method*, a single attempt is made. With
    "Random", backends are drawn at random without replacement until one
    succeeds or all have failed.
    """
    # Explicit selection: one attempt, errors reported to the user.
    if selected_method != "Random" and selected_method in avail_query_methods:
        try:
            return avail_query_methods[selected_method](user_input, proxy=proxy)
        except Exception as e:
            print(f"Error with {selected_method}: {e}")
            return "😵 Sorry, some error occurred please try again."

    fallback = "😵 Sorry, some error occurred please try again."
    candidates = list(avail_query_methods.values())

    # Random mode: keep drawing until a backend answers or none remain.
    while candidates:
        method = random.choice(candidates)
        # Recover the display name for error reporting.
        method_name = [k for k, v in avail_query_methods.items() if v is method][0]
        try:
            return method(user_input, proxy=proxy)
        except Exception as e:
            print(f"Error with {method_name}: {e}")
            candidates.remove(method)

    return fallback
101 |
--------------------------------------------------------------------------------
/gpt4free/you/__init__.py:
--------------------------------------------------------------------------------
1 | import json
2 | import re
3 | from typing import Optional, List, Dict, Any
4 | from uuid import uuid4
5 |
6 | from fake_useragent import UserAgent
7 | from pydantic import BaseModel
8 | from tls_client import Session
9 |
10 |
class PoeResponse(BaseModel):
    """Parsed result of a you.com query.

    The mutable field defaults are safe here: pydantic copies model field
    defaults per instance rather than sharing them.
    """

    text: Optional[str] = None  # concatenated youChatToken text (or failure message)
    links: List[str] = []  # third-party search links, filled when include_links=True
    extra: Dict[str, Any] = {}  # extra parsed SSE sections, filled when detailed=True
15 |
16 |
class Completion:
    """Client for you.com's streaming search ("youchat") endpoint."""

    @staticmethod
    def create(
        prompt: str,
        page: int = 1,
        count: int = 10,
        safe_search: str = 'Moderate',
        on_shopping_page: bool = False,
        mkt: str = '',
        response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
        domain: str = 'youchat',
        query_trace_id: Optional[str] = None,
        chat: Optional[list] = None,
        include_links: bool = False,
        detailed: bool = False,
        debug: bool = False,
        proxy: Optional[str] = None,
    ) -> PoeResponse:
        """Run *prompt* against you.com and parse the SSE reply.

        Returns a PoeResponse whose ``text`` holds the concatenated chat
        tokens; ``links`` is filled when *include_links* is true and
        ``extra`` when *detailed* is true. An unusable reply yields the
        generic failure response instead of raising.
        """
        if chat is None:
            chat = []

        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else {}

        client = Session(client_identifier='chrome_108')
        client.headers = Completion.__get_headers()
        client.proxies = proxies

        response = client.get(
            'https://you.com/api/streamingSearch',  # was an f-string with no placeholders
            params={
                'q': prompt,
                'page': page,
                'count': count,
                'safeSearch': safe_search,
                'onShoppingPage': on_shopping_page,
                'mkt': mkt,
                'responseFilter': response_filter,
                'domain': domain,
                'queryTraceId': str(uuid4()) if query_trace_id is None else query_trace_id,
                'chat': str(chat),  # {'question':'','answer':' ''}
            },
        )

        if debug:
            print('\n\n------------------\n\n')
            print(response.text)
            print('\n\n------------------\n\n')

        if 'youChatToken' not in response.text:
            return Completion.__get_failure_response()

        # Bug fix: re.search returns None when an SSE section is absent; the
        # old code called .group() unconditionally and crashed with
        # AttributeError. Each section is now parsed only when present and
        # only when actually requested by the caller.
        serp_match = re.search(
            r'(?<=event: youChatSerpResults\ndata:)(.*\n)*?(?=event: )', response.text
        )
        third_party_match = re.search(
            r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text
        )
        # slots = findall(r"slots\ndata: (.*)\n\nevent", response.text)[0]

        text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', response.text))

        response = PoeResponse(text=text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'))
        if include_links and third_party_match is not None:
            response.links = json.loads(third_party_match.group())['search']['third_party_search_results']

        if detailed and serp_match is not None:
            response.extra = {
                'youChatSerpResults': json.loads(serp_match.group()),
                # 'slots' : loads(slots)
            }

        return response

    @staticmethod
    def __get_headers() -> dict:
        """Browser-mimicking headers with a fresh guest cookie per call."""
        return {
            'authority': 'you.com',
            'accept': 'text/event-stream',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control': 'no-cache',
            'referer': 'https://you.com/search?q=who+are+you&tbm=youchat',
            'sec-ch-ua': '"Not_A Brand";v="99", "Google Chrome";v="109", "Chromium";v="109"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'cookie': f'safesearch_guest=Moderate; uuid_guest={str(uuid4())}',
            'user-agent': UserAgent().random,
        }

    @staticmethod
    def __get_failure_response() -> PoeResponse:
        """Fallback response returned when the API reply cannot be used."""
        return PoeResponse(text='Unable to fetch the response, Please try again.')
113 |
--------------------------------------------------------------------------------
/unfinished/test.py:
--------------------------------------------------------------------------------
1 | # asyncio.run(gptbz.test())
2 |
3 | import requests
4 |
5 | image = '/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAAoALQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigDkZP+EhS4W0k1S+VntQPtEWmRsgkNwBu4ZsHYQNvTbls5BA6DS7uW6S6E0VwjQ3UsQM0Pl71DZUrydy4IAbvg8CsTx3DbHQLi4uVs9scWzdd+dsAaWI4PlfNjKjpzkDtmpoNSgbWYpLR7Ty5bq5trw/vd3nIowBxtzti53Y6fKT3z2djra56fNbv07HR1z13ZRX/jDyby0+02f9nfdmsEeHd5o/5anndwPkxjjPWuhrh9Mvra88RLqccmnOHtvLEqfaN+1r1lUcjbg4PbO4H+Cqk+hnRi9ZI29E0uC2N1eG3Am+13DITZRwuqlsYG0ZYEKCGJywwT2AtWTapcW1vcPPCiyrE5ils2SRQV+dW/ecMT/3zgj5utZtpdwL4e190e02W9xeb9vm7FOWY78/NnnJ28f3ahkgtptD8JRlbMos9s8QPnbcrEzDy/4sgDjzOMdeaSZbi23f8vmbfn6hBFuktmuWWPJWCNELNuxgbpcDj1Pbr2qJ9bMVyIZNK1JVLyr5qwB1AjUNu+Uk4bovGSRjAqCTwdoElv5B02MReT5G1HZfk8zzMcEfx81YlsJ7NJX0tolZzNK8dyZJA8jDIwd3yjcBkAHjOAM09SP3b/q36mkjiSNXAYBgCNykH8QeRWdfaw1ldSW66XqN0UgE++3iBRsvt2BiQN/8WPQZqharF9oN5osVml1NLbLqUbmUFY/L4CrgYYKy4yoGM5xjhlnc2OoeMrfULV7aQXGkExyYlErJ5oPQ/Jtye/zZ9qLgqaTba0NyzvPtizH7NcQeVM8OJ49u/acbl9VPY96s1geFjF/xOhF9m41Wfd9n8z73BO7f/Fzzt+X0q7c6mWvRY2DwSXcUsQuUff8Auo2ySflB+YqrYyQOmTyARPQmVP32kLqF1cbmsrJZkuni3rcfZ98UfzKvJJUE4JOM5wpODwDl3Meuf2rHbRatcBJXuj5iachjhUovlBmZudrNkEZ3HIOMGlhREhbS9He2a8MO6a4fzmGDMQ3zAk5yZ8DzMgj0yRuWdha2CzLawrEJpnnkx/G7HLMfc0bl3VNf5pff/kVLS8uxFHHJZ3s5Xyo2mZI4y2VBZyN44B6gDrwAcVZ069Go2EV2Le5t/MBPlXMZjkXnGGU9OlULSdbfTt8LWy5
mt0JAkK4YRLjnnODx26Z71TXULEWn/CUWDwmxeDbM4WbkCXJbaB23SnlM5PUDNF7CcObZf12OlpCcDoTz2oVlcZVgRkjIPccGo7hgsSk7ceYg+bP94elUYpamda64915GdH1SESxiTM0KjZmTZtbDHB53Y/u89eK1qw4xD9l0mIC3wLdCg/eYwHh+73x0+9znb71uUkXUSWyCiiimZhRRRQBieL5Hj8LXjxySxuNmGivFtWHzr0lbhfx69O9MvHdZpbKKWYnUluNji+VGikVFULHnkdGbjO05JHPEviyF5/DF7HGkjuQpCx2i3THDA8RNw3Tv069qR0kk0i4uFilF3bSXTwE2a+YGzIAUQnnIPByN46kbjUPc6YNKC9X+SLtjeB9Mt5ZyqzbI1lQzK5R2C/KWGAT8w6dcjHUVzemSyxeCba9e5uWfzIgxl1aOTgXPebGw5BwR3ACdalna8+0R3Kx3nk6jc2MvkjTI2MH97zDnI+4uWOSny4z2Lqxmt/hytvHHIZhFHJsj0yJnyXDEfZ87M9cjPB56ik2y4xSsu7XcnjMsejeJszXBZZrgozaihZAYwQFfGIQM8Bvu9ehrTKuJtOg3y5gKs/8ApAy2Y5B846uMj8Tz/CaqzROH1C3EchW6uHGRZIVx9nHXs4yPvN1PydBV2Lc+u3eUkCJBDtZoAFJzJna/VjgjI/h/4EaaM5PS/wDXRF+iiirOcy7RZE8RanukmKPFA6q9yHVfvg7Y+qfd5J4Y9OhrJ8Nm4FxYJNNdORaXCsJtTS4yVnAyQoG5sfxfw/dPJrUslmGt6rcymQxM0MMStahMALk4cfM65c9cBSGA7mqmi2k9t/ZZuDJJKbSdpHNjHEdzyRvhtv3G5PyjIbBJOVqDpurP5d+zGWtzeLdahZQLNK895PiV7+N/IURKQQMEqNzKAm1tucnggG4Fkhs4INNuJL145oEuHa7BcIAuWOQRkrhiAFzkkEE8rNDJPczWtnG1rG7yfapvsqESsY1AIJPP3hztbPllTjHKvpv2CWKbTUSHdJCk8cVtH+8jUFOSNpGAynOTgJgL1BNRNxf9fmWNGa3fR7U2ty9zDswJZJxMzHvlwSCc5BwccVerBZ3tLf8Atqyguvsxt/n02OyUSsxk3FsHa24bnyM4ycgE9d1WDDIz1I5BHQ471SM6i1uY8cjjSIWLyFjLbDJu1J5Mefn6HryP4snH3hRdmTS5f7T82aS2WBY5Y5LpVjX94Pn+YYzhmydw4UDB4wio/wDY8K+XLuE1qcfY1B4MWfk6DHOT/Bg4+6K1zGkkHlSoroy7WVlGCCOQRSsU5JGUrPo96EZ5p7O7mmmlubm7XFqQoYIobB2fK3Aztwe3TQvX2QKQSMyxDiQJ1dR1P8u/TvWb5bWty2m3KTXlvqMs7Ky2ieVbqVBKSEcHJL4JB3ZwfeLfcQRnTpY7mT7PLZiOdbJSkillzgA44KMScLsBBAOBkuNxu0/6epcQv9s0+LfJzauxBuVJJDRckdXPJ+YcDJH8QrTrN2sNcsxsk2LZyjd9nXaCWj439VPH3RwcZ/hFaVNGc+gUUUUyAooooAxfFVxZxeG9RS7ltVQ25ytwzbCCQBkJ82MkD5eeah0G7tYLi/sZJrKO4fUbjy4oncM/SQ5D9Ww4J25Xniiis2/eO2FNOhf1/CxmamsEGp2+nzx2CwxajYyWKN9o3KdpX+Ebd2I2287ePm973i3UdMg0W+0y4mtUkNqJPKuBJ5ewuEBYx8gbiBxz+FFFS3ZM1p01OdNN/wBaFfVtU0qHxHplx9qsSkEl2853SvIjxwjdtCZXIX7wbt05q7YJdS6nc6vYxWEtpfi2KS+bKsjQhCSWBBG4bhtAAyCcmiinF3k0RWgqdKMl1VvxZfM2s+VkWFh5nl5x9tfG/djGfK6bec468Y/irN1CeUCeHXbrTItPc3O6GN5PNltxHx0I+YKXLYB42455ooqpaIwo2lO1rE1rZjUYrcCO2Gi
w/Zp7BYzKrkKu4bh8oAB2EA56HIz0u3uxL+1kbygQpQFt2fmki4GOOuOvfHbNFFPpcTu6nKFpsTU75V8oNJKXIXduOI4hk54zjHTjGO+a0KKKaM59PQxLqNNBMuoQpDFYJEfPQLISp8zcWAXIxh5CcLnOMnHQaFNKkkvtOFoli0k9xqP32Zn24LIFyM7kwRg98c5yUVL3No6xTfV2/IrxyW0vh21kQ2phaexKn97s5aErj+LPTbnj7u7+KujoopxZNZW+9/oQXdpBfWk1rcxiSGVGjdSSMhgQeRyOCRxWOtvbXU0Ol6mIHksJbea0IMoJYISGy3U5ST+JuB83uUUMVJuz121JnaL/AITOBSYPOGnyEA7/ADdvmJnH8G3IHX5s4xxmtmiihdRVFZR9AoooqjI//9k='
6 |
# OCR the base64-encoded JPEG above via the holey.cc NCKU captcha endpoint.
# NOTE(review): the base64 string is passed as a raw URL query parameter;
# '+' and '/' are not URL-escaped here — confirm the endpoint tolerates that.
response = requests.get('https://ocr.holey.cc/ncku?base64_str=%s' % image) # .split('base64,')[1])
print(response.content)
9 |
--------------------------------------------------------------------------------
/testing/poe_account_create_test.py:
--------------------------------------------------------------------------------
1 | from hashlib import md5
2 | from json import dumps
3 | from re import findall
4 | from typing import Optional
5 |
6 | from tls_client import Session as TLS
7 | from twocaptcha import TwoCaptcha
8 |
9 | from gpt4free.quora import extract_formkey
10 | from gpt4free.quora.mail import Emailnator
11 |
12 | solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358')
13 |
14 |
class Account:
    @staticmethod
    def create(proxy: Optional[str] = None, logging: bool = False, enable_bot_creation: bool = False):
        """Register a throwaway poe.com account via a disposable inbox.

        Flow: scrape formkey + tchannel, solve the login reCAPTCHA via the
        2captcha solver, request an email verification code, then sign up
        with that code. ``enable_bot_creation`` is accepted for signature
        compatibility but not used in this test script.
        """
        client = TLS(client_identifier='chrome110')
        client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None

        mail_client = Emailnator()
        mail_address = mail_client.get_mail()

        if logging:
            print('email', mail_address)

        client.headers = {
            'authority': 'poe.com',
            'accept': '*/*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'content-type': 'application/json',
            'origin': 'https://poe.com',
            'poe-formkey': 'null',
            'poe-tag-id': 'null',
            'poe-tchannel': 'null',
            'referer': 'https://poe.com/login',
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
        }

        # The formkey is scraped from the login page HTML and the tchannel from
        # the settings API; both are inputs to poe's request signing below.
        client.headers["poe-formkey"] = extract_formkey(client.get('https://poe.com/login').text)
        client.headers["poe-tchannel"] = client.get('https://poe.com/api/settings').json()['tchannelData']['channel']

        # token = reCaptchaV3('https://www.recaptcha.net/recaptcha/enterprise/anchor?ar=1&k=6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG&co=aHR0cHM6Ly9wb2UuY29tOjQ0Mw..&hl=en&v=4PnKmGB9wRHh1i04o7YUICeI&size=invisible&cb=bi6ivxoskyal')
        token = solver.recaptcha(
            sitekey='6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG',
            url='https://poe.com/login?redirect_url=%2F',
            version='v3',
            enterprise=1,
            invisible=1,
            action='login',
        )['code']

        payload = dumps(
            separators=(',', ':'),
            obj={
                'queryName': 'MainSignupLoginSection_sendVerificationCodeMutation_Mutation',
                'variables': {'emailAddress': mail_address, 'phoneNumber': None, 'recaptchaToken': token},
                'query': 'mutation MainSignupLoginSection_sendVerificationCodeMutation_Mutation(\n $emailAddress: String\n $phoneNumber: String\n $recaptchaToken: String\n) {\n sendVerificationCode(verificationReason: login, emailAddress: $emailAddress, phoneNumber: $phoneNumber, recaptchaToken: $recaptchaToken) {\n status\n errorMessage\n }\n}\n',
            },
        )

        # poe-tag-id = md5(payload + formkey + salt); the salt is poe's
        # client-side signing constant.
        base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
        client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()

        print(dumps(client.headers, indent=4))

        response = client.post('https://poe.com/api/gql_POST', data=payload)

        if 'automated_request_detected' in response.text:
            print('please try using a proxy / wait for fix')

        if 'Bad Request' in response.text:
            if logging:
                # Message fix: this path aborts the process — it never retried.
                print('bad request, exiting...', response.json())
            quit()

        if logging:
            print('send_code', response.json())

        # Pull the 6-7 digit verification code out of the received email HTML.
        mail_content = mail_client.get_message()
        mail_token = findall(r';">(\d{6,7})', mail_content)[0]

        if logging:
            print('code', mail_token)

        payload = dumps(
            separators=(',', ':'),
            obj={
                "queryName": "SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation",
                "variables": {"verificationCode": str(mail_token), "emailAddress": mail_address, "phoneNumber": None},
                "query": "mutation SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation(\n $verificationCode: String!\n $emailAddress: String\n $phoneNumber: String\n) {\n signupWithVerificationCode(verificationCode: $verificationCode, emailAddress: $emailAddress, phoneNumber: $phoneNumber) {\n status\n errorMessage\n }\n}\n",
            },
        )

        # Re-sign the second request with the new payload.
        base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
        client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()

        response = client.post('https://poe.com/api/gql_POST', data=payload)
        if logging:
            print('verify_code', response.json())
107 |
108 |
if __name__ == '__main__':
    # Demo invocation; previously ran unconditionally on import.
    # SECURITY NOTE(review): proxy credentials are hard-coded in source —
    # rotate them and load from the environment instead.
    Account.create(proxy='xtekky:wegwgwegwed_streaming-1@geo.iproyal.com:12321', logging=True)
110 |
--------------------------------------------------------------------------------
/unfinished/t3nsor/__init__.py:
--------------------------------------------------------------------------------
1 | from time import time
2 |
3 | from requests import post
4 |
# Browser-mimicking request headers required by www.t3nsor.tech; shared by
# both Completion and StreamCompletion below.
headers = {
    'authority': 'www.t3nsor.tech',
    'accept': '*/*',
    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'cache-control': 'no-cache',
    'content-type': 'application/json',
    'origin': 'https://www.t3nsor.tech',
    'pragma': 'no-cache',
    'referer': 'https://www.t3nsor.tech/',
    'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}
22 |
23 |
class T3nsorResponse:
    """Structured, attribute-style view over the raw t3nsor chat API payload."""

    class Completion:
        """Container for the list of returned completion choices."""

        class Choices:
            """One completion choice plus its metadata."""

            def __init__(self, choice: dict) -> None:
                self.text = choice['text']
                # Bytes form of the text, for callers wanting raw content.
                self.content = self.text.encode()
                self.index = choice['index']
                self.logprobs = choice['logprobs']
                self.finish_reason = choice['finish_reason']

            def __repr__(self) -> str:
                return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''

        def __init__(self, choices: dict) -> None:
            # Wrap every raw choice dict in a Choices object.
            self.choices = list(map(self.Choices, choices))

    class Usage:
        """Character-count accounting exposed under token-style names."""

        def __init__(self, usage_dict: dict) -> None:
            self.prompt_tokens, self.completion_tokens, self.total_tokens = (
                usage_dict['prompt_chars'],
                usage_dict['completion_chars'],
                usage_dict['total_chars'],
            )

        def __repr__(self):
            return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''

    def __init__(self, response_dict: dict) -> None:
        # Keep the raw payload for json(), then lift the fields we expose.
        self.response_dict = response_dict
        self.id = response_dict['id']
        self.object = response_dict['object']
        self.created = response_dict['created']
        self.model = response_dict['model']
        self.completion = self.Completion(response_dict['choices'])
        self.usage = self.Usage(response_dict['usage'])

    def json(self) -> dict:
        """Return the original, unmodified response dictionary."""
        return self.response_dict
60 |
61 |
class Completion:
    """Blocking client for the t3nsor.tech chat endpoint."""

    # Model descriptor merged into every request body.
    model = {
        'model': {
            'id': 'gpt-3.5-turbo',
            'name': 'Default (GPT-3.5)'
        }
    }

    @staticmethod
    def create(
            prompt: str = 'hello world',
            messages: list = None) -> T3nsorResponse:
        """POST *prompt* (plus optional prior *messages*) and wrap the reply.

        Bug fix: *messages* previously defaulted to a shared mutable ``[]``;
        it now defaults lazily to a fresh list per call.
        """
        if messages is None:
            messages = []

        response = post('https://www.t3nsor.tech/api/chat', headers=headers, json=Completion.model | {
            'messages': messages,
            'key': '',
            'prompt': prompt
        })

        return T3nsorResponse({
            'id': f'cmpl-1337-{int(time())}',
            'object': 'text_completion',
            'created': int(time()),
            'model': Completion.model,
            'choices': [{
                'text': response.text,
                'index': 0,
                'logprobs': None,
                'finish_reason': 'stop'
            }],
            'usage': {
                'prompt_chars': len(prompt),
                'completion_chars': len(response.text),
                'total_chars': len(prompt) + len(response.text)
            }
        })
96 |
97 |
class StreamCompletion:
    """Streaming variant of Completion; yields T3nsorResponse chunks."""

    # Model descriptor merged into every request body.
    model = {
        'model': {
            'id': 'gpt-3.5-turbo',
            'name': 'Default (GPT-3.5)'
        }
    }

    @staticmethod
    def create(
            prompt: str = 'hello world',
            messages: list = None) -> T3nsorResponse:
        """Yield one T3nsorResponse per received chunk.

        Bug fixes: *messages* previously defaulted to a shared mutable
        ``[]``; the class also reached into ``Completion.model`` instead of
        its own identical ``model`` table. ``chunk.decode()`` is now done
        once per chunk rather than three times.
        """
        if messages is None:
            messages = []

        print('t3nsor api is down, this may not work, refer to another module')

        response = post('https://www.t3nsor.tech/api/chat', headers=headers, stream=True, json=StreamCompletion.model | {
            'messages': messages,
            'key': '',
            'prompt': prompt
        })

        for chunk in response.iter_content(chunk_size=2046):
            text = chunk.decode()
            yield T3nsorResponse({
                'id': f'cmpl-1337-{int(time())}',
                'object': 'text_completion',
                'created': int(time()),
                'model': StreamCompletion.model,

                'choices': [{
                    'text': text,
                    'index': 0,
                    'logprobs': None,
                    'finish_reason': 'stop'
                }],

                'usage': {
                    'prompt_chars': len(prompt),
                    'completion_chars': len(text),
                    'total_chars': len(prompt) + len(text)
                }
            })
137 |
--------------------------------------------------------------------------------
/gui/streamlit_chat_app.py:
--------------------------------------------------------------------------------
1 | import atexit
2 | import Levenshtein
3 | import os
4 | import sys
5 |
6 | sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
7 |
8 | import streamlit as st
9 | from streamlit_chat import message
10 | from query_methods import query, avail_query_methods
11 | import pickle
12 |
13 | conversations_file = "conversations.pkl"
14 |
def load_conversations():
    """Load the pickled conversation history; empty list if absent or empty."""
    try:
        with open(conversations_file, "rb") as f:
            return pickle.load(f)
    except (FileNotFoundError, EOFError):
        # No history yet (missing file) or an empty/truncated pickle.
        return []
23 |
24 |
def save_conversations(conversations, current_conversation):
    """Persist *conversations* to disk, ensuring *current_conversation* is included.

    The list is written to a temporary file and atomically swapped in with
    os.replace, so a crash mid-write cannot corrupt the saved history.
    """
    # Simplification: the old loop "updated" the first matching entry by
    # assigning an equal value back onto itself — membership is all that
    # actually matters.
    if current_conversation not in conversations:
        conversations.append(current_conversation)

    temp_conversations_file = "temp_" + conversations_file
    with open(temp_conversations_file, "wb") as f:
        pickle.dump(conversations, f)

    os.replace(temp_conversations_file, conversations_file)
40 |
def delete_conversation(conversations, current_conversation):
    """Remove current_conversation from the list and persist the result.

    Raises ValueError (from list.remove) if the conversation is not present.
    """
    # Bug fix: the old implementation first overwrote conversations[0] with
    # current_conversation and only then called remove(), so it deleted
    # whichever conversation happened to be first — not the requested one —
    # and could leave the requested conversation in the list.
    conversations.remove(current_conversation)

    # Atomic write, same pattern as save_conversations.
    temp_conversations_file = "temp_" + conversations_file
    with open(temp_conversations_file, "wb") as f:
        pickle.dump(conversations, f)

    os.replace(temp_conversations_file, conversations_file)
52 |
def exit_handler():
    """atexit hook: flush the in-memory chat history to disk on shutdown."""
    print("Exiting, saving data...")
    # Perform cleanup operations here, like saving data or closing open files.
    save_conversations(st.session_state.conversations, st.session_state.current_conversation)
57 |
58 |
# Register the exit_handler function to be called when the program is closing.
atexit.register(exit_handler)

st.header("Chat Placeholder")

# --- Session-state bootstrap -------------------------------------------------
# Streamlit re-executes this whole script on every interaction, so every piece
# of mutable UI state must be seeded into st.session_state exactly once.
if 'conversations' not in st.session_state:
    st.session_state['conversations'] = load_conversations()

if 'input_text' not in st.session_state:
    st.session_state['input_text'] = ''

if 'selected_conversation' not in st.session_state:
    st.session_state['selected_conversation'] = None

# Counter used to mint fresh widget keys so the input box can be "cleared".
if 'input_field_key' not in st.session_state:
    st.session_state['input_field_key'] = 0

if 'query_method' not in st.session_state:
    st.session_state['query_method'] = query

if 'search_query' not in st.session_state:
    st.session_state['search_query'] = ''

# Initialize new conversation
if 'current_conversation' not in st.session_state or st.session_state['current_conversation'] is None:
    st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}

# The input box lives in a placeholder so it can be re-rendered (cleared) below.
input_placeholder = st.empty()
user_input = input_placeholder.text_input(
    'You:', value=st.session_state['input_text'], key=f'input_text_-1'#{st.session_state["input_field_key"]}
)
submit_button = st.button("Submit")

if (user_input and user_input != st.session_state['input_text']) or submit_button:
    output = query(user_input, st.session_state['query_method'])

    # NOTE(review): round-tripping through unicode-escape mangles non-ASCII
    # output (accented text, emoji) — presumably meant to turn literal "\n"
    # sequences into newlines; confirm before relying on it.
    escaped_output = output.encode('utf-8').decode('unicode-escape')

    st.session_state['current_conversation']['user_inputs'].append(user_input)
    st.session_state.current_conversation['generated_responses'].append(escaped_output)
    save_conversations(st.session_state.conversations, st.session_state.current_conversation)
    st.session_state['input_text'] = ''
    st.session_state['input_field_key'] += 1  # Increment key value for new widget
    user_input = input_placeholder.text_input(
        'You:', value=st.session_state['input_text'], key=f'input_text_{st.session_state["input_field_key"]}'
    )  # Clear the input field

# Add a button to create a new conversation
if st.sidebar.button("New Conversation"):
    st.session_state['selected_conversation'] = None
    st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
    st.session_state['input_field_key'] += 1  # Increment key value for new widget
st.session_state['query_method'] = st.sidebar.selectbox("Select API:", options=avail_query_methods, index=0)

# Proxy
st.session_state['proxy'] = st.sidebar.text_input("Proxy: ")

# Searchbar
search_query = st.sidebar.text_input("Search Conversations:", value=st.session_state.get('search_query', ''), key='search')

if search_query:
    filtered_conversations = []
    indices = []
    for idx, conversation in enumerate(st.session_state.conversations):
        # NOTE(review): assumes every saved conversation has at least one
        # user input; an empty conversation would raise IndexError here.
        if search_query in conversation['user_inputs'][0]:
            filtered_conversations.append(conversation)
            indices.append(idx)

    filtered_conversations = list(zip(indices, filtered_conversations))
    # Rank matches by edit distance between the query and the first prompt.
    conversations = sorted(filtered_conversations, key=lambda x: Levenshtein.distance(search_query, x[1]['user_inputs'][0]))

    sidebar_header = f"Search Results ({len(conversations)})"
else:
    conversations = st.session_state.conversations
    sidebar_header = "Conversation History"

# Sidebar
st.sidebar.header(sidebar_header)
sidebar_col1, sidebar_col2 = st.sidebar.columns([5,1])
# NOTE(review): when a search is active, `conversations` holds
# (index, conversation) tuples from the zip above, so the dict subscripts in
# this loop would fail on search results — confirm intended behavior.
for idx, conversation in enumerate(conversations):
    if sidebar_col1.button(f"Conversation {idx + 1}: {conversation['user_inputs'][0]}", key=f"sidebar_btn_{idx}"):
        st.session_state['selected_conversation'] = idx
        st.session_state['current_conversation'] = conversation
    if sidebar_col2.button('🗑️', key=f"sidebar_btn_delete_{idx}"):
        if st.session_state['selected_conversation'] == idx:
            st.session_state['selected_conversation'] = None
            st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
        delete_conversation(conversations, conversation)
        st.experimental_rerun()
if st.session_state['selected_conversation'] is not None:
    conversation_to_display = conversations[st.session_state['selected_conversation']]
else:
    conversation_to_display = st.session_state.current_conversation

# Render the transcript, newest exchange first.
if conversation_to_display['generated_responses']:
    for i in range(len(conversation_to_display['generated_responses']) - 1, -1, -1):
        message(conversation_to_display["generated_responses"][i], key=f"display_generated_{i}")
        message(conversation_to_display['user_inputs'][i], is_user=True, key=f"display_user_{i}")
--------------------------------------------------------------------------------
/gpt4free/forefront/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pickle
3 | from json import loads
4 | from re import findall
5 | from time import time, sleep
6 | from typing import Generator, Optional
7 | from uuid import uuid4
8 |
9 | from fake_useragent import UserAgent
10 | from pymailtm import MailTm, Message
11 | from requests import post
12 | from tls_client import Session
13 |
14 | from .typing import ForeFrontResponse
15 |
16 |
class Account:
    # Where the clerk session cookies are cached between runs.
    COOKIES_FILE_NAME = 'cookies.pickle'

    @staticmethod
    def login(proxy: Optional[str] = None, logging: bool = False) -> str:
        """Return a forefront session JWT.

        Reuses cookies cached in COOKIES_FILE_NAME when the site still accepts
        them; otherwise falls back to creating a fresh throwaway account.
        """
        if not os.path.isfile(Account.COOKIES_FILE_NAME):
            return Account.create(proxy, logging)

        with open(Account.COOKIES_FILE_NAME, 'rb') as f:
            cookies = pickle.load(f)
        # Consistency fix: no proxy -> None (not False), matching
        # StreamingCompletion.create in this module.
        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None

        client = Session(client_identifier='chrome110')
        client.proxies = proxies
        client.cookies.update(cookies)

        if Account.is_cookie_enabled(client):
            response = client.get('https://clerk.forefront.ai/v1/client?_clerk_js_version=4.38.4')
            return response.json()['response']['sessions'][0]['last_active_token']['jwt']
        else:
            return Account.create(proxy, logging)

    @staticmethod
    def create(proxy: Optional[str] = None, logging: bool = False, save_cookies: bool = False) -> str:
        """Create a throwaway forefront account via a temp-mail inbox.

        Returns the session JWT on success, or the string
        'Failed to create account!' on failure (kept for compatibility).
        """
        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None

        start = time()

        mail_client = MailTm().get_account()
        mail_address = mail_client.address

        client = Session(client_identifier='chrome110')
        client.proxies = proxies
        client.headers = {
            'origin': 'https://accounts.forefront.ai',
            'user-agent': UserAgent().random,
        }

        response = client.post(
            'https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.38.4',
            data={'email_address': mail_address},
        )

        try:
            trace_token = response.json()['response']['id']
            if logging:
                print(trace_token)
        except KeyError:
            return 'Failed to create account!'

        response = client.post(
            f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.38.4',
            data={'strategy': 'email_link', 'redirect_url': 'https://accounts.forefront.ai/sign-up/verify'},
        )

        if logging:
            print(response.text)

        if 'sign_up_attempt' not in response.text:
            return 'Failed to create account!'

        # Poll the temp inbox until a mail containing the clerk verification
        # link arrives.
        while True:
            sleep(1)
            new_message: Message = mail_client.wait_for_message()
            if logging:
                print(new_message.data['id'])

            # Robustness fix: the old code indexed findall(...)[0] directly,
            # which raised IndexError on any mail without the link (so the
            # retry loop could never actually retry). Skip such mails and keep
            # polling instead.
            links = findall(r'https:\/\/clerk\.forefront\.ai\/v1\/verify\?token=\w.+', new_message.text)
            if links:
                verification_url = links[0]
                break

        if logging:
            print(verification_url)

        # Visiting the link completes verification as a session side effect;
        # the response body itself is not needed.
        response = client.get(verification_url)

        response = client.get('https://clerk.forefront.ai/v1/client?_clerk_js_version=4.38.4')

        token = response.json()['response']['sessions'][0]['last_active_token']['jwt']

        if save_cookies:
            with open(Account.COOKIES_FILE_NAME, 'wb') as f:
                pickle.dump(client.cookies, f)

        with open('accounts.txt', 'a') as f:
            f.write(f'{mail_address}:{token}\n')

        if logging:
            print(time() - start)

        return token

    @staticmethod
    def is_cookie_enabled(client: Session) -> bool:
        """True when the cached cookies still log us into chat.forefront.ai."""
        response = client.get('https://chat.forefront.ai/')
        return 'window.startClerk' in response.text
114 |
115 |
class StreamingCompletion:
    @staticmethod
    def create(
        token=None,
        chat_id=None,
        prompt='',
        action_type='new',
        default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
        model='gpt-4',
        proxy=None,
    ) -> Generator[ForeFrontResponse, None, None]:
        """Stream completion fragments for *prompt* from the forefront chat
        endpoint, yielding one ForeFrontResponse per content delta."""
        if not token:
            raise Exception('Token is required!')
        if not chat_id:
            chat_id = str(uuid4())

        proxies = None
        if proxy:
            proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy}

        headers = {
            'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
            'accept': '*/*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'authorization': 'Bearer ' + token,
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            'origin': 'https://chat.forefront.ai',
            'pragma': 'no-cache',
            'referer': 'https://chat.forefront.ai/',
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'cross-site',
            'user-agent': UserAgent().random,
        }

        payload = {
            'text': prompt,
            'action': action_type,
            'parentId': chat_id,
            'workspaceId': chat_id,
            'messagePersona': default_persona,
            'model': model,
        }

        stream = post(
            'https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
            headers=headers,
            proxies=proxies,
            json=payload,
            stream=True,
        )

        for line in stream.iter_lines():
            # Only in-progress SSE lines carry content deltas.
            if b'finish_reason":null' not in line:
                continue
            data = loads(line.decode('utf-8').split('data: ')[1])
            content = data['choices'][0]['delta'].get('content')
            if content is None:
                continue
            yield ForeFrontResponse(
                id=chat_id,
                object='text_completion',
                created=int(time()),
                text=content,
                model=model,
                choices=[{'text': content, 'index': 0, 'logprobs': None, 'finish_reason': 'stop'}],
                # Character counts stand in for token counts here.
                usage={
                    'prompt_tokens': len(prompt),
                    'completion_tokens': len(content),
                    'total_tokens': len(prompt) + len(content),
                },
            )
189 |
190 |
class Completion:
    @staticmethod
    def create(
        token=None,
        chat_id=None,
        prompt='',
        action_type='new',
        default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
        model='gpt-4',
        proxy=None,
    ) -> ForeFrontResponse:
        """Drain StreamingCompletion.create and return a single
        ForeFrontResponse whose .text is the concatenated stream."""
        pieces = []
        last_response = None

        for partial in StreamingCompletion.create(
            token=token,
            chat_id=chat_id,
            prompt=prompt,
            action_type=action_type,
            default_persona=default_persona,
            model=model,
            proxy=proxy,
        ):
            if partial:
                last_response = partial
                pieces.append(partial.text)

        if last_response is None:
            raise Exception('Unable to get the response, Please try again')

        # Reuse the final chunk's metadata, replacing its text with the whole
        # accumulated output.
        last_response.text = ''.join(pieces)
        return last_response
223 |
--------------------------------------------------------------------------------
/gpt4free/quora/graphql/ChatListPaginationQuery.graphql:
--------------------------------------------------------------------------------
1 | query ChatListPaginationQuery(
2 | $count: Int = 5
3 | $cursor: String
4 | $id: ID!
5 | ) {
6 | node(id: $id) {
7 | __typename
8 | ...ChatPageMain_chat_1G22uz
9 | id
10 | }
11 | }
12 |
13 | fragment BotImage_bot on Bot {
14 | displayName
15 | ...botHelpers_useDeletion_bot
16 | ...BotImage_useProfileImage_bot
17 | }
18 |
19 | fragment BotImage_useProfileImage_bot on Bot {
20 | image {
21 | __typename
22 | ... on LocalBotImage {
23 | localName
24 | }
25 | ... on UrlBotImage {
26 | url
27 | }
28 | }
29 | ...botHelpers_useDeletion_bot
30 | }
31 |
32 | fragment ChatMessageDownvotedButton_message on Message {
33 | ...MessageFeedbackReasonModal_message
34 | ...MessageFeedbackOtherModal_message
35 | }
36 |
37 | fragment ChatMessageDropdownMenu_message on Message {
38 | id
39 | messageId
40 | vote
41 | text
42 | author
43 | ...chatHelpers_isBotMessage
44 | }
45 |
46 | fragment ChatMessageFeedbackButtons_message on Message {
47 | id
48 | messageId
49 | vote
50 | voteReason
51 | ...ChatMessageDownvotedButton_message
52 | }
53 |
54 | fragment ChatMessageInputView_chat on Chat {
55 | id
56 | chatId
57 | defaultBotObject {
58 | nickname
59 | messageLimit {
60 | dailyBalance
61 | shouldShowRemainingMessageCount
62 | }
63 | hasClearContext
64 | isDown
65 | ...botHelpers_useDeletion_bot
66 | id
67 | }
68 | shouldShowDisclaimer
69 | ...chatHelpers_useSendMessage_chat
70 | ...chatHelpers_useSendChatBreak_chat
71 | }
72 |
73 | fragment ChatMessageInputView_edges on MessageEdge {
74 | node {
75 | ...chatHelpers_isChatBreak
76 | ...chatHelpers_isHumanMessage
77 | state
78 | text
79 | id
80 | }
81 | }
82 |
83 | fragment ChatMessageOverflowButton_message on Message {
84 | text
85 | ...ChatMessageDropdownMenu_message
86 | ...chatHelpers_isBotMessage
87 | }
88 |
89 | fragment ChatMessageSuggestedReplies_SuggestedReplyButton_chat on Chat {
90 | ...chatHelpers_useSendMessage_chat
91 | }
92 |
93 | fragment ChatMessageSuggestedReplies_SuggestedReplyButton_message on Message {
94 | messageId
95 | }
96 |
97 | fragment ChatMessageSuggestedReplies_chat on Chat {
98 | ...ChatWelcomeView_chat
99 | ...ChatMessageSuggestedReplies_SuggestedReplyButton_chat
100 | defaultBotObject {
101 | hasWelcomeTopics
102 | id
103 | }
104 | }
105 |
106 | fragment ChatMessageSuggestedReplies_message on Message {
107 | suggestedReplies
108 | ...ChatMessageSuggestedReplies_SuggestedReplyButton_message
109 | }
110 |
111 | fragment ChatMessage_chat on Chat {
112 | defaultBotObject {
113 | hasWelcomeTopics
114 | hasSuggestedReplies
115 | disclaimerText
116 | messageLimit {
117 | ...ChatPageRateLimitedBanner_messageLimit
118 | }
119 | ...ChatPageDisclaimer_bot
120 | id
121 | }
122 | ...ChatMessageSuggestedReplies_chat
123 | ...ChatWelcomeView_chat
124 | }
125 |
126 | fragment ChatMessage_message on Message {
127 | id
128 | messageId
129 | text
130 | author
131 | linkifiedText
132 | state
133 | contentType
134 | ...ChatMessageSuggestedReplies_message
135 | ...ChatMessageFeedbackButtons_message
136 | ...ChatMessageOverflowButton_message
137 | ...chatHelpers_isHumanMessage
138 | ...chatHelpers_isBotMessage
139 | ...chatHelpers_isChatBreak
140 | ...chatHelpers_useTimeoutLevel
141 | ...MarkdownLinkInner_message
142 | ...IdAnnotation_node
143 | }
144 |
145 | fragment ChatMessagesView_chat on Chat {
146 | ...ChatMessage_chat
147 | ...ChatWelcomeView_chat
148 | ...IdAnnotation_node
149 | defaultBotObject {
150 | hasWelcomeTopics
151 | messageLimit {
152 | ...ChatPageRateLimitedBanner_messageLimit
153 | }
154 | id
155 | }
156 | }
157 |
158 | fragment ChatMessagesView_edges on MessageEdge {
159 | node {
160 | id
161 | messageId
162 | creationTime
163 | ...ChatMessage_message
164 | ...chatHelpers_isBotMessage
165 | ...chatHelpers_isHumanMessage
166 | ...chatHelpers_isChatBreak
167 | }
168 | }
169 |
170 | fragment ChatPageDeleteFooter_chat on Chat {
171 | ...MessageDeleteConfirmationModal_chat
172 | }
173 |
174 | fragment ChatPageDisclaimer_bot on Bot {
175 | disclaimerText
176 | }
177 |
178 | fragment ChatPageMainFooter_chat on Chat {
179 | defaultBotObject {
180 | ...ChatPageMainFooter_useAccessMessage_bot
181 | id
182 | }
183 | ...ChatMessageInputView_chat
184 | ...ChatPageShareFooter_chat
185 | ...ChatPageDeleteFooter_chat
186 | }
187 |
188 | fragment ChatPageMainFooter_edges on MessageEdge {
189 | ...ChatMessageInputView_edges
190 | }
191 |
192 | fragment ChatPageMainFooter_useAccessMessage_bot on Bot {
193 | ...botHelpers_useDeletion_bot
194 | ...botHelpers_useViewerCanAccessPrivateBot
195 | }
196 |
197 | fragment ChatPageMain_chat_1G22uz on Chat {
198 | id
199 | chatId
200 | ...ChatPageShareFooter_chat
201 | ...ChatPageDeleteFooter_chat
202 | ...ChatMessagesView_chat
203 | ...MarkdownLinkInner_chat
204 | ...chatHelpers_useUpdateStaleChat_chat
205 | ...ChatSubscriptionPaywallContextWrapper_chat
206 | ...ChatPageMainFooter_chat
207 | messagesConnection(last: $count, before: $cursor) {
208 | edges {
209 | ...ChatMessagesView_edges
210 | ...ChatPageMainFooter_edges
211 | ...MarkdownLinkInner_edges
212 | node {
213 | ...chatHelpers_useUpdateStaleChat_message
214 | id
215 | __typename
216 | }
217 | cursor
218 | id
219 | }
220 | pageInfo {
221 | hasPreviousPage
222 | startCursor
223 | }
224 | id
225 | }
226 | }
227 |
228 | fragment ChatPageRateLimitedBanner_messageLimit on MessageLimit {
229 | numMessagesRemaining
230 | }
231 |
232 | fragment ChatPageShareFooter_chat on Chat {
233 | chatId
234 | }
235 |
236 | fragment ChatSubscriptionPaywallContextWrapper_chat on Chat {
237 | defaultBotObject {
238 | messageLimit {
239 | numMessagesRemaining
240 | shouldShowRemainingMessageCount
241 | }
242 | ...SubscriptionPaywallModal_bot
243 | id
244 | }
245 | }
246 |
247 | fragment ChatWelcomeView_ChatWelcomeButton_chat on Chat {
248 | ...chatHelpers_useSendMessage_chat
249 | }
250 |
251 | fragment ChatWelcomeView_chat on Chat {
252 | ...ChatWelcomeView_ChatWelcomeButton_chat
253 | defaultBotObject {
254 | displayName
255 | id
256 | }
257 | }
258 |
259 | fragment IdAnnotation_node on Node {
260 | __isNode: __typename
261 | id
262 | }
263 |
264 | fragment MarkdownLinkInner_chat on Chat {
265 | id
266 | chatId
267 | defaultBotObject {
268 | nickname
269 | id
270 | }
271 | ...chatHelpers_useSendMessage_chat
272 | }
273 |
274 | fragment MarkdownLinkInner_edges on MessageEdge {
275 | node {
276 | state
277 | id
278 | }
279 | }
280 |
281 | fragment MarkdownLinkInner_message on Message {
282 | messageId
283 | }
284 |
285 | fragment MessageDeleteConfirmationModal_chat on Chat {
286 | id
287 | }
288 |
289 | fragment MessageFeedbackOtherModal_message on Message {
290 | id
291 | messageId
292 | }
293 |
294 | fragment MessageFeedbackReasonModal_message on Message {
295 | id
296 | messageId
297 | }
298 |
299 | fragment SubscriptionPaywallModal_bot on Bot {
300 | displayName
301 | messageLimit {
302 | dailyLimit
303 | numMessagesRemaining
304 | shouldShowRemainingMessageCount
305 | resetTime
306 | }
307 | ...BotImage_bot
308 | }
309 |
310 | fragment botHelpers_useDeletion_bot on Bot {
311 | deletionState
312 | }
313 |
314 | fragment botHelpers_useViewerCanAccessPrivateBot on Bot {
315 | isPrivateBot
316 | viewerIsCreator
317 | }
318 |
319 | fragment chatHelpers_isBotMessage on Message {
320 | ...chatHelpers_isHumanMessage
321 | ...chatHelpers_isChatBreak
322 | }
323 |
324 | fragment chatHelpers_isChatBreak on Message {
325 | author
326 | }
327 |
328 | fragment chatHelpers_isHumanMessage on Message {
329 | author
330 | }
331 |
332 | fragment chatHelpers_useSendChatBreak_chat on Chat {
333 | id
334 | chatId
335 | defaultBotObject {
336 | nickname
337 | introduction
338 | model
339 | id
340 | }
341 | shouldShowDisclaimer
342 | }
343 |
344 | fragment chatHelpers_useSendMessage_chat on Chat {
345 | id
346 | chatId
347 | defaultBotObject {
348 | id
349 | nickname
350 | }
351 | shouldShowDisclaimer
352 | }
353 |
354 | fragment chatHelpers_useTimeoutLevel on Message {
355 | id
356 | state
357 | text
358 | messageId
359 | chat {
360 | chatId
361 | defaultBotNickname
362 | id
363 | }
364 | }
365 |
366 | fragment chatHelpers_useUpdateStaleChat_chat on Chat {
367 | chatId
368 | defaultBotObject {
369 | contextClearWindowSecs
370 | id
371 | }
372 | ...chatHelpers_useSendChatBreak_chat
373 | }
374 |
375 | fragment chatHelpers_useUpdateStaleChat_message on Message {
376 | creationTime
377 | ...chatHelpers_isChatBreak
378 | }
379 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Due to legal and personal issues, the development speed of this Repository may slow down over the next one to two weeks. I apologize for any inconvenience this may cause. I have been putting a lot of effort into this small personal/educational project, and it is now on the verge of being taken down.
2 |
3 | You may join our discord: discord.gg/gpt4free for further updates. 
4 |
5 |
6 |
7 |
8 | ## Legal Notice
9 |
10 | This repository is _not_ associated with or endorsed by providers of the APIs contained in this GitHub repository. This project is intended **for educational purposes only**. This is just a little personal project. Sites may contact me to improve their security or request the removal of their site from this repository.
11 |
12 | Please note the following:
13 |
14 | 1. **Disclaimer**: The APIs, services, and trademarks mentioned in this repository belong to their respective owners. This project is _not_ claiming any right over them nor is it affiliated with or endorsed by any of the providers mentioned.
15 |
16 | 2. **Responsibility**: The author of this repository is _not_ responsible for any consequences, damages, or losses arising from the use or misuse of this repository or the content provided by the third-party APIs. Users are solely responsible for their actions and any repercussions that may follow.
17 |
18 | 3. **Educational Purposes Only**: This repository and its content are provided strictly for educational purposes. By using the information and code provided, users acknowledge that they are using the APIs and models at their own risk and agree to comply with any applicable laws and regulations.
19 |
20 | 4. **Copyright**: All content in this repository, including but not limited to code, images, and documentation, is the intellectual property of the repository author, unless otherwise stated. Unauthorized copying, distribution, or use of any content in this repository is strictly prohibited without the express written consent of the repository author.
21 |
22 | 5. **Indemnification**: Users agree to indemnify, defend, and hold harmless the author of this repository from and against any and all claims, liabilities, damages, losses, or expenses, including legal fees and costs, arising out of or in any way connected with their use or misuse of this repository, its content, or related third-party APIs.
23 |
24 | 6. **Updates and Changes**: The author reserves the right to modify, update, or remove any content, information, or features in this repository at any time without prior notice. Users are responsible for regularly reviewing the content and any changes made to this repository.
25 |
26 | By using this repository or any code related to it, you agree to these terms. The author is not responsible for any copies, forks, or reuploads made by other users. This is the author's only account and repository. To prevent impersonation or irresponsible actions, you must comply with the GNU GPL license this Repository uses.
27 |
28 |
29 |
30 |
31 | Just API's from some language model sites.
32 |
33 |
34 | # Related gpt4free projects
35 |
36 |
37 |
38 |
39 | | 🎁 Projects |
40 | ⭐ Stars |
41 | 📚 Forks |
42 | 🛎 Issues |
43 | 📬 Pull requests |
44 |
45 |
46 |
47 |
48 | | gpt4free |
49 |  |
50 |  |
51 |  |
52 |  |
53 |
54 |
55 | | ChatGPT-Clone |
56 |  |
57 |  |
58 |  |
59 |  |
60 |
61 |
62 | | ChatGpt Discord Bot |
63 |  |
64 |  |
65 |  |
66 |  |
67 |
68 |
69 |
70 |
71 |
72 | ## Table of Contents
73 | | Section | Description | Link | Status |
74 | | ------- | ----------- | ---- | ------ |
75 | | **To do list** | List of tasks to be done | [](#todo) | - |
76 | | **Current Sites** | Current websites or platforms that can be used as APIs | [](#current-sites) | - |
77 | | **Best Sites for gpt4** | Recommended websites or platforms for gpt4 | [](#best-sites) | - |
78 | | **Streamlit GPT4Free GUI** | Web-based graphical user interface for interacting with gpt4free | [](#streamlit-gpt4free-gui) | - |
79 | | **Docker** | Instructions on how to run gpt4free in a Docker container | [](#docker-instructions) | - |
80 | | **ChatGPT clone** | A ChatGPT clone with new features and scalability | [](https://chat.chatbot.sex/chat) | - |
81 | | **How to install** | Instructions on how to install gpt4free | [](#install) | - |
82 | | **Usage Examples** | | | |
83 | | `theb` | Example usage for theb (gpt-3.5) | [](gpt4free/theb/README.md) |  |
84 | | `forefront` | Example usage for forefront (gpt-4) | [](gpt4free/forefront/README.md) |  | ||
85 | | `quora (poe)` | Example usage for quora | [](gpt4free/quora/README.md) |  |
86 | | `you` | Example usage for you | [](gpt4free/you/README.md) |  |
87 | | **Try it Out** | | | |
88 | | Google Colab Jupyter Notebook | Example usage for gpt4free | [](https://colab.research.google.com/github/DanielShemesh/gpt4free-colab/blob/main/gpt4free.ipynb) | - |
89 | | replit Example (feel free to fork this repl) | Example usage for gpt4free | [](https://replit.com/@gpt4free/gpt4free-webui) | - |
90 | | **Legal Notice** | Legal notice or disclaimer | [](#legal-notice) | - |
91 | | **Copyright** | Copyright information | [](#copyright) | - |
92 | | **Star History** | Star History | [](#star-history) | - |
93 |
94 |
95 | ## To do list
96 |
97 | - [x] Add a GUI for the repo
98 | - [ ] Make a general package named `gpt4free`, instead of different folders
99 | - [ ] Live api status to know which are down and which can be used
100 | - [ ] Integrate more API's in `./unfinished` as well as other ones in the lists
101 | - [ ] Make an API to use as proxy for other projects
102 | - [ ] Make a pypi package
103 |
104 | ## Current Sites
105 |
106 | | Website(s)                                       | Model(s)                         |
107 | | ------------------------------------------------ | -------------------------------- |
108 | | [forefront.ai](https://chat.forefront.ai) | GPT-4/3.5 |
109 | | [poe.com](https://poe.com) | GPT-4/3.5 |
110 | | [writesonic.com](https://writesonic.com) | GPT-3.5 / Internet |
111 | | [t3nsor.com](https://t3nsor.com) | GPT-3.5 |
112 | | [you.com](https://you.com) | GPT-3.5 / Internet / good search |
113 | | [sqlchat.ai](https://sqlchat.ai) | GPT-3.5 |
114 | | [bard.google.com](https://bard.google.com) | custom / search |
115 | | [bing.com/chat](https://bing.com/chat) | GPT-4/3.5 |
116 | | [italygpt.it](https://italygpt.it) | GPT-3.5 |
117 |
118 | ## Best sites
119 |
120 | #### gpt-4
121 |
122 | - [`/forefront`](gpt4free/forefront/README.md)
123 |
124 | #### gpt-3.5
125 |
126 | - [`/you`](gpt4free/you/README.md)
127 |
128 | ## Install
129 |
130 | Download or clone this GitHub repo
131 | install requirements with:
132 |
133 | ```sh
134 | pip3 install -r requirements.txt
135 | ```
136 |
137 |
138 | ## To start gpt4free GUI
139 |
140 | Move `streamlit_app.py` from `./gui` to the base folder then run:
141 | `streamlit run streamlit_app.py` or `python3 -m streamlit run streamlit_app.py`
142 |
143 | ## Docker
144 |
145 | Build
146 |
147 | ```
148 | docker build -t gpt4free:latest .
149 | ```
150 |
151 | Run
152 |
153 | ```
154 | docker run -p 8501:8501 gpt4free:latest
155 | ```
156 |
157 | ## Deploy using docker-compose
158 |
159 | Run the following:
160 |
161 | ```
162 | docker-compose up --build -d
163 | ```
164 |
165 | ## ChatGPT clone
166 |
167 | > Currently implementing new features and trying to scale it, please be patient it may be unstable
168 | > https://chat.g4f.ai/chat
169 | > This site was developed by me and includes **gpt-4/3.5**, **internet access** and **gpt-jailbreak's** like DAN
170 | > Run locally here: https://github.com/xtekky/chatgpt-clone
171 |
172 | ## Copyright:
173 |
174 | This program is licensed under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.txt)
175 |
176 | Most code, with the exception of `quora/api.py` (by [ading2210](https://github.com/ading2210)), has been written by me, [xtekky](https://github.com/xtekky).
177 |
178 | ### Copyright Notice:
179 |
180 | ```
181 | xtekky/gpt4free: multiple reverse engineered language-model api's to decentralise the ai industry.
182 | Copyright (C) 2023 xtekky
183 |
184 | This program is free software: you can redistribute it and/or modify
185 | it under the terms of the GNU General Public License as published by
186 | the Free Software Foundation, either version 3 of the License, or
187 | (at your option) any later version.
188 |
189 | This program is distributed in the hope that it will be useful,
190 | but WITHOUT ANY WARRANTY; without even the implied warranty of
191 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
192 | GNU General Public License for more details.
193 |
194 | You should have received a copy of the GNU General Public License
195 | along with this program. If not, see <https://www.gnu.org/licenses/>.
196 | ```
197 |
198 |
199 | ## Star History
200 |
201 |
202 |
203 |
204 |
--------------------------------------------------------------------------------
/gpt4free/quora/api.py:
--------------------------------------------------------------------------------
1 | # This file was taken from the repository poe-api https://github.com/ading2210/poe-api and is unmodified
2 | # This file is licensed under the GNU GPL v3 and written by @ading2210
3 |
4 | # license:
5 | # ading2210/poe-api: a reverse engineered Python API wrapepr for Quora's Poe
6 | # Copyright (C) 2023 ading2210
7 |
8 | # This program is free software: you can redistribute it and/or modify
9 | # it under the terms of the GNU General Public License as published by
10 | # the Free Software Foundation, either version 3 of the License, or
11 | # (at your option) any later version.
12 |
13 | # This program is distributed in the hope that it will be useful,
14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 | # GNU General Public License for more details.
17 |
18 | # You should have received a copy of the GNU General Public License
19 | # along with this program. If not, see <https://www.gnu.org/licenses/>.
20 |
21 | import hashlib
22 | import json
23 | import logging
24 | import queue
25 | import random
26 | import re
27 | import threading
28 | import time
29 | import traceback
30 | from pathlib import Path
31 | from urllib.parse import urlparse
32 |
33 | import requests
34 | import requests.adapters
35 | import websocket
36 |
37 | parent_path = Path(__file__).resolve().parent
38 | queries_path = parent_path / "graphql"
39 | queries = {}
40 |
41 | logging.basicConfig()
42 | logger = logging.getLogger()
43 |
44 | user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Firefox/102.0"
45 |
46 |
47 | def load_queries():
48 | for path in queries_path.iterdir():
49 | if path.suffix != ".graphql":
50 | continue
51 | with open(path) as f:
52 | queries[path.stem] = f.read()
53 |
54 |
55 | def generate_payload(query_name, variables):
56 | return {"query": queries[query_name], "variables": variables}
57 |
58 |
59 | def request_with_retries(method, *args, **kwargs):
60 | attempts = kwargs.get("attempts") or 10
61 | url = args[0]
62 | for i in range(attempts):
63 | r = method(*args, **kwargs)
64 | if r.status_code == 200:
65 | return r
66 | logger.warn(
67 | f"Server returned a status code of {r.status_code} while downloading {url}. Retrying ({i + 1}/{attempts})..."
68 | )
69 |
70 | raise RuntimeError(f"Failed to download {url} too many times.")
71 |
72 |
73 | class Client:
74 | gql_url = "https://poe.com/api/gql_POST"
75 | gql_recv_url = "https://poe.com/api/receive_POST"
76 | home_url = "https://poe.com"
77 | settings_url = "https://poe.com/api/settings"
78 |
79 | def __init__(self, token, proxy=None):
80 | self.proxy = proxy
81 | self.session = requests.Session()
82 | self.adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
83 | self.session.mount("http://", self.adapter)
84 | self.session.mount("https://", self.adapter)
85 |
86 | if proxy:
87 | self.session.proxies = {"http": self.proxy, "https": self.proxy}
88 | logger.info(f"Proxy enabled: {self.proxy}")
89 |
90 | self.active_messages = {}
91 | self.message_queues = {}
92 |
93 | self.session.cookies.set("p-b", token, domain="poe.com")
94 | self.headers = {
95 | "User-Agent": user_agent,
96 | "Referrer": "https://poe.com/",
97 | "Origin": "https://poe.com",
98 | }
99 | self.session.headers.update(self.headers)
100 |
101 | self.setup_connection()
102 | self.connect_ws()
103 |
104 | def setup_connection(self):
105 | self.ws_domain = f"tch{random.randint(1, 1e6)}"
106 | self.next_data = self.get_next_data(overwrite_vars=True)
107 | self.channel = self.get_channel_data()
108 | self.bots = self.get_bots(download_next_data=False)
109 | self.bot_names = self.get_bot_names()
110 |
111 | self.gql_headers = {
112 | "poe-formkey": self.formkey,
113 | "poe-tchannel": self.channel["channel"],
114 | }
115 | self.gql_headers = {**self.gql_headers, **self.headers}
116 | self.subscribe()
117 |
118 | def extract_formkey(self, html):
119 | script_regex = r""
120 | script_text = re.search(script_regex, html).group(1)
121 | key_regex = r'var .="([0-9a-f]+)",'
122 | key_text = re.search(key_regex, script_text).group(1)
123 | cipher_regex = r".\[(\d+)\]=.\[(\d+)\]"
124 | cipher_pairs = re.findall(cipher_regex, script_text)
125 |
126 | formkey_list = [""] * len(cipher_pairs)
127 | for pair in cipher_pairs:
128 | formkey_index, key_index = map(int, pair)
129 | formkey_list[formkey_index] = key_text[key_index]
130 | formkey = "".join(formkey_list)
131 |
132 | return formkey
133 |
134 | def get_next_data(self, overwrite_vars=False):
135 | logger.info("Downloading next_data...")
136 |
137 | r = request_with_retries(self.session.get, self.home_url)
138 | json_regex = r''
139 | json_text = re.search(json_regex, r.text).group(1)
140 | next_data = json.loads(json_text)
141 |
142 | if overwrite_vars:
143 | self.formkey = self.extract_formkey(r.text)
144 | self.viewer = next_data["props"]["pageProps"]["payload"]["viewer"]
145 | self.next_data = next_data
146 |
147 | return next_data
148 |
149 | def get_bot(self, display_name):
150 | url = f'https://poe.com/_next/data/{self.next_data["buildId"]}/{display_name}.json'
151 |
152 | r = request_with_retries(self.session.get, url)
153 |
154 | chat_data = r.json()["pageProps"]["payload"]["chatOfBotDisplayName"]
155 | return chat_data
156 |
157 | def get_bots(self, download_next_data=True):
158 | logger.info("Downloading all bots...")
159 | if download_next_data:
160 | next_data = self.get_next_data(overwrite_vars=True)
161 | else:
162 | next_data = self.next_data
163 |
164 | if not "availableBots" in self.viewer:
165 | raise RuntimeError("Invalid token or no bots are available.")
166 | bot_list = self.viewer["availableBots"]
167 |
168 | threads = []
169 | bots = {}
170 |
171 | def get_bot_thread(bot):
172 | chat_data = self.get_bot(bot["displayName"])
173 | bots[chat_data["defaultBotObject"]["nickname"]] = chat_data
174 |
175 | for bot in bot_list:
176 | thread = threading.Thread(target=get_bot_thread, args=(bot,), daemon=True)
177 | threads.append(thread)
178 |
179 | for thread in threads:
180 | thread.start()
181 | for thread in threads:
182 | thread.join()
183 |
184 | self.bots = bots
185 | self.bot_names = self.get_bot_names()
186 | return bots
187 |
188 | def get_bot_names(self):
189 | bot_names = {}
190 | for bot_nickname in self.bots:
191 | bot_obj = self.bots[bot_nickname]["defaultBotObject"]
192 | bot_names[bot_nickname] = bot_obj["displayName"]
193 | return bot_names
194 |
195 | def get_remaining_messages(self, chatbot):
196 | chat_data = self.get_bot(self.bot_names[chatbot])
197 | return chat_data["defaultBotObject"]["messageLimit"]["numMessagesRemaining"]
198 |
199 | def get_channel_data(self, channel=None):
200 | logger.info("Downloading channel data...")
201 | r = request_with_retries(self.session.get, self.settings_url)
202 | data = r.json()
203 |
204 | return data["tchannelData"]
205 |
206 | def get_websocket_url(self, channel=None):
207 | if channel is None:
208 | channel = self.channel
209 | query = f'?min_seq={channel["minSeq"]}&channel={channel["channel"]}&hash={channel["channelHash"]}'
210 | return f'wss://{self.ws_domain}.tch.{channel["baseHost"]}/up/{channel["boxName"]}/updates' + query
211 |
212 | def send_query(self, query_name, variables):
213 | for i in range(20):
214 | json_data = generate_payload(query_name, variables)
215 | payload = json.dumps(json_data, separators=(",", ":"))
216 |
217 | base_string = payload + self.gql_headers["poe-formkey"] + "WpuLMiXEKKE98j56k"
218 |
219 | headers = {
220 | "content-type": "application/json",
221 | "poe-tag-id": hashlib.md5(base_string.encode()).hexdigest(),
222 | }
223 | headers = {**self.gql_headers, **headers}
224 |
225 | r = request_with_retries(self.session.post, self.gql_url, data=payload, headers=headers)
226 |
227 | data = r.json()
228 | if data["data"] is None:
229 | logger.warn(f'{query_name} returned an error: {data["errors"][0]["message"]} | Retrying ({i + 1}/20)')
230 | time.sleep(2)
231 | continue
232 |
233 | return r.json()
234 |
235 | raise RuntimeError(f"{query_name} failed too many times.")
236 |
237 | def subscribe(self):
238 | logger.info("Subscribing to mutations")
239 | result = self.send_query(
240 | "SubscriptionsMutation",
241 | {
242 | "subscriptions": [
243 | {
244 | "subscriptionName": "messageAdded",
245 | "query": queries["MessageAddedSubscription"],
246 | },
247 | {
248 | "subscriptionName": "viewerStateUpdated",
249 | "query": queries["ViewerStateUpdatedSubscription"],
250 | },
251 | ]
252 | },
253 | )
254 |
255 | def ws_run_thread(self):
256 | kwargs = {}
257 | if self.proxy:
258 | proxy_parsed = urlparse(self.proxy)
259 | kwargs = {
260 | "proxy_type": proxy_parsed.scheme,
261 | "http_proxy_host": proxy_parsed.hostname,
262 | "http_proxy_port": proxy_parsed.port,
263 | }
264 |
265 | self.ws.run_forever(**kwargs)
266 |
267 | def connect_ws(self):
268 | self.ws_connected = False
269 | self.ws = websocket.WebSocketApp(
270 | self.get_websocket_url(),
271 | header={"User-Agent": user_agent},
272 | on_message=self.on_message,
273 | on_open=self.on_ws_connect,
274 | on_error=self.on_ws_error,
275 | on_close=self.on_ws_close,
276 | )
277 | t = threading.Thread(target=self.ws_run_thread, daemon=True)
278 | t.start()
279 | while not self.ws_connected:
280 | time.sleep(0.01)
281 |
282 | def disconnect_ws(self):
283 | if self.ws:
284 | self.ws.close()
285 | self.ws_connected = False
286 |
287 | def on_ws_connect(self, ws):
288 | self.ws_connected = True
289 |
290 | def on_ws_close(self, ws, close_status_code, close_message):
291 | self.ws_connected = False
292 | logger.warn(f"Websocket closed with status {close_status_code}: {close_message}")
293 |
294 | def on_ws_error(self, ws, error):
295 | self.disconnect_ws()
296 | self.connect_ws()
297 |
298 | def on_message(self, ws, msg):
299 | try:
300 | data = json.loads(msg)
301 |
302 | if not "messages" in data:
303 | return
304 |
305 | for message_str in data["messages"]:
306 | message_data = json.loads(message_str)
307 | if message_data["message_type"] != "subscriptionUpdate":
308 | continue
309 | message = message_data["payload"]["data"]["messageAdded"]
310 |
311 | copied_dict = self.active_messages.copy()
312 | for key, value in copied_dict.items():
313 | # add the message to the appropriate queue
314 | if value == message["messageId"] and key in self.message_queues:
315 | self.message_queues[key].put(message)
316 | return
317 |
318 | # indicate that the response id is tied to the human message id
319 | elif key != "pending" and value is None and message["state"] != "complete":
320 | self.active_messages[key] = message["messageId"]
321 | self.message_queues[key].put(message)
322 | return
323 |
324 | except Exception:
325 | logger.error(traceback.format_exc())
326 | self.disconnect_ws()
327 | self.connect_ws()
328 |
329 | def send_message(self, chatbot, message, with_chat_break=False, timeout=20):
330 | # if there is another active message, wait until it has finished sending
331 | while None in self.active_messages.values():
332 | time.sleep(0.01)
333 |
334 | # None indicates that a message is still in progress
335 | self.active_messages["pending"] = None
336 |
337 | logger.info(f"Sending message to {chatbot}: {message}")
338 |
339 | # reconnect websocket
340 | if not self.ws_connected:
341 | self.disconnect_ws()
342 | self.setup_connection()
343 | self.connect_ws()
344 |
345 | message_data = self.send_query(
346 | "SendMessageMutation",
347 | {
348 | "bot": chatbot,
349 | "query": message,
350 | "chatId": self.bots[chatbot]["chatId"],
351 | "source": None,
352 | "withChatBreak": with_chat_break,
353 | },
354 | )
355 | del self.active_messages["pending"]
356 |
357 | if not message_data["data"]["messageEdgeCreate"]["message"]:
358 | raise RuntimeError(f"Daily limit reached for {chatbot}.")
359 | try:
360 | human_message = message_data["data"]["messageEdgeCreate"]["message"]
361 | human_message_id = human_message["node"]["messageId"]
362 | except TypeError:
363 | raise RuntimeError(f"An unknown error occurred. Raw response data: {message_data}")
364 |
365 | # indicate that the current message is waiting for a response
366 | self.active_messages[human_message_id] = None
367 | self.message_queues[human_message_id] = queue.Queue()
368 |
369 | last_text = ""
370 | message_id = None
371 | while True:
372 | try:
373 | message = self.message_queues[human_message_id].get(timeout=timeout)
374 | except queue.Empty:
375 | del self.active_messages[human_message_id]
376 | del self.message_queues[human_message_id]
377 | raise RuntimeError("Response timed out.")
378 |
379 | # only break when the message is marked as complete
380 | if message["state"] == "complete":
381 | if last_text and message["messageId"] == message_id:
382 | break
383 | else:
384 | continue
385 |
386 | # update info about response
387 | message["text_new"] = message["text"][len(last_text) :]
388 | last_text = message["text"]
389 | message_id = message["messageId"]
390 |
391 | yield message
392 |
393 | del self.active_messages[human_message_id]
394 | del self.message_queues[human_message_id]
395 |
396 | def send_chat_break(self, chatbot):
397 | logger.info(f"Sending chat break to {chatbot}")
398 | result = self.send_query("AddMessageBreakMutation", {"chatId": self.bots[chatbot]["chatId"]})
399 | return result["data"]["messageBreakCreate"]["message"]
400 |
401 | def get_message_history(self, chatbot, count=25, cursor=None):
402 | logger.info(f"Downloading {count} messages from {chatbot}")
403 |
404 | messages = []
405 | if cursor is None:
406 | chat_data = self.get_bot(self.bot_names[chatbot])
407 | if not chat_data["messagesConnection"]["edges"]:
408 | return []
409 | messages = chat_data["messagesConnection"]["edges"][:count]
410 | cursor = chat_data["messagesConnection"]["pageInfo"]["startCursor"]
411 | count -= len(messages)
412 |
413 | cursor = str(cursor)
414 | if count > 50:
415 | messages = self.get_message_history(chatbot, count=50, cursor=cursor) + messages
416 | while count > 0:
417 | count -= 50
418 | new_cursor = messages[0]["cursor"]
419 | new_messages = self.get_message_history(chatbot, min(50, count), cursor=new_cursor)
420 | messages = new_messages + messages
421 | return messages
422 | elif count <= 0:
423 | return messages
424 |
425 | result = self.send_query(
426 | "ChatListPaginationQuery",
427 | {"count": count, "cursor": cursor, "id": self.bots[chatbot]["id"]},
428 | )
429 | query_messages = result["data"]["node"]["messagesConnection"]["edges"]
430 | messages = query_messages + messages
431 | return messages
432 |
433 | def delete_message(self, message_ids):
434 | logger.info(f"Deleting messages: {message_ids}")
435 | if not type(message_ids) is list:
436 | message_ids = [int(message_ids)]
437 |
438 | result = self.send_query("DeleteMessageMutation", {"messageIds": message_ids})
439 |
440 | def purge_conversation(self, chatbot, count=-1):
441 | logger.info(f"Purging messages from {chatbot}")
442 | last_messages = self.get_message_history(chatbot, count=50)[::-1]
443 | while last_messages:
444 | message_ids = []
445 | for message in last_messages:
446 | if count == 0:
447 | break
448 | count -= 1
449 | message_ids.append(message["node"]["messageId"])
450 |
451 | self.delete_message(message_ids)
452 |
453 | if count == 0:
454 | return
455 | last_messages = self.get_message_history(chatbot, count=50)[::-1]
456 | logger.info(f"No more messages left to delete.")
457 |
458 | def create_bot(
459 | self,
460 | handle,
461 | prompt="",
462 | base_model="chinchilla",
463 | description="",
464 | intro_message="",
465 | api_key=None,
466 | api_bot=False,
467 | api_url=None,
468 | prompt_public=True,
469 | pfp_url=None,
470 | linkification=False,
471 | markdown_rendering=True,
472 | suggested_replies=False,
473 | private=False,
474 | ):
475 | result = self.send_query(
476 | "PoeBotCreateMutation",
477 | {
478 | "model": base_model,
479 | "handle": handle,
480 | "prompt": prompt,
481 | "isPromptPublic": prompt_public,
482 | "introduction": intro_message,
483 | "description": description,
484 | "profilePictureUrl": pfp_url,
485 | "apiUrl": api_url,
486 | "apiKey": api_key,
487 | "isApiBot": api_bot,
488 | "hasLinkification": linkification,
489 | "hasMarkdownRendering": markdown_rendering,
490 | "hasSuggestedReplies": suggested_replies,
491 | "isPrivateBot": private,
492 | },
493 | )
494 |
495 | data = result["data"]["poeBotCreate"]
496 | if data["status"] != "success":
497 | raise RuntimeError(f"Poe returned an error while trying to create a bot: {data['status']}")
498 | self.get_bots()
499 | return data
500 |
501 | def edit_bot(
502 | self,
503 | bot_id,
504 | handle,
505 | prompt="",
506 | base_model="chinchilla",
507 | description="",
508 | intro_message="",
509 | api_key=None,
510 | api_url=None,
511 | private=False,
512 | prompt_public=True,
513 | pfp_url=None,
514 | linkification=False,
515 | markdown_rendering=True,
516 | suggested_replies=False,
517 | ):
518 | result = self.send_query(
519 | "PoeBotEditMutation",
520 | {
521 | "baseBot": base_model,
522 | "botId": bot_id,
523 | "handle": handle,
524 | "prompt": prompt,
525 | "isPromptPublic": prompt_public,
526 | "introduction": intro_message,
527 | "description": description,
528 | "profilePictureUrl": pfp_url,
529 | "apiUrl": api_url,
530 | "apiKey": api_key,
531 | "hasLinkification": linkification,
532 | "hasMarkdownRendering": markdown_rendering,
533 | "hasSuggestedReplies": suggested_replies,
534 | "isPrivateBot": private,
535 | },
536 | )
537 |
538 | data = result["data"]["poeBotEdit"]
539 | if data["status"] != "success":
540 | raise RuntimeError(f"Poe returned an error while trying to edit a bot: {data['status']}")
541 | self.get_bots()
542 | return data
543 |
544 | def delete_account(self) -> None:
545 | response = self.send_query('SettingsDeleteAccountButton_deleteAccountMutation_Mutation', {})
546 | data = response['data']['deleteAccount']
547 | if 'viewer' not in data:
548 | raise RuntimeError(f'Error occurred while deleting the account, Please try again!')
549 |
550 |
551 | load_queries()
552 |
--------------------------------------------------------------------------------
/gpt4free/quora/__init__.py:
--------------------------------------------------------------------------------
1 | import json
2 | from datetime import datetime
3 | from hashlib import md5
4 | from json import dumps
5 | from pathlib import Path
6 | from random import choice, choices, randint
7 | from re import search, findall
8 | from string import ascii_letters, digits
9 | from typing import Optional, Union, List, Any, Generator
10 | from urllib.parse import unquote
11 |
12 | import selenium.webdriver.support.expected_conditions as EC
13 | from fake_useragent import UserAgent
14 | from pydantic import BaseModel
15 | from pypasser import reCaptchaV3
16 | from requests import Session
17 | from selenium.webdriver import Firefox, Chrome, FirefoxOptions, ChromeOptions
18 | from selenium.webdriver.common.by import By
19 | from selenium.webdriver.support.wait import WebDriverWait
20 | from tls_client import Session as TLS
21 |
22 | from .api import Client as PoeClient
23 | from .mail import Emailnator
24 |
25 | SELENIUM_WEB_DRIVER_ERROR_MSG = b'''The error message you are receiving is due to the `geckodriver` executable not
26 | being found in your system\'s PATH. To resolve this issue, you need to download the geckodriver and add its location
27 | to your system\'s PATH.\n\nHere are the steps to resolve the issue:\n\n1. Download the geckodriver for your platform
28 | (Windows, macOS, or Linux) from the following link: https://github.com/mozilla/geckodriver/releases\n\n2. Extract the
29 | downloaded archive and locate the geckodriver executable.\n\n3. Add the geckodriver executable to your system\'s
30 | PATH.\n\nFor macOS and Linux:\n\n- Open a terminal window.\n- Move the geckodriver executable to a directory that is
31 | already in your PATH, or create a new directory and add it to your PATH:\n\n```bash\n# Example: Move geckodriver to
32 | /usr/local/bin\nmv /path/to/your/geckodriver /usr/local/bin\n```\n\n- If you created a new directory, add it to your
33 | PATH:\n\n```bash\n# Example: Add a new directory to PATH\nexport PATH=$PATH:/path/to/your/directory\n```\n\nFor
34 | Windows:\n\n- Right-click on "My Computer" or "This PC" and select "Properties".\n- Click on "Advanced system
35 | settings".\n- Click on the "Environment Variables" button.\n- In the "System variables" section, find the "Path"
36 | variable, select it, and click "Edit".\n- Click "New" and add the path to the directory containing the geckodriver
37 | executable.\n\nAfter adding the geckodriver to your PATH, restart your terminal or command prompt and try running
38 | your script again. The error should be resolved.'''
39 |
40 | # from twocaptcha import TwoCaptcha
41 | # solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358')
42 |
43 | MODELS = {
44 | 'Sage': 'capybara',
45 | 'GPT-4': 'beaver',
46 | 'Claude+': 'a2_2',
47 | 'Claude-instant': 'a2',
48 | 'ChatGPT': 'chinchilla',
49 | 'Dragonfly': 'nutria',
50 | 'NeevaAI': 'hutia',
51 | }
52 |
53 |
54 | def extract_formkey(html):
55 | script_regex = r''
56 | script_text = search(script_regex, html).group(1)
57 | key_regex = r'var .="([0-9a-f]+)",'
58 | key_text = search(key_regex, script_text).group(1)
59 | cipher_regex = r'.\[(\d+)\]=.\[(\d+)\]'
60 | cipher_pairs = findall(cipher_regex, script_text)
61 |
62 | formkey_list = [''] * len(cipher_pairs)
63 | for pair in cipher_pairs:
64 | formkey_index, key_index = map(int, pair)
65 | formkey_list[formkey_index] = key_text[key_index]
66 | formkey = ''.join(formkey_list)
67 |
68 | return formkey
69 |
70 |
71 | class Choice(BaseModel):
72 | text: str
73 | index: int
74 | logprobs: Any
75 | finish_reason: str
76 |
77 |
78 | class Usage(BaseModel):
79 | prompt_tokens: int
80 | completion_tokens: int
81 | total_tokens: int
82 |
83 |
84 | class PoeResponse(BaseModel):
85 | id: int
86 | object: str
87 | created: int
88 | model: str
89 | choices: List[Choice]
90 | usage: Usage
91 | text: str
92 |
93 |
94 | class ModelResponse:
95 | def __init__(self, json_response: dict) -> None:
96 | self.id = json_response['data']['poeBotCreate']['bot']['id']
97 | self.name = json_response['data']['poeBotCreate']['bot']['displayName']
98 | self.limit = json_response['data']['poeBotCreate']['bot']['messageLimit']['dailyLimit']
99 | self.deleted = json_response['data']['poeBotCreate']['bot']['deletionState']
100 |
101 |
102 | class Model:
103 | @staticmethod
104 | def create(
105 | token: str,
106 | model: str = 'gpt-3.5-turbo', # claude-instant
107 | system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
108 | description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
109 | handle: str = None,
110 | ) -> ModelResponse:
111 | if not handle:
112 | handle = f'gptx{randint(1111111, 9999999)}'
113 |
114 | client = Session()
115 | client.cookies['p-b'] = token
116 |
117 | formkey = extract_formkey(client.get('https://poe.com').text)
118 | settings = client.get('https://poe.com/api/settings').json()
119 |
120 | client.headers = {
121 | 'host': 'poe.com',
122 | 'origin': 'https://poe.com',
123 | 'referer': 'https://poe.com/',
124 | 'poe-formkey': formkey,
125 | 'poe-tchannel': settings['tchannelData']['channel'],
126 | 'user-agent': UserAgent().random,
127 | 'connection': 'keep-alive',
128 | 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
129 | 'sec-ch-ua-mobile': '?0',
130 | 'sec-ch-ua-platform': '"macOS"',
131 | 'content-type': 'application/json',
132 | 'sec-fetch-site': 'same-origin',
133 | 'sec-fetch-mode': 'cors',
134 | 'sec-fetch-dest': 'empty',
135 | 'accept': '*/*',
136 | 'accept-encoding': 'gzip, deflate, br',
137 | 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
138 | }
139 |
140 | payload = dumps(
141 | separators=(',', ':'),
142 | obj={
143 | 'queryName': 'CreateBotMain_poeBotCreate_Mutation',
144 | 'variables': {
145 | 'model': MODELS[model],
146 | 'handle': handle,
147 | 'prompt': system_prompt,
148 | 'isPromptPublic': True,
149 | 'introduction': '',
150 | 'description': description,
151 | 'profilePictureUrl': 'https://qph.fs.quoracdn.net/main-qimg-24e0b480dcd946e1cc6728802c5128b6',
152 | 'apiUrl': None,
153 | 'apiKey': ''.join(choices(ascii_letters + digits, k=32)),
154 | 'isApiBot': False,
155 | 'hasLinkification': False,
156 | 'hasMarkdownRendering': False,
157 | 'hasSuggestedReplies': False,
158 | 'isPrivateBot': False,
159 | },
160 | 'query': 'mutation CreateBotMain_poeBotCreate_Mutation(\n $model: String!\n $handle: String!\n $prompt: String!\n $isPromptPublic: Boolean!\n $introduction: String!\n $description: String!\n $profilePictureUrl: String\n $apiUrl: String\n $apiKey: String\n $isApiBot: Boolean\n $hasLinkification: Boolean\n $hasMarkdownRendering: Boolean\n $hasSuggestedReplies: Boolean\n $isPrivateBot: Boolean\n) {\n poeBotCreate(model: $model, handle: $handle, promptPlaintext: $prompt, isPromptPublic: $isPromptPublic, introduction: $introduction, description: $description, profilePicture: $profilePictureUrl, apiUrl: $apiUrl, apiKey: $apiKey, isApiBot: $isApiBot, hasLinkification: $hasLinkification, hasMarkdownRendering: $hasMarkdownRendering, hasSuggestedReplies: $hasSuggestedReplies, isPrivateBot: $isPrivateBot) {\n status\n bot {\n id\n ...BotHeader_bot\n }\n }\n}\n\nfragment BotHeader_bot on Bot {\n displayName\n messageLimit {\n dailyLimit\n }\n ...BotImage_bot\n ...BotLink_bot\n ...IdAnnotation_node\n ...botHelpers_useViewerCanAccessPrivateBot\n ...botHelpers_useDeletion_bot\n}\n\nfragment BotImage_bot on Bot {\n displayName\n ...botHelpers_useDeletion_bot\n ...BotImage_useProfileImage_bot\n}\n\nfragment BotImage_useProfileImage_bot on Bot {\n image {\n __typename\n ... on LocalBotImage {\n localName\n }\n ... on UrlBotImage {\n url\n }\n }\n ...botHelpers_useDeletion_bot\n}\n\nfragment BotLink_bot on Bot {\n displayName\n}\n\nfragment IdAnnotation_node on Node {\n __isNode: __typename\n id\n}\n\nfragment botHelpers_useDeletion_bot on Bot {\n deletionState\n}\n\nfragment botHelpers_useViewerCanAccessPrivateBot on Bot {\n isPrivateBot\n viewerIsCreator\n}\n',
161 | },
162 | )
163 |
164 | base_string = payload + client.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k'
165 | client.headers['poe-tag-id'] = md5(base_string.encode()).hexdigest()
166 |
167 | response = client.post('https://poe.com/api/gql_POST', data=payload)
168 |
169 | if 'success' not in response.text:
170 | raise Exception(
171 | '''
172 | Bot creation Failed
173 | !! Important !!
174 | Bot creation was not enabled on this account
175 | please use: quora.Account.create with enable_bot_creation set to True
176 | '''
177 | )
178 |
179 | return ModelResponse(response.json())
180 |
181 |
182 | class Account:
183 | @staticmethod
184 | def create(
185 | proxy: Optional[str] = None,
186 | logging: bool = False,
187 | enable_bot_creation: bool = False,
188 | ):
189 | client = TLS(client_identifier='chrome110')
190 | client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else {}
191 |
192 | mail_client = Emailnator()
193 | mail_address = mail_client.get_mail()
194 |
195 | if logging:
196 | print('email', mail_address)
197 |
198 | client.headers = {
199 | 'authority': 'poe.com',
200 | 'accept': '*/*',
201 | 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
202 | 'content-type': 'application/json',
203 | 'origin': 'https://poe.com',
204 | 'poe-tag-id': 'null',
205 | 'referer': 'https://poe.com/login',
206 | 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
207 | 'sec-ch-ua-mobile': '?0',
208 | 'sec-ch-ua-platform': '"macOS"',
209 | 'sec-fetch-dest': 'empty',
210 | 'sec-fetch-mode': 'cors',
211 | 'sec-fetch-site': 'same-origin',
212 | 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
213 | 'poe-formkey': extract_formkey(client.get('https://poe.com/login').text),
214 | 'poe-tchannel': client.get('https://poe.com/api/settings').json()['tchannelData']['channel'],
215 | }
216 |
217 | token = reCaptchaV3(
218 | 'https://www.recaptcha.net/recaptcha/enterprise/anchor?ar=1&k=6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG&co=aHR0cHM6Ly9wb2UuY29tOjQ0Mw..&hl=en&v=4PnKmGB9wRHh1i04o7YUICeI&size=invisible&cb=bi6ivxoskyal'
219 | )
220 | # token = solver.recaptcha(sitekey='6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG',
221 | # url = 'https://poe.com/login?redirect_url=%2F',
222 | # version = 'v3',
223 | # enterprise = 1,
224 | # invisible = 1,
225 | # action = 'login',)['code']
226 |
227 | payload = dumps(
228 | separators=(',', ':'),
229 | obj={
230 | 'queryName': 'MainSignupLoginSection_sendVerificationCodeMutation_Mutation',
231 | 'variables': {
232 | 'emailAddress': mail_address,
233 | 'phoneNumber': None,
234 | 'recaptchaToken': token,
235 | },
236 | 'query': 'mutation MainSignupLoginSection_sendVerificationCodeMutation_Mutation(\n $emailAddress: String\n $phoneNumber: String\n $recaptchaToken: String\n) {\n sendVerificationCode(verificationReason: login, emailAddress: $emailAddress, phoneNumber: $phoneNumber, recaptchaToken: $recaptchaToken) {\n status\n errorMessage\n }\n}\n',
237 | },
238 | )
239 |
240 | base_string = payload + client.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k'
241 | client.headers['poe-tag-id'] = md5(base_string.encode()).hexdigest()
242 |
243 | print(dumps(client.headers, indent=4))
244 |
245 | response = client.post('https://poe.com/api/gql_POST', data=payload)
246 |
247 | if 'automated_request_detected' in response.text:
248 | print('please try using a proxy / wait for fix')
249 |
250 | if 'Bad Request' in response.text:
251 | if logging:
252 | print('bad request, retrying...', response.json())
253 | quit()
254 |
255 | if logging:
256 | print('send_code', response.json())
257 |
258 | mail_content = mail_client.get_message()
259 | mail_token = findall(r';">(\d{6,7})', mail_content)[0]
260 |
261 | if logging:
262 | print('code', mail_token)
263 |
264 | payload = dumps(
265 | separators=(',', ':'),
266 | obj={
267 | 'queryName': 'SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation',
268 | 'variables': {
269 | 'verificationCode': str(mail_token),
270 | 'emailAddress': mail_address,
271 | 'phoneNumber': None,
272 | },
273 | 'query': 'mutation SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation(\n $verificationCode: String!\n $emailAddress: String\n $phoneNumber: String\n) {\n signupWithVerificationCode(verificationCode: $verificationCode, emailAddress: $emailAddress, phoneNumber: $phoneNumber) {\n status\n errorMessage\n }\n}\n',
274 | },
275 | )
276 |
277 | base_string = payload + client.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k'
278 | client.headers['poe-tag-id'] = md5(base_string.encode()).hexdigest()
279 |
280 | response = client.post('https://poe.com/api/gql_POST', data=payload)
281 | if logging:
282 | print('verify_code', response.json())
283 |
284 | def get(self):
285 | cookies = open(Path(__file__).resolve().parent / 'cookies.txt', 'r').read().splitlines()
286 | return choice(cookies)
287 |
288 | @staticmethod
289 | def delete(token: str, proxy: Optional[str] = None):
290 | client = PoeClient(token, proxy=proxy)
291 | client.delete_account()
292 |
293 |
294 | class StreamingCompletion:
295 | @staticmethod
296 | def create(
297 | model: str = 'gpt-4',
298 | custom_model: bool = None,
299 | prompt: str = 'hello world',
300 | token: str = '',
301 | proxy: Optional[str] = None,
302 | ) -> Generator[PoeResponse, None, None]:
303 | _model = MODELS[model] if not custom_model else custom_model
304 |
305 | proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
306 | client = PoeClient(token)
307 | client.proxy = proxies
308 |
309 | for chunk in client.send_message(_model, prompt):
310 | yield PoeResponse(
311 | **{
312 | 'id': chunk['messageId'],
313 | 'object': 'text_completion',
314 | 'created': chunk['creationTime'],
315 | 'model': _model,
316 | 'text': chunk['text_new'],
317 | 'choices': [
318 | {
319 | 'text': chunk['text_new'],
320 | 'index': 0,
321 | 'logprobs': None,
322 | 'finish_reason': 'stop',
323 | }
324 | ],
325 | 'usage': {
326 | 'prompt_tokens': len(prompt),
327 | 'completion_tokens': len(chunk['text_new']),
328 | 'total_tokens': len(prompt) + len(chunk['text_new']),
329 | },
330 | }
331 | )
332 |
333 |
class Completion:
    """Blocking (non-streaming) completion API over a Poe model."""

    @staticmethod
    def create(
        model: str = 'gpt-4',
        custom_model: Optional[str] = None,  # fixed annotation: holds a model-name string
        prompt: str = 'hello world',
        token: str = '',
        proxy: Optional[str] = None,
    ) -> PoeResponse:
        """Send ``prompt`` to Poe and return the final, complete response.

        Args:
            model: Key into ``MODELS``; ignored when ``custom_model`` is given.
            custom_model: Raw Poe model/bot name to use verbatim instead of
                the ``MODELS`` lookup.
            prompt: The user message to send.
            token: Poe ``p-b`` cookie value used to authenticate the client.
            proxy: Optional ``host:port`` string; tunnelled over HTTP for both
                http and https traffic.

        Returns:
            PoeResponse: an OpenAI-completion-shaped response built from the
            last streamed chunk (whose ``text`` is the full message).

        Raises:
            RuntimeError: if Poe returned no chunks at all.
        """
        _model = MODELS[model] if not custom_model else custom_model

        # requests-style proxy mapping; both schemes are tunnelled over http.
        # NOTE(review): ``False`` (rather than None) when no proxy is set is
        # what PoeClient expects here — confirm before changing.
        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
        client = PoeClient(token)
        client.proxy = proxies

        # Drain the stream, keeping only the last chunk — it carries the
        # complete message text.
        chunk = None
        for response in client.send_message(_model, prompt):
            chunk = response

        if chunk is None:
            # Previously this fell through to an opaque
            # "'NoneType' object is not subscriptable" TypeError.
            raise RuntimeError('Poe returned no response chunks for the prompt.')

        return PoeResponse(
            **{
                'id': chunk['messageId'],
                'object': 'text_completion',
                'created': chunk['creationTime'],
                'model': _model,
                'text': chunk['text'],
                'choices': [
                    {
                        'text': chunk['text'],
                        'index': 0,
                        'logprobs': None,
                        'finish_reason': 'stop',
                    }
                ],
                # token counts are approximated by character length, not real
                # tokenizer counts
                'usage': {
                    'prompt_tokens': len(prompt),
                    'completion_tokens': len(chunk['text']),
                    'total_tokens': len(prompt) + len(chunk['text']),
                },
            }
        )
375 |
376 |
class Poe:
    """High-level Poe session.

    On construction it loads a cached ``p-b`` cookie from ``cookie_path``
    (re-registering a throwaway account via a temporary e-mail and a headless
    browser when the cookie is missing or expired) and opens a ``PoeClient``
    with it.
    """

    def __init__(
        self,
        model: str = 'ChatGPT',
        driver: str = 'firefox',
        # NOTE(review): ``download_driver`` is never read anywhere in this
        # class; kept only for backward compatibility of the signature.
        download_driver: bool = False,
        driver_path: Optional[str] = None,
        cookie_path: str = './quora/cookie.json',
    ):
        # Validate the model unconditionally. The original guard
        # (``if model and model not in MODELS``) let falsy values fall
        # through to a bare KeyError on the lookup below.
        if model not in MODELS:
            raise RuntimeError('Sorry, the model you provided does not exist. Please check and try again.')
        self.model = MODELS[model]
        self.cookie_path = cookie_path
        self.cookie = self.__load_cookie(driver, driver_path=driver_path)
        self.client = PoeClient(self.cookie)

    def __load_cookie(self, driver: str, driver_path: Optional[str] = None) -> str:
        """Return the URL-decoded ``p-b`` cookie value.

        Reads the cached cookie file when present and not expired; otherwise
        registers a fresh account and caches the new cookie.
        """
        if (cookie_file := Path(self.cookie_path)).exists():
            with cookie_file.open() as fp:
                cookie = json.load(fp)
                # NOTE(review): assumes the cached cookie dict always carries
                # an 'expiry' key — confirm against what selenium's
                # get_cookie('p-b') returns for session cookies.
                if datetime.fromtimestamp(cookie['expiry']) < datetime.now():
                    cookie = self.__register_and_get_cookie(driver, driver_path=driver_path)
                else:
                    print('Loading the cookie from file')
        else:
            cookie = self.__register_and_get_cookie(driver, driver_path=driver_path)

        return unquote(cookie['value'])

    def __register_and_get_cookie(self, driver: str, driver_path: Optional[str] = None) -> dict:
        """Register a new Poe account with a throwaway e-mail and return the
        ``p-b`` cookie dict (also persisted to ``self.cookie_path``)."""
        mail_client = Emailnator()
        mail_address = mail_client.get_mail()

        driver = self.__resolve_driver(driver, driver_path=driver_path)
        try:
            driver.get("https://www.poe.com")

            # clicking use email button
            driver.find_element(By.XPATH, '//button[contains(text(), "Use email")]').click()

            email = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, '//input[@type="email"]')))
            email.send_keys(mail_address)
            driver.find_element(By.XPATH, '//button[text()="Go"]').click()

            # Pull the 6-7 digit verification code out of the e-mail body.
            code = findall(r';">(\d{6,7})', mail_client.get_message())[0]
            print(code)

            verification_code = WebDriverWait(driver, 30).until(
                EC.presence_of_element_located((By.XPATH, '//input[@placeholder="Code"]'))
            )
            verification_code.send_keys(code)
            # Poe shows either "Verify" or "Log In" depending on whether the
            # address already had an account — accept whichever appears first.
            verify_button = EC.presence_of_element_located((By.XPATH, '//button[text()="Verify"]'))
            login_button = EC.presence_of_element_located((By.XPATH, '//button[text()="Log In"]'))

            WebDriverWait(driver, 30).until(EC.any_of(verify_button, login_button)).click()

            cookie = driver.get_cookie('p-b')

            with open(self.cookie_path, 'w') as fw:
                json.dump(cookie, fw)
        finally:
            # quit() (not close()) — close() only closes the current window
            # and can leave the headless browser process running.
            driver.quit()
        return cookie

    @staticmethod
    def __resolve_driver(driver: str, driver_path: Optional[str] = None) -> Union[Firefox, Chrome]:
        """Build a headless Firefox or Chrome driver.

        Raises:
            Exception: with ``SELENIUM_WEB_DRIVER_ERROR_MSG`` when the
                browser/driver cannot be started.
        """
        options = FirefoxOptions() if driver == 'firefox' else ChromeOptions()
        options.add_argument('-headless')

        if driver_path:
            # NOTE(review): binary_location is the *browser* binary, not the
            # webdriver executable — the parameter name is misleading.
            options.binary_location = driver_path
        try:
            return Firefox(options=options) if driver == 'firefox' else Chrome(options=options)
        except Exception as error:
            raise Exception(SELENIUM_WEB_DRIVER_ERROR_MSG) from error

    def chat(self, message: str, model: Optional[str] = None) -> str:
        """Send ``message`` and return the final response text.

        ``model`` overrides the instance default for this one call. Returns
        ``None`` if Poe yielded no chunks (unchanged legacy behavior).
        """
        if model and model not in MODELS:
            raise RuntimeError('Sorry, the model you provided does not exist. Please check and try again.')
        model = MODELS[model] if model else self.model
        response = None
        # Each chunk's 'text' holds the accumulated message; keep the last.
        for chunk in self.client.send_message(model, message):
            response = chunk['text']
        return response

    def create_bot(self, name: str, /, prompt: str = '', base_model: str = 'ChatGPT', description: str = '') -> None:
        """Create a custom Poe bot named ``name`` on top of ``base_model``."""
        if base_model not in MODELS:
            raise RuntimeError('Sorry, the base_model you provided does not exist. Please check and try again.')

        response = self.client.create_bot(
            handle=name,
            prompt=prompt,
            base_model=MODELS[base_model],
            description=description,
        )
        print(f'Successfully created bot with name: {response["bot"]["displayName"]}')

    def list_bots(self) -> list:
        """Return the display names of all bots visible to this account."""
        return list(self.client.bot_names.values())

    def delete_account(self) -> None:
        """Permanently delete the account backing this session."""
        self.client.delete_account()
479 |
--------------------------------------------------------------------------------