├── .devcontainer ├── Dockerfile ├── devcontainer.json └── docker-compose.yml ├── .env ├── .github ├── FUNDING.yml └── ISSUE_TEMPLATE │ └── issue.md ├── AUTOGPT.py ├── BABYAGI.py ├── BabyAgi ├── BabyAGIMod.py ├── task_creation.py ├── task_execution.py └── task_prioritization.py ├── Camel.py ├── Embedding └── HuggingFaceEmbedding.py ├── FreeLLM ├── BardChatAPI.py ├── BingChatAPI.py ├── ChatGPTAPI.py └── HuggingChatAPI.py ├── LICENSE ├── MetaPrompt.py ├── OtherAgent ├── FreeLLM │ ├── BardChatAPI.py │ ├── BingChatAPI.py │ ├── ChatGPTAPI.py │ └── HuggingChatAPI.py ├── Tool │ └── browserQA.py ├── csvAgent.py ├── customAgent.py ├── pythonAgent.py └── startup.csv ├── PlugAndPlayStart ├── FREE_AUTOGPT.ipynb └── README.md ├── README.md ├── TransformersAgent.py ├── cookiesBing.json ├── hfAgent ├── FreeLLM │ ├── BardChatAPI.py │ ├── BingChatAPI.py │ ├── ChatGPTAPI.py │ ├── HuggingChatAPI.py │ └── thanks.txt └── agents.py └── requirements.txt /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Python base image from the Docker Hub 2 | FROM python:3.10 3 | 4 | # Install browsers 5 | RUN apt-get update && apt-get install -y \ 6 | chromium-driver firefox-esr \ 7 | ca-certificates 8 | 9 | # Install utilities 10 | RUN apt-get install -y curl jq wget git 11 | 12 | # Declare working directory 13 | WORKDIR /workspace/FreeAutoGPT 14 | 15 | # Copy the current directory contents into the Workspace. 16 | COPY . /workspace/FreeAutoGPT 17 | 18 | # Install any necessary packages specified in requirements.txt. 19 | RUN pip install -r requirements.txt 20 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "dockerComposeFile": "./docker-compose.yml", 3 | "service": "free-autogpt", 4 | "workspaceFolder": "/workspace/FreeAutoGPT", 5 | "shutdownAction": "stopCompose", 6 | "features": { 7 | "ghcr.io/devcontainers/features/common-utils:2": { 8 | "installZsh": "true", 9 | "username": "vscode", 10 | "userUid": "6942", 11 | "userGid": "6942", 12 | "upgradePackages": "true" 13 | }, 14 | "ghcr.io/devcontainers/features/desktop-lite:1": {}, 15 | "ghcr.io/devcontainers/features/python:1": "none", 16 | "ghcr.io/devcontainers/features/node:1": "none", 17 | "ghcr.io/devcontainers/features/git:1": { 18 | "version": "latest", 19 | "ppa": "false" 20 | } 21 | }, 22 | // Configure tool-specific properties. 23 | "customizations": { 24 | // Configure properties specific to VS Code. 25 | "vscode": { 26 | // Set *default* container specific settings.json values on container create. 27 | "settings": { 28 | "python.defaultInterpreterPath": "/usr/local/bin/python" 29 | } 30 | } 31 | }, 32 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 33 | // "forwardPorts": [], 34 | 35 | // Use 'postCreateCommand' to run commands after the container is created. 36 | // "postCreateCommand": "pip3 install --user -r requirements.txt", 37 | 38 | // Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. 
39 | "remoteUser": "vscode" 40 | } 41 | -------------------------------------------------------------------------------- /.devcontainer/docker-compose.yml: -------------------------------------------------------------------------------- 1 | # To boot the app run the following: 2 | # docker-compose run auto-gpt 3 | version: '3.10' 4 | 5 | services: 6 | free-autogpt: 7 | build: 8 | dockerfile: .devcontainer/Dockerfile 9 | context: ../ 10 | tty: true 11 | volumes: 12 | - ../:/workspace/FreeAutoGPT 13 | -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | ### FREE-AUTOGPT - GENERAL SETTINGS 3 | ################################################################################ 4 | 5 | 6 | 7 | ### CHATGPT 8 | ## CHATGPT_TOKEN - ChatGPT Token (Go to https://chat.openai.com/chat and open the developer tools by F12. 9 | ## Find the __Secure-next-auth.session-token cookie in Application > Storage > Cookies > https://chat.openai.com 10 | ## Copy the value in the Cooki2e Value field.) 11 | 12 | ## USE_EXISTING_CHAT - Use an existing chat (Default: False) 13 | 14 | ## CHAT_ID - Insert Chat-ID (chat.openai.com/c/(IT'S THIS ->)58XXXX0f-XXXX-XXXX-XXXX-faXXXXd2b50f) 15 | 16 | CHATGPT_TOKEN=your-chatgpt-token 17 | 18 | # REQUIRED CHATGPT PLUS subscription for use GPT4 model 19 | USE_GPT4 = False 20 | 21 | # USE_EXISTING_CHAT = False 22 | # CHAT_ID = your-chat-id 23 | 24 | 25 | 26 | ### HUGGINGFACE 27 | ## HUGGINGFACE_TOKEN - HuggingFace Token (Check https://huggingface.co/settings/tokens to get your token) 28 | 29 | HUGGINGFACE_TOKEN=your-huggingface-token 30 | emailHF=your-emailHF 31 | pswHF=your-pswHF 32 | 33 | 34 | ### BARDCHAT 35 | ## BARDCHAT_TOKEN - Bard Token (Go to https://bard.google.com/ and open the developer tools by F12. 36 | ## Find the __Secure-1PSID cookie in Application > Storage > Cookies > https://bard.google.com/ 37 | ## Copy the value in the Cooki2e Value field.) 38 | 39 | BARDCHAT_TOKEN=your-googlebard-token 40 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [] 4 | custom: ['https://rebrand.ly/SupportAUTOGPTfree'] 5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: ISSUE 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 |
11 | 
12 | ⚠️ INSTRUCTIONS:
13 | 
14 | 
15 | - Enter ONE "x" inside the brackets [x] to choose the answer
16 | - [x] Example
17 | - [ ] Example2
18 | 
19 | <br>
20 | 21 | 22 | 23 | **Have you already searched for your ISSUE among the resolved ones?** 24 | - [ ] Yes, new issue 25 | - [ ] Yes, but the solution not work for me 26 | - [ ] No 27 | 28 | **What version of Python do you have?** 29 | - [ ] Last, Python > 3.11 30 | - [ ] Python >= 3.8 31 | - [ ] PIs you have Python<3.8 pease install last version of python 32 | 33 | **What version of operating system do you have?** 34 | - [ ] Windows 35 | - [ ] Linux/Ububtu 36 | - [ ] Mac/OSX 37 | 38 | **What type of installation did you perform?** 39 | - [ ] pip3 install -r requirements.txt 40 | - [ ] python3 -m pip install -r requirements.txt 41 | - [ ] Anaconda 42 | - [ ] Container on VS 43 | 44 | **Desktop (please complete the following information):** 45 | - Browser [e.g. chrome] : 46 | - Version [e.g. 112] : 47 | 48 | 49 | **Describe the bug** 50 | A clear and concise description of what the bug is. 51 | 52 | 53 | **Screenshots** 54 | If applicable, add screenshots to help explain your problem. 55 | 56 | 57 | **Additional context** 58 | Add any other context about the problem here. 59 | -------------------------------------------------------------------------------- /AUTOGPT.py: -------------------------------------------------------------------------------- 1 | # !pip install bs4 2 | # !pip install nest_asyncio 3 | 4 | # General 5 | import os 6 | import json 7 | import pandas as pd 8 | from dotenv import load_dotenv 9 | from pathlib import Path 10 | from json import JSONDecodeError 11 | from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT 12 | from FreeLLM import ChatGPTAPI # FREE CHATGPT API 13 | from FreeLLM import HuggingChatAPI # FREE HUGGINGCHAT API 14 | from FreeLLM import BingChatAPI # FREE BINGCHAT API 15 | from FreeLLM import BardChatAPI # FREE GOOGLE BARD API 16 | from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent 17 | from langchain.docstore.document import Document 18 | import asyncio 19 | import nest_asyncio 20 | 21 | 22 | # Needed synce jupyter runs an async eventloop 23 | nest_asyncio.apply() 24 | # [Optional] Set the environment variable Tokenizers_PARALLELISM to false to get rid of the warning 25 | # os.environ["TOKENIZERS_PARALLELISM"] = "false" 26 | 27 | load_dotenv() 28 | select_model = input( 29 | "Select the model you want to use (1, 2, 3 or 4) \n \ 30 | 1) ChatGPT \n \ 31 | 2) HuggingChat \n \ 32 | 3) BingChat \n \ 33 | 4) Google Bard \n \ 34 | >>> " 35 | ) 36 | 37 | if select_model == "1": 38 | CG_TOKEN = os.getenv("CHATGPT_TOKEN", "your-chatgpt-token") 39 | 40 | if CG_TOKEN != "your-chatgpt-token": 41 | os.environ["CHATGPT_TOKEN"] = CG_TOKEN 42 | else: 43 | raise ValueError( 44 | "ChatGPT Token EMPTY. Edit the .env file and put your ChatGPT token" 45 | ) 46 | 47 | start_chat = os.getenv("USE_EXISTING_CHAT", False) 48 | if os.getenv("USE_GPT4") == "True": 49 | model = "gpt-4" 50 | else: 51 | model = "default" 52 | 53 | llm = ChatGPTAPI.ChatGPT(token=os.environ["CHATGPT_TOKEN"], model=model) 54 | 55 | elif select_model == "2": 56 | emailHF = os.getenv("emailHF", "your-emailHF") 57 | pswHF = os.getenv("pswHF", "your-pswHF") 58 | if emailHF != "your-emailHF" or pswHF != "your-pswHF": 59 | os.environ["emailHF"] = emailHF 60 | os.environ["pswHF"] = pswHF 61 | else: 62 | raise ValueError( 63 | "HuggingChat Token EMPTY. 
 64 |         )
 65 | 
 66 |     llm = HuggingChatAPI.HuggingChat(email=os.environ["emailHF"], psw=os.environ["pswHF"])
 67 | 
 68 | elif select_model == "3":
 69 |     if not os.path.exists("cookiesBing.json"):
 70 |         raise ValueError(
 71 |             "File 'cookiesBing.json' not found! Create it and put your cookies in there in the JSON format."
 72 |         )
 73 |     cookie_path = Path() / "cookiesBing.json"
 74 |     with open("cookiesBing.json", "r") as file:
 75 |         try:
 76 |             file_json = json.loads(file.read())
 77 |         except JSONDecodeError:
 78 |             raise ValueError(
 79 |                 "You did not put your cookies inside 'cookiesBing.json'! You can find the simple guide to get the cookie file here: https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required."
 80 |             )
 81 |     llm = BingChatAPI.BingChat(
 82 |         cookiepath=str(cookie_path), conversation_style="creative"
 83 |     )
 84 | 
 85 | elif select_model == "4":
 86 |     GB_TOKEN = os.getenv("BARDCHAT_TOKEN", "your-googlebard-token")
 87 | 
 88 |     if GB_TOKEN != "your-googlebard-token":
 89 |         os.environ["BARDCHAT_TOKEN"] = GB_TOKEN
 90 |     else:
 91 |         raise ValueError(
 92 |             "GoogleBard Token EMPTY. Edit the .env file and put your GoogleBard token"
 93 |         )
 94 |     cookie_path = os.environ["BARDCHAT_TOKEN"]
 95 |     llm = BardChatAPI.BardChat(cookie=cookie_path)
 96 | 
 97 | 
 98 | HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN", "your-huggingface-token")
 99 | 
100 | if HF_TOKEN != "your-huggingface-token":
101 |     os.environ["HUGGINGFACEHUB_API_TOKEN"] = HF_TOKEN
102 | else:
103 |     raise ValueError(
104 |         "HuggingFace Token EMPTY. Edit the .env file and put your HuggingFace token"
105 |     )
106 | 
107 | # Tools
108 | import os
109 | from contextlib import contextmanager
110 | from typing import Optional
111 | from langchain.agents import tool
112 | from langchain.tools.file_management.read import ReadFileTool
113 | from langchain.tools.file_management.write import WriteFileTool
114 | from tempfile import TemporaryDirectory
115 | 
116 | ROOT_DIR = TemporaryDirectory()
117 | 
118 | 
119 | @contextmanager
120 | def pushd(new_dir):
121 |     """Context manager for changing the current working directory."""
122 |     prev_dir = os.getcwd()
123 |     os.chdir(new_dir)
124 |     try:
125 |         yield
126 |     finally:
127 |         os.chdir(prev_dir)
128 | 
129 | 
130 | @tool
131 | def process_csv(
132 |     csv_file_path: str, instructions: str, output_path: Optional[str] = None
133 | ) -> str:
134 |     """Process a CSV with pandas in a limited REPL.\
135 | Only use this after writing data to disk as a csv file.\
136 | Any figures must be saved to disk to be viewed by the human.\
137 | Instructions should be written in natural language, not code. Assume the dataframe is already loaded."""
138 |     with pushd(ROOT_DIR.name):  # use .name: pushd/os.chdir expects a path string, not the TemporaryDirectory object itself
139 |         try:
140 |             df = pd.read_csv(csv_file_path)
141 |         except Exception as e:
142 |             return f"Error: {e}"
143 |         agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=True)
144 |         if output_path is not None:
145 |             instructions += f" Save output to disk at {output_path}"
146 |         try:
147 |             result = agent.run(instructions)
148 |             return result
149 |         except Exception as e:
150 |             return f"Error: {e}"
151 | 
152 | 
153 | # !pip install playwright
154 | # !playwright install
155 | async def async_load_playwright(url: str) -> str:
156 |     """Load the specified URLs using Playwright and parse using BeautifulSoup."""
157 |     from bs4 import BeautifulSoup
158 |     from playwright.async_api import async_playwright
159 | 
160 |     try:
161 |         print(">>> WARNING <<<")
162 |         print(
163 |             "If you are running this for the first time, you need to install Playwright"
164 |         )
165 |         print(">>> AUTO INSTALLING PLAYWRIGHT <<<")
166 |         os.system("playwright install")
167 |         print(">>> PLAYWRIGHT INSTALLED <<<")
168 |     except:
169 |         print(">>> PLAYWRIGHT ALREADY INSTALLED <<<")
170 |         pass
171 |     results = ""
172 |     async with async_playwright() as p:
173 |         browser = await p.chromium.launch(headless=True)
174 |         try:
175 |             page = await browser.new_page()
176 |             await page.goto(url)
177 | 
178 |             page_source = await page.content()
179 |             soup = BeautifulSoup(page_source, "html.parser")
180 | 
181 |             for script in soup(["script", "style"]):
182 |                 script.extract()
183 | 
184 |             text = soup.get_text()
185 |             lines = (line.strip() for line in text.splitlines())
186 |             chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
187 |             results = "\n".join(chunk for chunk in chunks if chunk)
188 |         except Exception as e:
189 |             results = f"Error: {e}"
190 |         await browser.close()
191 |     return results
192 | 
193 | 
194 | def run_async(coro):
195 |     event_loop = asyncio.get_event_loop()
196 |     return event_loop.run_until_complete(coro)
197 | 
198 | 
199 | @tool
200 | def browse_web_page(url: str) -> str:
201 |     """Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
202 |     return run_async(async_load_playwright(url))
203 | 
204 | 
205 | from langchain.tools import BaseTool, DuckDuckGoSearchRun
206 | from langchain.text_splitter import RecursiveCharacterTextSplitter
207 | 
208 | from pydantic import Field
209 | from langchain.chains.qa_with_sources.loading import (
210 |     load_qa_with_sources_chain,
211 |     BaseCombineDocumentsChain,
212 | )
213 | 
214 | 
215 | def _get_text_splitter():
216 |     return RecursiveCharacterTextSplitter(
217 |         # Set a really small chunk size, just to show.
218 |         chunk_size=500,
219 |         chunk_overlap=20,
220 |         length_function=len,
221 |     )
222 | 
223 | 
224 | class WebpageQATool(BaseTool):
225 |     name = "query_webpage"
226 |     description = (
227 |         "Browse a webpage and retrieve the information relevant to the question."
228 | ) 229 | text_splitter: RecursiveCharacterTextSplitter = Field( 230 | default_factory=_get_text_splitter 231 | ) 232 | qa_chain: BaseCombineDocumentsChain 233 | 234 | def _run(self, url: str, question: str) -> str: 235 | """Useful for browsing websites and scraping the text information.""" 236 | result = browse_web_page.run(url) 237 | docs = [Document(page_content=result, metadata={"source": url})] 238 | web_docs = self.text_splitter.split_documents(docs) 239 | results = [] 240 | # TODO: Handle this with a MapReduceChain 241 | for i in range(0, len(web_docs), 4): 242 | input_docs = web_docs[i : i + 4] 243 | window_result = self.qa_chain( 244 | {"input_documents": input_docs, "question": question}, 245 | return_only_outputs=True, 246 | ) 247 | results.append(f"Response from window {i} - {window_result}") 248 | results_docs = [ 249 | Document(page_content="\n".join(results), metadata={"source": url}) 250 | ] 251 | return self.qa_chain( 252 | {"input_documents": results_docs, "question": question}, 253 | return_only_outputs=True, 254 | ) 255 | 256 | async def _arun(self, url: str, question: str) -> str: 257 | raise NotImplementedError 258 | 259 | 260 | query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm)) 261 | 262 | 263 | # Memory 264 | import faiss 265 | from langchain.vectorstores import FAISS 266 | from langchain.docstore import InMemoryDocstore 267 | from Embedding import HuggingFaceEmbedding # EMBEDDING FUNCTION 268 | 269 | from langchain.tools.human.tool import HumanInputRun 270 | 271 | # Define your embedding model 272 | embeddings_model = HuggingFaceEmbedding.newEmbeddingFunction 273 | embedding_size = 1536 # if you change this you need to change also in Embedding/HuggingFaceEmbedding.py 274 | index = faiss.IndexFlatL2(embedding_size) 275 | vectorstore = FAISS(embeddings_model, index, InMemoryDocstore({}), {}) 276 | 277 | 278 | # !pip install duckduckgo_search 279 | web_search = DuckDuckGoSearchRun() 280 | 281 | tools = [ 282 | web_search, 283 | WriteFileTool(), 284 | ReadFileTool(), 285 | process_csv, 286 | query_website_tool, 287 | # HumanInputRun(), # Activate if you want the permit asking for help from the human 288 | ] 289 | 290 | 291 | agent = AutoGPT.from_llm_and_tools( 292 | ai_name="BingChat", 293 | ai_role="Assistant", 294 | tools=tools, 295 | llm=llm, 296 | memory=vectorstore.as_retriever(search_kwargs={"k": 5}), 297 | # human_in_the_loop=True, # Set to True if you want to add feedback at each step. 298 | ) 299 | # agent.chain.verbose = True 300 | 301 | agent.run([input("Enter the objective of the AI system: (Be realistic!) 
")]) 302 | -------------------------------------------------------------------------------- /BABYAGI.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from dotenv import load_dotenv 4 | from pathlib import Path 5 | from json import JSONDecodeError 6 | from collections import deque 7 | from typing import Dict, List, Optional, Any 8 | from langchain.vectorstores import FAISS 9 | from langchain import HuggingFaceHub 10 | from langchain.docstore import InMemoryDocstore 11 | from langchain import LLMChain, PromptTemplate 12 | from langchain.llms import BaseLLM 13 | from FreeLLM import HuggingChatAPI # FREE HUGGINGCHAT API 14 | from FreeLLM import ChatGPTAPI # FREE CHATGPT API 15 | from FreeLLM import BingChatAPI # FREE BINGCHAT API 16 | from FreeLLM import BardChatAPI # FREE GOOGLE BARD API 17 | from langchain.vectorstores.base import VectorStore 18 | from pydantic import BaseModel, Field 19 | from langchain.chains.base import Chain 20 | from langchain.experimental import BabyAGI 21 | from BabyAgi import BabyAGIMod 22 | 23 | import faiss 24 | 25 | load_dotenv() 26 | 27 | select_model = input( 28 | "Select the model you want to use (1, 2, 3 or 4) \n \ 29 | 1) ChatGPT \n \ 30 | 2) HuggingChat \n \ 31 | 3) BingChat (NOT GOOD RESULT)\n \ 32 | 4) BardChat \n \ 33 | >>> " 34 | ) 35 | 36 | if select_model == "1": 37 | CG_TOKEN = os.getenv("CHATGPT_TOKEN", "your-chatgpt-token") 38 | 39 | if CG_TOKEN != "your-chatgpt-token": 40 | os.environ["CHATGPT_TOKEN"] = CG_TOKEN 41 | else: 42 | raise ValueError( 43 | "ChatGPT Token EMPTY. Edit the .env file and put your ChatGPT token" 44 | ) 45 | 46 | start_chat = os.getenv("USE_EXISTING_CHAT", False) 47 | if os.getenv("USE_GPT4") == "True": 48 | model = "gpt-4" 49 | else: 50 | model = "default" 51 | 52 | llm = ChatGPTAPI.ChatGPT(token=os.environ["CHATGPT_TOKEN"], model=model) 53 | 54 | elif select_model == "2": 55 | emailHF = os.getenv("emailHF", "your-emailHF") 56 | pswHF = os.getenv("pswHF", "your-pswHF") 57 | if emailHF != "your-emailHF" or pswHF != "your-pswHF": 58 | os.environ["emailHF"] = emailHF 59 | os.environ["pswHF"] = pswHF 60 | else: 61 | raise ValueError( 62 | "HuggingChat Token EMPTY. Edit the .env file and put your HuggingChat credentials" 63 | ) 64 | 65 | llm = HuggingChatAPI.HuggingChat(email=os.environ["emailHF"], psw=os.environ["pswHF"]) 66 | 67 | elif select_model == "3": 68 | if not os.path.exists("cookiesBing.json"): 69 | raise ValueError( 70 | "File 'cookiesBing.json' not found! Create it and put your cookies in there in the JSON format." 71 | ) 72 | cookie_path = Path() / "cookiesBing.json" 73 | with open("cookiesBing.json", "r") as file: 74 | try: 75 | file_json = json.loads(file.read()) 76 | except JSONDecodeError: 77 | raise ValueError( 78 | "You did not put your cookies inside 'cookiesBing.json'! You can find the simple guide to get the cookie file here: https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required." 79 | ) 80 | llm = BingChatAPI.BingChat( 81 | cookiepath=str(cookie_path), conversation_style="creative" 82 | ) 83 | 84 | elif select_model == "4": 85 | GB_TOKEN = os.getenv("BARDCHAT_TOKEN", "your-googlebard-token") 86 | 87 | if GB_TOKEN != "your-googlebard-token": 88 | os.environ["BARDCHAT_TOKEN"] = GB_TOKEN 89 | else: 90 | raise ValueError( 91 | "GoogleBard Token EMPTY. 
Edit the .env file and put your GoogleBard token"
 92 |         )
 93 |     cookie_path = os.environ["BARDCHAT_TOKEN"]
 94 |     llm = BardChatAPI.BardChat(cookie=cookie_path)
 95 | 
 96 | 
 97 | 
 98 | 
 99 | HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN", "your-huggingface-token")
100 | 
101 | if HF_TOKEN != "your-huggingface-token":
102 |     os.environ["HUGGINGFACEHUB_API_TOKEN"] = HF_TOKEN
103 | else:
104 |     raise ValueError(
105 |         "HuggingFace Token EMPTY. Edit the .env file and put your HuggingFace token"
106 |     )
107 | 
108 | 
109 | from Embedding import HuggingFaceEmbedding # EMBEDDING FUNCTION
110 | 
111 | # Define your embedding model
112 | embeddings_model = HuggingFaceEmbedding.newEmbeddingFunction
113 | 
114 | embedding_size = 1536
115 | index = faiss.IndexFlatL2(embedding_size)
116 | vectorstore = FAISS(embeddings_model, index, InMemoryDocstore({}), {})
117 | 
118 | print(vectorstore)
119 | 
120 | # DEFINE TOOL
121 | from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
122 | from langchain import OpenAI, LLMChain
123 | from langchain.tools import BaseTool, DuckDuckGoSearchRun
124 | 
125 | 
126 | todo_prompt = PromptTemplate.from_template(
127 |     "I need to create a plan to complete my GOAL. Can you help me create a TODO list? Create only the todo list for this objective: '{objective}'."
128 | )
129 | todo_chain = LLMChain(llm=llm, prompt=todo_prompt)
130 | search = DuckDuckGoSearchRun()
131 | tools = [
132 |     Tool(
133 |         name="Search",
134 |         func=search.run,
135 |         description="useful for when you need to answer questions about current events",
136 |     ),
137 |     Tool(
138 |         name="TODO",
139 |         func=todo_chain.run,
140 |         description="useful for when you need to create a task list to complete an objective. You have to give an Input: an objective for which to create a to-do list. Output: just a list of tasks to do for that objective. It is important to give the target input 'objective' correctly!",
141 |     ),
142 | ]
143 | 
144 | 
145 | prefix = """Can you help me to perform one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}."""
146 | suffix = """Question: {task}.
147 | {agent_scratchpad}"""
148 | prompt = ZeroShotAgent.create_prompt(
149 |     tools,
150 |     prefix=prefix,
151 |     suffix=suffix,
152 |     input_variables=["objective", "task", "context", "agent_scratchpad"],
153 | )
154 | 
155 | llm_chain = LLMChain(llm=llm, prompt=prompt)
156 | tool_names = [tool.name for tool in tools]
157 | agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
158 | agent_executor = AgentExecutor.from_agent_and_tools(
159 |     agent=agent, tools=tools, verbose=True
160 | )
161 | 
162 | # START
163 | 
164 | # Logging of LLMChains
165 | verbose = False
166 | 
167 | int_max_iterations = input(
168 |     "Enter the maximum number of iterations: (Suggested: from 3 to 5) "
169 | )
170 | max_iterations = int(int_max_iterations)
171 | 
172 | if input("Do you want to store the results? (y/n) ") == "y":
173 |     store_results = True
174 | else:
175 |     store_results = False
176 | 
177 | 
178 | # If None, will keep on going forever
179 | max_iterations: Optional[int] = max_iterations
180 | baby_agi = BabyAGIMod.BabyAGI.from_llm(
181 |     llm=llm,
182 |     vectorstore=vectorstore,
183 |     task_execution_chain=agent_executor,
184 |     verbose=verbose,
185 |     max_iterations=max_iterations,
186 |     store=store_results,
187 | )
188 | 
189 | 
190 | # DEFINE THE OBJECTIVE - MODIFY THIS
191 | OBJECTIVE = input("Enter the objective of the AI system: (Be realistic!) 
") 192 | 193 | 194 | baby_agi({"objective": OBJECTIVE}) 195 | -------------------------------------------------------------------------------- /BabyAgi/BabyAGIMod.py: -------------------------------------------------------------------------------- 1 | """BabyAGI agent.""" 2 | from collections import deque 3 | from typing import Any, Dict, List, Optional 4 | 5 | from pydantic import BaseModel, Field 6 | from langchain import LLMChain, PromptTemplate 7 | 8 | from langchain.base_language import BaseLanguageModel 9 | from langchain.callbacks.manager import CallbackManagerForChainRun 10 | from langchain.chains.base import Chain 11 | from langchain.vectorstores.base import VectorStore 12 | import os 13 | from .task_creation import TaskCreationChain 14 | from .task_execution import TaskExecutionChain 15 | from .task_prioritization import TaskPrioritizationChain 16 | 17 | class BabyAGI(Chain, BaseModel): 18 | """Controller model for the BabyAGI agent.""" 19 | 20 | task_list: deque = Field(default_factory=deque) 21 | task_creation_chain: Chain = Field(...) 22 | task_prioritization_chain: Chain = Field(...) 23 | execution_chain: Chain = Field(...) 24 | task_id_counter: int = Field(1) 25 | vectorstore: VectorStore = Field(init=False) 26 | max_iterations: Optional[int] = None 27 | store: Optional[bool] = False 28 | write_step: Optional[int] = 0 29 | 30 | class Config: 31 | """Configuration for this pydantic object.""" 32 | 33 | arbitrary_types_allowed = True 34 | 35 | def add_task(self, task: Dict) -> None: 36 | self.task_list.append(task) 37 | 38 | def print_task_list(self) -> None: 39 | print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m") 40 | for t in self.task_list: 41 | print(str(t["task_id"]) + ": " + t["task_name"]) 42 | 43 | def print_next_task(self, task: Dict) -> None: 44 | print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m") 45 | print(str(task["task_id"]) + ": " + task["task_name"]) 46 | 47 | def print_task_result(self, result: str) -> None: 48 | print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m") 49 | print(result) 50 | 51 | @property 52 | def input_keys(self) -> List[str]: 53 | return ["objective"] 54 | 55 | @property 56 | def output_keys(self) -> List[str]: 57 | return [] 58 | 59 | def get_next_task( 60 | self, result: str, task_description: str, objective: str 61 | ) -> List[Dict]: 62 | """Get the next task.""" 63 | task_names = [t["task_name"] for t in self.task_list] 64 | 65 | incomplete_tasks = ", ".join(task_names) 66 | response = self.task_creation_chain.run( 67 | result=result, 68 | task_description=task_description, 69 | incomplete_tasks=incomplete_tasks, 70 | objective=objective, 71 | ) 72 | new_tasks = response.split("\n") 73 | return [ 74 | {"task_name": task_name} for task_name in new_tasks if task_name.strip() 75 | ] 76 | 77 | def prioritize_tasks(self, this_task_id: int, objective: str) -> List[Dict]: 78 | """Prioritize tasks.""" 79 | task_names = [t["task_name"] for t in list(self.task_list)] 80 | next_task_id = int(this_task_id) + 1 81 | response = self.task_prioritization_chain.run( 82 | task_names=", ".join(task_names), 83 | next_task_id=str(next_task_id), 84 | objective=objective, 85 | ) 86 | new_tasks = response.split("\n") 87 | prioritized_task_list = [] 88 | for task_string in new_tasks: 89 | if not task_string.strip(): 90 | continue 91 | task_parts = task_string.strip().split(".", 1) 92 | if len(task_parts) == 2: 93 | task_id = task_parts[0].strip() 94 | task_name = task_parts[1].strip() 95 | 
prioritized_task_list.append(
 96 |                     {"task_id": task_id, "task_name": task_name}
 97 |                 )
 98 |         return prioritized_task_list
 99 | 
100 |     def _get_top_tasks(self, query: str, k: int) -> List[str]:
101 |         """Get the top k tasks based on the query."""
102 |         results = self.vectorstore.similarity_search(query, k=k)
103 |         if not results:
104 |             return []
105 |         return [str(item.metadata["task"]) for item in results]
106 | 
107 |     def execute_task(self, objective: str, task: str, k: int = 5) -> str:
108 |         """Execute a task."""
109 |         context = self._get_top_tasks(query=objective, k=k)
110 |         return self.execution_chain.run(
111 |             objective=objective, context="\n".join(context), task=task
112 |         )
113 | 
114 |     def _call(
115 |         self,
116 |         inputs: Dict[str, Any],
117 |         run_manager: Optional[CallbackManagerForChainRun] = None,
118 |     ) -> Dict[str, Any]:
119 |         """Run the agent."""
120 |         objective = inputs["objective"]
121 |         first_task = inputs.get("first_task", "Make a todo list")
122 |         self.add_task({"task_id": 1, "task_name": first_task})
123 |         num_iters = 0
124 | 
125 | 
126 |         dir_name=""
127 |         if self.store:
128 |             try:
129 |                 # create a directory to store the result of every task
130 |                 os.mkdir("BABYAGI_RESULTS_FOR_" + objective.replace(" ", "_"))
131 |                 dir_name = "BABYAGI_RESULTS_FOR_" + objective.replace(" ", "_")
132 |                 self.write_step = 0
133 |             except:
134 |                 print("ATTENTION: directory already exists. Delete it if you want to store the results of every task")
135 |                 self.store = False
136 | 
137 |         while True:
138 |             if self.task_list:
139 |                 self.print_task_list()
140 | 
141 |                 # Step 1: Pull the first task
142 |                 task = self.task_list.popleft()
143 |                 self.print_next_task(task)
144 | 
145 |                 # Step 2: Execute the task
146 |                 result = self.execute_task(objective, task["task_name"])
147 |                 this_task_id = int(task["task_id"])  # NOTE: this cast can raise a ValueError when the id is not numeric and should be fixed
148 |                 self.print_task_result(result)
149 | 
150 |                 if self.store:
151 |                     # save the result in a file
152 |                     self.write_step += 1
153 |                     with open(dir_name + "/" + str(self.write_step) + ".txt", "w") as f:
154 |                         f.write(result)
155 |                     print("<> : result saved in " + dir_name + "/" + str(self.write_step) + ".txt")
156 | 
157 |                 # Step 3: Store the result in the vector store (FAISS here; Pinecone in the original BabyAGI)
158 |                 result_id = f"result_{task['task_id']}"
159 |                 self.vectorstore.add_texts(
160 |                     texts=[result],
161 |                     metadatas=[{"task": task["task_name"]}],
162 |                     ids=[result_id],
163 |                 )
164 | 
165 |                 # Step 4: Create new tasks and reprioritize task list
166 |                 new_tasks = self.get_next_task(result, task["task_name"], objective)
167 |                 for new_task in new_tasks:
168 |                     self.task_id_counter += 1
169 |                     new_task.update({"task_id": self.task_id_counter})
170 |                     self.add_task(new_task)
171 |                 self.task_list = deque(self.prioritize_tasks(this_task_id, objective))
172 |             num_iters += 1
173 |             if self.max_iterations is not None and num_iters == self.max_iterations:
174 |                 print(
175 |                     "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
176 |                 )
177 | 
178 |                 if self.store:
179 |                     # create the final file by appending each step's result in write_step order
180 |                     final_file = open(dir_name + "/" + "final.txt", "w")
181 |                     all_step = [s for s in os.listdir(dir_name) if s != "final.txt"]  # skip the output file itself
182 |                     all_step.sort(key=lambda name: int(name.split(".")[0]))  # sort numerically: a plain sort would put "10.txt" before "2.txt"
183 |                     for step in all_step:
184 |                         # append the result of each step to the final file
185 |                         with open(dir_name + "/" + step, "r") as f:
186 |                             final_file.write(f.read())
187 |                     final_file.close()
188 | 
189 |                     print(
190 |                         "\033[91m\033[1m" + "\n*****RESULT STORED*****\n" + "\033[0m\033[0m"
191 |                     )
192 | 
193 |                 break
194 |         return {}
195 | 
196 |     @classmethod
197 |     def from_llm(
198 |         cls,
199 |         llm: BaseLanguageModel,
200 |         vectorstore: VectorStore,
201 |         verbose: bool = False,
202 |         task_execution_chain: Optional[Chain] = None,
203 |         **kwargs: Dict[str, Any],
204 |     ) -> "BabyAGI":
205 |         """Initialize the BabyAGI Controller."""
206 |         task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
207 |         task_prioritization_chain = TaskPrioritizationChain.from_llm(
208 |             llm, verbose=verbose
209 |         )
210 |         if task_execution_chain is None:
211 |             execution_chain: Chain = TaskExecutionChain.from_llm(llm, verbose=verbose)
212 |         else:
213 |             execution_chain = task_execution_chain
214 |         return cls(
215 |             task_creation_chain=task_creation_chain,
216 |             task_prioritization_chain=task_prioritization_chain,
217 |             execution_chain=execution_chain,
218 |             vectorstore=vectorstore,
219 |             **kwargs,
220 |         )
--------------------------------------------------------------------------------
/BabyAgi/task_creation.py:
--------------------------------------------------------------------------------
 1 | from langchain import LLMChain, PromptTemplate
 2 | from langchain.base_language import BaseLanguageModel
 3 | 
 4 | class TaskCreationChain(LLMChain):
 5 |     """Chain to generate tasks."""
 6 | 
 7 |     @classmethod
 8 |     def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
 9 |         """Get the response parser."""
10 |         task_creation_template = (
11 |             "Can you help me"
12 |             " to create new tasks with the following objective: {objective}."
13 |             " The last completed task has the result: {result}."
14 |             " This result was based on this task description: {task_description}."
15 |             " These are incomplete tasks: {incomplete_tasks}."
16 |             " Based on the result, create new tasks to be completed."
17 |             " Return the tasks as a list without anything else."
18 |         )
19 |         prompt = PromptTemplate(
20 |             template=task_creation_template,
21 |             input_variables=[
22 |                 "result",
23 |                 "task_description",
24 |                 "incomplete_tasks",
25 |                 "objective",
26 |             ],
27 |         )
28 |         return cls(prompt=prompt, llm=llm, verbose=verbose)
--------------------------------------------------------------------------------
/BabyAgi/task_execution.py:
--------------------------------------------------------------------------------
 1 | from langchain import LLMChain, PromptTemplate
 2 | from langchain.base_language import BaseLanguageModel
 3 | 
 4 | 
 5 | class TaskExecutionChain(LLMChain):
 6 |     """Chain to execute tasks."""
 7 | 
 8 |     @classmethod
 9 |     def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
10 |         """Get the response parser."""
11 |         execution_template = (
12 |             "Can you help me to perform one task based on the following objective: "
13 |             "{objective}."
14 |             " Take into account these previously completed tasks: {context}."
15 |             " Can you perform this task? Your task: {task}. Response:"
Response:" 16 | ) 17 | prompt = PromptTemplate( 18 | template=execution_template, 19 | input_variables=["objective", "context", "task"], 20 | ) 21 | return cls(prompt=prompt, llm=llm, verbose=verbose) -------------------------------------------------------------------------------- /BabyAgi/task_prioritization.py: -------------------------------------------------------------------------------- 1 | from langchain import LLMChain, PromptTemplate 2 | from langchain.base_language import BaseLanguageModel 3 | 4 | 5 | class TaskPrioritizationChain(LLMChain): 6 | """Chain to prioritize tasks.""" 7 | 8 | @classmethod 9 | def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain: 10 | """Get the response parser.""" 11 | task_prioritization_template = ( 12 | "Please help me to cleaning the formatting of " 13 | "and reprioritizing the following tasks: {task_names}." 14 | "Consider the ultimate objective of your team: {objective}." 15 | "Do not remove any tasks. Return ONLY the result as a numbered list without anything else, like:\n" 16 | "1. First task\n" 17 | "2. Second task\n" 18 | "Start the task list with number {next_task_id}." 19 | ) 20 | prompt = PromptTemplate( 21 | template=task_prioritization_template, 22 | input_variables=["task_names", "next_task_id", "objective"], 23 | ) 24 | return cls(prompt=prompt, llm=llm, verbose=verbose) 25 | -------------------------------------------------------------------------------- /Camel.py: -------------------------------------------------------------------------------- 1 | from langchain.prompts.chat import ( 2 | SystemMessagePromptTemplate, 3 | HumanMessagePromptTemplate, 4 | ) 5 | from langchain.schema import ( 6 | AIMessage, 7 | HumanMessage, 8 | SystemMessage, 9 | BaseMessage, 10 | ) 11 | import os 12 | import json 13 | from pathlib import Path 14 | from json import JSONDecodeError 15 | from langchain.llms.base import LLM 16 | from typing import Optional, List, Mapping, Any 17 | from FreeLLM import HuggingChatAPI # FREE HUGGINGCHAT API 18 | from FreeLLM import ChatGPTAPI # FREE CHATGPT API 19 | from FreeLLM import BingChatAPI # FREE BINGCHAT API 20 | import streamlit as st 21 | from streamlit_chat_media import message 22 | import os 23 | 24 | st.set_page_config( 25 | page_title="FREE AUTOGPT 🚀 by Intelligenza Artificiale Italia", 26 | page_icon="🚀", 27 | layout="wide", 28 | menu_items={ 29 | "Get help": "https://www.intelligenzaartificialeitalia.net/", 30 | "Report a bug": "mailto:servizi@intelligenzaartificialeitalia.net", 31 | "About": "# *🚀 FREE AUTOGPT 🚀* ", 32 | }, 33 | ) 34 | 35 | 36 | st.markdown( 37 | "", unsafe_allow_html=True 38 | ) 39 | 40 | 41 | class CAMELAgent: 42 | def __init__( 43 | self, 44 | system_message: SystemMessage, 45 | model: None, 46 | ) -> None: 47 | self.system_message = system_message.content 48 | self.model = model 49 | self.init_messages() 50 | 51 | def reset(self) -> None: 52 | self.init_messages() 53 | return self.stored_messages 54 | 55 | def init_messages(self) -> None: 56 | self.stored_messages = [self.system_message] 57 | 58 | def update_messages(self, message: BaseMessage) -> List[BaseMessage]: 59 | self.stored_messages.append(message) 60 | return self.stored_messages 61 | 62 | def step( 63 | self, 64 | input_message: HumanMessage, 65 | ) -> AIMessage: 66 | messages = self.update_messages(input_message) 67 | output_message = self.model(str(input_message.content)) 68 | self.update_messages(output_message) 69 | print(f"AI Assistant:\n\n{output_message}\n\n") 70 | return output_message 71 | 72 | 73 
 74 | assistant_role_name = col1.text_input("Assistant Role Name", "Python Programmer")
 75 | user_role_name = col2.text_input("User Role Name", "Stock Trader")
 76 | task = st.text_area("Task", "Develop a trading bot for the stock market")
 77 | word_limit = st.number_input("Word Limit", 10, 1500, 50)
 78 | 
 79 | if not os.path.exists("cookiesHuggingChat.json"):
 80 |     raise ValueError(
 81 |         "File 'cookiesHuggingChat.json' not found! Create it and put your cookies in there in the JSON format."
 82 |     )
 83 | cookie_path = Path() / "cookiesHuggingChat.json"
 84 | with open("cookiesHuggingChat.json", "r") as file:
 85 |     try:
 86 |         file_json = json.loads(file.read())
 87 |     except JSONDecodeError:
 88 |         raise ValueError(
 89 |             "You did not put your cookies inside 'cookiesHuggingChat.json'! You can find the simple guide to get the cookie file here: https://github.com/IntelligenzaArtificiale/Free-Auto-GPT"
 90 |         )
 91 | llm = HuggingChatAPI.HuggingChat(cookiepath = str(cookie_path))
 92 | 
 93 | 
 94 | if st.button("Start Autonomous AI AGENT"):
 95 |     task_specifier_sys_msg = SystemMessage(content="You can make a task more specific.")
 96 |     task_specifier_prompt = """Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.
 97 | Please make it more specific. Be creative and imaginative.
 98 | Please reply with the specified task in {word_limit} words or less. Do not add anything else."""
 99 |     task_specifier_template = HumanMessagePromptTemplate.from_template(
100 |         template=task_specifier_prompt
101 |     )
102 | 
103 |     task_specify_agent = CAMELAgent(
104 |         task_specifier_sys_msg, llm
105 |     )
106 |     task_specifier_msg = task_specifier_template.format_messages(
107 |         assistant_role_name=assistant_role_name,
108 |         user_role_name=user_role_name,
109 |         task=task,
110 |         word_limit=word_limit,
111 |     )[0]
112 | 
113 |     specified_task_msg = task_specify_agent.step(task_specifier_msg)
114 | 
115 |     print(f"Specified task: {specified_task_msg}")
116 |     message(
117 |         f"Specified task: {specified_task_msg}",
118 |         allow_html=True,
119 |         key="specified_task",
120 |         avatar_style="adventurer",
121 |     )
122 | 
123 |     specified_task = specified_task_msg
124 | 
125 |     # messages.py
126 |     from langchain.prompts.chat import (
127 |         SystemMessagePromptTemplate,
128 |         HumanMessagePromptTemplate,
129 |     )
130 | 
131 |     assistant_inception_prompt = """Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!
132 | We share a common interest in collaborating to successfully complete a task.
133 | You must help me to complete the task.
134 | Here is the task: {task}. Never forget our task, and focus only on completing the task; do not add anything else!
135 | I must instruct you based on your expertise and my needs to complete the task.
136 | 
137 | I must give you one instruction at a time.
138 | It is important that when the task "{task}" is completed, you tell {user_role_name} that the task has been completed and to stop!
139 | You must write a specific solution that appropriately completes the requested instruction.
140 | Do not add anything else other than your solution to my instruction.
141 | You are never supposed to ask me any questions; you only answer questions.
142 | REMEMBER: NEVER INSTRUCT ME!
143 | Your solution must be declarative sentences and simple present tense.
144 | Unless I say the task is completed, you should always start with:
145 | 
146 | Solution: <YOUR_SOLUTION>
147 | 
148 | <YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.
149 | Always end <YOUR_SOLUTION> with: Next request."""
150 | 
151 |     user_inception_prompt = """Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.
152 | We share a common interest in collaborating to successfully complete a task.
153 | I must help you to complete the task.
154 | Here is the task: {task}. Never forget our task!
155 | You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:
156 | 
157 | 1. Instruct with a necessary input:
158 | Instruction: <YOUR_INSTRUCTION>
159 | Input: <YOUR_INPUT>
160 | 
161 | 2. Instruct without any input:
162 | Instruction: <YOUR_INSTRUCTION>
163 | Input: None
164 | 
165 | The "Instruction" describes a task or question. The paired "Input" provides further context or information for the requested "Instruction".
166 | 
167 | You must give me one instruction at a time.
168 | I must write a response that appropriately completes the requested instruction.
169 | I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.
170 | You should instruct me, not ask me questions.
171 | Now you must start to instruct me using the two ways described above.
172 | Do not add anything else other than your instruction and the optional corresponding input!
173 | Keep giving me instructions and necessary inputs until you think the task is completed.
174 | It is important that when the task "{task}" is completed, you must only reply with a single word <CAMEL_TASK_DONE>.
175 | Never say <CAMEL_TASK_DONE> unless my responses have solved your task!
176 | It is important that when the task "{task}" is completed, you must only reply with a single word <CAMEL_TASK_DONE>."""
177 | 
178 |     def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str):
179 |         assistant_sys_template = SystemMessagePromptTemplate.from_template(
180 |             template=assistant_inception_prompt
181 |         )
182 |         assistant_sys_msg = assistant_sys_template.format_messages(
183 |             assistant_role_name=assistant_role_name,
184 |             user_role_name=user_role_name,
185 |             task=task,
186 |         )[0]
187 | 
188 |         user_sys_template = SystemMessagePromptTemplate.from_template(
189 |             template=user_inception_prompt
190 |         )
191 |         user_sys_msg = user_sys_template.format_messages(
192 |             assistant_role_name=assistant_role_name,
193 |             user_role_name=user_role_name,
194 |             task=task,
195 |         )[0]
196 | 
197 |         return assistant_sys_msg, user_sys_msg
198 | 
199 |     # define the role system messages
200 |     assistant_sys_msg, user_sys_msg = get_sys_msgs(
201 |         assistant_role_name, user_role_name, specified_task
202 |     )
203 | 
204 |     # AI ASSISTANT setup |-> add the agent LLM MODEL HERE <-|
205 |     assistant_agent = CAMELAgent(assistant_sys_msg, llm)
206 | 
207 |     # AI USER setup |-> add the agent LLM MODEL HERE <-|
208 |     user_agent = CAMELAgent(user_sys_msg, llm)
209 | 
210 |     # Reset agents
211 |     assistant_agent.reset()
212 |     user_agent.reset()
213 | 
214 |     # Initialize chats
215 |     assistant_msg = HumanMessage(
216 |         content=(
217 |             f"{user_sys_msg}. "
218 |             "Now start to give me instructions one by one. "
219 |             "Only reply with Instruction and Input."
220 |         )
221 |     )
222 | 
223 |     user_msg = HumanMessage(content=f"{assistant_sys_msg.content}")
224 |     user_msg = assistant_agent.step(user_msg)
225 |     message(
226 |         f"AI Assistant ({assistant_role_name}):\n\n{user_msg}\n\n",
227 |         is_user=False,
228 |         allow_html=True,
229 |         key="0_assistant",
230 |         avatar_style="pixel-art",
231 |     )
232 |     print(f"Original task prompt:\n{task}\n")
233 |     print(f"Specified task prompt:\n{specified_task}\n")
234 | 
235 |     chat_turn_limit, n = 30, 0
236 |     while n < chat_turn_limit:
237 |         n += 1
238 |         user_ai_msg = user_agent.step(assistant_msg)
239 |         user_msg = HumanMessage(content=user_ai_msg)
240 |         # print(f"AI User ({user_role_name}):\n\n{user_msg}\n\n")
241 |         message(
242 |             f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n",
243 |             is_user=True,
244 |             allow_html=True,
245 |             key=str(n) + "_user",
246 |         )
247 | 
248 |         assistant_ai_msg = assistant_agent.step(user_msg)
249 |         assistant_msg = HumanMessage(content=assistant_ai_msg)
250 |         # print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg}\n\n")
251 |         message(
252 |             f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n",
253 |             is_user=False,
254 |             allow_html=True,
255 |             key=str(n) + "_assistant",
256 |             avatar_style="pixel-art",
257 |         )
258 |         if (
259 |             "<CAMEL_TASK_DONE>" in user_msg.content
260 |             or "task completed" in user_msg.content
261 |         ):
262 |             message("Task completed!", allow_html=True, key="task_done")
263 |             break
264 |         if "Error" in user_msg.content:
265 |             message("Task failed!", allow_html=True, key="task_failed")
266 |             break
--------------------------------------------------------------------------------
/Embedding/HuggingFaceEmbedding.py:
--------------------------------------------------------------------------------
 1 | import requests
 2 | from retry import retry
 3 | import numpy as np
 4 | import os
 5 | 
 6 | 
 7 | # read the HuggingFace token from the environment
 8 | if os.environ.get("HUGGINGFACEHUB_API_TOKEN") is not None:
 9 |     hf_token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
10 | else:
11 |     raise Exception("You must provide the huggingface token")
12 | 
13 | # model_id = "sentence-transformers/all-MiniLM-L6-v2" NOT WORKING FROM 10/05/2023
14 | model_id = "obrizum/all-MiniLM-L6-v2"
15 | api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_id}"
16 | headers = {"Authorization": f"Bearer {hf_token}"}
17 | 
18 | 
19 | def reshape_array(arr):
20 |     # create an array of zeros with shape (1536,)
21 |     new_arr = np.zeros((1536,))
22 |     # copy the original array into the new array
23 |     new_arr[:arr.shape[0]] = arr
24 |     # return the new array
25 |     return new_arr
26 | 
27 | @retry(tries=3, delay=10)
28 | def newEmbeddings(texts):
29 |     response = requests.post(api_url, headers=headers, json={"inputs": texts, "options":{"wait_for_model":True}})
30 |     result = response.json()
31 |     if isinstance(result, list):
32 |         return result
33 |     elif list(result.keys())[0] == "error":
34 |         raise RuntimeError(
35 |             "The model is currently loading, please re-run the query."
36 | ) 37 | 38 | def newEmbeddingFunction(texts): 39 | embeddings = newEmbeddings(texts) 40 | embeddings = np.array(embeddings, dtype=np.float32) 41 | shaped_embeddings = reshape_array(embeddings) 42 | return shaped_embeddings 43 | -------------------------------------------------------------------------------- /FreeLLM/BardChatAPI.py: -------------------------------------------------------------------------------- 1 | from Bard import Chatbot 2 | import asyncio 3 | 4 | import requests 5 | from langchain.llms.base import LLM 6 | from typing import Optional, List, Mapping, Any 7 | import pydantic 8 | import os 9 | from langchain import PromptTemplate, LLMChain 10 | from time import sleep 11 | 12 | 13 | 14 | class BardChat(LLM): 15 | 16 | history_data: Optional[List] = [] 17 | cookie : Optional[str] 18 | chatbot : Optional[Chatbot] = None 19 | 20 | 21 | @property 22 | def _llm_type(self) -> str: 23 | return "custom" 24 | 25 | async def call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 26 | if stop is not None: 27 | pass 28 | #raise ValueError("stop kwargs are not permitted.") 29 | #cookie is a must check 30 | if self.chatbot is None: 31 | if self.cookie is None: 32 | raise ValueError("Need a COOKIE , check https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required for get your COOKIE AND SAVE") 33 | else: 34 | #if self.chatbot == None: 35 | self.chatbot = Chatbot(self.cookie) 36 | 37 | response = self.chatbot.ask(prompt) 38 | #print(response) 39 | response_text = response['content'] 40 | #add to history 41 | self.history_data.append({"prompt":prompt,"response":response_text}) 42 | 43 | return response_text 44 | 45 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 46 | return asyncio.run(self.call(prompt=prompt)) 47 | 48 | @property 49 | def _identifying_params(self) -> Mapping[str, Any]: 50 | """Get the identifying parameters.""" 51 | return {"model": "BardCHAT", "cookie": self.cookie} 52 | 53 | 54 | 55 | #llm = BardChat(cookie = "YOURCOOKIE") #for start new chat 56 | 57 | #print(llm("Hello, how are you?")) 58 | #print(llm("what is AI?")) 59 | #print(llm("Can you resume your previus answer?")) #now memory work well 60 | -------------------------------------------------------------------------------- /FreeLLM/BingChatAPI.py: -------------------------------------------------------------------------------- 1 | from EdgeGPT import Chatbot, ConversationStyle 2 | import asyncio 3 | 4 | import requests 5 | from langchain.llms.base import LLM 6 | from typing import Optional, List, Mapping, Any 7 | import pydantic 8 | import os 9 | from langchain import PromptTemplate, LLMChain 10 | from time import sleep 11 | 12 | 13 | 14 | class BingChat(LLM): 15 | 16 | history_data: Optional[List] = [] 17 | cookiepath : Optional[str] 18 | chatbot : Optional[Chatbot] = None 19 | conversation_style : Optional[str] 20 | conversation_style_on : Optional[ConversationStyle] = ConversationStyle.precise 21 | search_result : Optional[bool] = False 22 | 23 | @property 24 | def _llm_type(self) -> str: 25 | return "custom" 26 | 27 | def select_conversation(self, conversation_style: str): 28 | if conversation_style == "precise": 29 | self.conversation_style_on = ConversationStyle.precise 30 | elif conversation_style == "creative": 31 | self.conversation_style_on = ConversationStyle.creative 32 | elif conversation_style == "balanced": 33 | self.conversation_style_on = ConversationStyle.balanced 34 | else: 35 | raise ValueError("conversation_style must be precise, 
creative or balaced") 36 | self.conversation_style = conversation_style 37 | 38 | async def call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 39 | if stop is not None: 40 | raise ValueError("stop kwargs are not permitted.") 41 | #cookiepath is a must check 42 | if self.chatbot is None: 43 | if self.cookiepath is None: 44 | raise ValueError("Need a COOKIE , check https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required for get your COOKIE AND SAVE") 45 | else: 46 | #if self.chatbot == None: 47 | self.chatbot = await Chatbot.create(cookie_path=self.cookiepath) 48 | 49 | if self.conversation_style is not None: 50 | self.conversation_style_on = self.select_conversation(self.conversation_style) 51 | 52 | response = await self.chatbot.ask(prompt=prompt, conversation_style=self.conversation_style, search_result=self.search_result) 53 | """ 54 | this is a sample response. 55 | {'type': 2, 'invocationId': '0', 56 | 'item': {'messages': [{'text': 'Hello, how are you?', 'author': 'user', 'from': {'id': '985157152860707', 'name': None}, 'createdAt': '2023-05-03T19:51:39.5491558+00:00', 'timestamp': '2023-05-03T19:51:39.5455787+00:00', 'locale': 'en-us', 'market': 'en-us', 'region': 'us', 'messageId': '87f90c57-b2ad-4b3a-b24f-99f633f5332f', 'requestId': '87f90c57-b2ad-4b3a-b24f-99f633f5332f', 'nlu': {'scoredClassification': {'classification': 'CHAT_GPT', 'score': None}, 'classificationRanking': [{'classification': 'CHAT_GPT', 'score': None}], 'qualifyingClassifications': None, 'ood': None, 'metaData': None, 'entities': None}, 'offense': 'None', 'feedback': {'tag': None, 'updatedOn': None, 'type': 'None'}, 'contentOrigin': 'cib', 'privacy': None, 'inputMethod': 'Keyboard'}, {'text': "Hello! I'm doing well, thank you. How can I assist you today?", 'author': 'bot', 'createdAt': '2023-05-03T19:51:41.5176164+00:00', 'timestamp': '2023-05-03T19:51:41.5176164+00:00', 'messageId': '1d013e71-408b-4031-a131-2f5c009fe938', 'requestId': '87f90c57-b2ad-4b3a-b24f-99f633f5332f', 'offense': 'None', 'adaptiveCards': [{'type': 'AdaptiveCard', 'version': '1.0', 'body': [{'type': 'TextBlock', 'text': "Hello! I'm doing well, thank you. 
How can I assist you today?\n", 'wrap': True}]}], 57 | 'sourceAttributions': [], 58 | 'feedback': {'tag': None, 'updatedOn': None, 'type': 'None'}, 59 | 'contentOrigin': 'DeepLeo', 60 | 'privacy': None, 61 | 'suggestedResponses': [{'text': 'What is the weather like today?', 'author': 'user', 'createdAt': '2023-05-03T19:51:42.7502696+00:00', 'timestamp': '2023-05-03T19:51:42.7502696+00:00', 'messageId': 'cd7a84d3-f9bc-47ff-9897-077b2de12e21', 'messageType': 'Suggestion', 'offense': 'Unknown', 'feedback': {'tag': None, 'updatedOn': None, 'type': 'None'}, 'contentOrigin': 'DeepLeo', 'privacy': None}, {'text': 'What is the latest news?', 'author': 'user', 'createdAt': '2023-05-03T19:51:42.7502739+00:00', 'timestamp': '2023-05-03T19:51:42.7502739+00:00', 'messageId': 'b611632a-9a8e-42de-86eb-8eb3b7b8ddbb', 'messageType': 'Suggestion', 'offense': 'Unknown', 'feedback': {'tag': None, 'updatedOn': None, 'type': 'None'}, 'contentOrigin': 'DeepLeo', 'privacy': None}, {'text': 'Tell me a joke.', 'author': 'user', 'createdAt': '2023-05-03T19:51:42.7502743+00:00', 'timestamp': '2023-05-03T19:51:42.7502743+00:00', 'messageId': '70232e45-d7e8-4d77-83fc-752b3cd3355c', 'messageType': 'Suggestion', 'offense': 'Unknown', 'feedback': {'tag': None, 'updatedOn': None, 'type': 'None'}, 'contentOrigin': 'DeepLeo', 'privacy': None}], 'spokenText': 'How can I assist you today?'}], 'firstNewMessageIndex': 1, 'defaultChatName': None, 'conversationId': '51D|BingProd|3E1274E188350D7BE273FFE95E02DD2984DAB52F95260300D0A2937162F98FDA', 'requestId': '87f90c57-b2ad-4b3a-b24f-99f633f5332f', 'conversationExpiryTime': '2023-05-04T01:51:42.8260286Z', 'shouldInitiateConversation': True, 'telemetry': {'metrics': None, 'startTime': '2023-05-03T19:51:39.5456555Z'}, 'throttling': {'maxNumUserMessagesInConversation': 20, 'numUserMessagesInConversation': 1}, 'result': {'value': 'Success', 'serviceVersion': '20230501.30'}}} 62 | """ 63 | response_messages = response.get("item", {}).get("messages", []) 64 | response_text = response_messages[1].get("text", "") 65 | 66 | if response_text == "": 67 | hidden_text = response_messages[1].get("hiddenText", "") 68 | print(">>>> [DEBBUGGER] hidden_text = " + str(hidden_text) + " [DEBBUGGER] <<<<") 69 | print(">>>> [DEBBUGGER] BING CHAT dont is open Like CHATGPT , BingCHAT have refused to respond. [DEBBUGGER] <<<<") 70 | response_text = hidden_text 71 | """ 72 | # reset the chatbot and remake the call 73 | print("[DEBUGGER] Chatbot failed to respond. Resetting and trying again. 
[DEBUGGER]") 74 | print("[ INFO DEBUGGER ] \n\n" + str(response) + "\n\n\n") 75 | sleep(10) 76 | self.chatbot = await Chatbot.create(cookie_path=self.cookiepath) 77 | sleep(2) 78 | response = await self.chatbot.ask(prompt=prompt) 79 | response_messages = response.get("item", {}).get("messages", []) 80 | response_text = response_messages[1].get("text", "") 81 | """ 82 | 83 | #add to history 84 | self.history_data.append({"prompt":prompt,"response":response_text}) 85 | 86 | return response_text 87 | 88 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 89 | return asyncio.run(self.call(prompt=prompt)) 90 | 91 | @property 92 | def _identifying_params(self) -> Mapping[str, Any]: 93 | """Get the identifying parameters.""" 94 | return {"model": "BingCHAT", "cookiepath": self.cookiepath} 95 | 96 | 97 | 98 | #llm = BingChat(cookiepath = "YOUR-COOKIE") #for start new chat 99 | #llm = BingChat(cookiepath = "YOUR-COOKIE", conversation_style = "precise") #precise, creative or balaced 100 | #llm = BingChat(cookiepath = "YOUR-COOKIE" , conversation_style = "precise" , search_result=True) #with web access 101 | 102 | #print(llm("Hello, how are you?")) 103 | #print(llm("what is AI?")) 104 | #print(llm("Can you resume your previus answer?")) #now memory work well 105 | -------------------------------------------------------------------------------- /FreeLLM/ChatGPTAPI.py: -------------------------------------------------------------------------------- 1 | from gpt4_openai import GPT4OpenAI 2 | from langchain.llms.base import LLM 3 | from typing import Optional, List, Mapping, Any 4 | from time import sleep 5 | 6 | 7 | 8 | class ChatGPT(LLM): 9 | 10 | history_data: Optional[List] = [] 11 | token : Optional[str] 12 | chatbot : Optional[GPT4OpenAI] = None 13 | call : int = 0 14 | model : str = "gpt-3" # or gpt-4 15 | plugin_id : Optional[List] = [] 16 | 17 | #### WARNING : for each api call this library will create a new chat on chat.openai.com 18 | 19 | 20 | @property 21 | def _llm_type(self) -> str: 22 | return "custom" 23 | 24 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 25 | if stop is not None: 26 | pass 27 | #raise ValueError("stop kwargs are not permitted.") 28 | #token is a must check 29 | if self.chatbot is None: 30 | if self.token is None: 31 | raise ValueError("Need a token , check https://chat.openai.com/api/auth/session for get your token") 32 | else: 33 | try: 34 | if self.plugin_id == []: 35 | self.chatbot = GPT4OpenAI(token=self.token, model=self.model) 36 | else: 37 | self.chatbot = GPT4OpenAI(token=self.token, model=self.model, plugin_ids=self.plugin_id) 38 | except: 39 | raise ValueError("Error on create chatbot, check your token, or your model") 40 | 41 | response = "" 42 | # OpenAI: 50 requests / hour for each account 43 | if (self.call >= 45 and self.model == "default") or (self.call >= 23 and self.model == "gpt4"): 44 | raise ValueError("You have reached the maximum number of requests per hour ! Help me to Improve. 
Abusing this tool is at your own risk") 45 | else: 46 | sleep(2) 47 | response = self.chatbot(prompt) 48 | 49 | self.call += 1 50 | 51 | #add to history 52 | self.history_data.append({"prompt":prompt,"response":response}) 53 | 54 | return response 55 | 56 | @property 57 | def _identifying_params(self) -> Mapping[str, Any]: 58 | """Get the identifying parameters.""" 59 | return {"model": self.model, "token": self.token} 60 | 61 | 62 | 63 | #llm = ChatGPT(token = "YOUR-COOKIE") # start a new chat 64 | 65 | #llm = ChatGPT(token = "YOUR-COOKIE" , model="gpt4") # REQUIRES a CHATGPT PLUS subscription 66 | 67 | #llm = ChatGPT(token = "YOUR-COOKIE", conversation = "Add-XXXX-XXXX-Conversation-ID") # to reuse a chat already started 68 | 69 | #print(llm("Hello, how are you?")) 70 | #print(llm("what is AI?")) 71 | #print(llm("Can you summarize your previous answer?")) # memory now works well 72 | -------------------------------------------------------------------------------- /FreeLLM/HuggingChatAPI.py: -------------------------------------------------------------------------------- 1 | 2 | from hugchat import hugchat 3 | from hugchat.login import Login 4 | from langchain.llms.base import LLM 5 | from typing import Optional, List, Mapping, Any 6 | from time import sleep 7 | 8 | 9 | 10 | class HuggingChat(LLM): 11 | 12 | history_data: Optional[List] = [] 13 | chatbot : Optional[hugchat.ChatBot] = None 14 | conversation : Optional[str] = "" 15 | email : Optional[str] 16 | psw : Optional[str] 17 | #### WARNING : for each api call this library will create a new chat on huggingface.co/chat 18 | 19 | 20 | @property 21 | def _llm_type(self) -> str: 22 | return "custom" 23 | 24 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 25 | if stop is not None: 26 | pass 27 | #raise ValueError("stop kwargs are not permitted.") 28 | # credentials are required 29 | if self.chatbot is None: 30 | if self.email is None and self.psw is None: 31 | raise ValueError("Email and password are required; please check the documentation on GitHub") 32 | else: 33 | if self.conversation == "": 34 | sign = Login(self.email, self.psw) 35 | cookies = sign.login() 36 | 37 | # Save cookies to usercookies/<email>.json 38 | sign.saveCookies() 39 | 40 | # Create a ChatBot 41 | self.chatbot = hugchat.ChatBot(cookies=cookies.get_dict()) 42 | else: 43 | raise ValueError("Something went wrong") 44 | 45 | 46 | sleep(2) 47 | data = self.chatbot.chat(prompt, temperature=0.5, stream=False) 48 | 49 | 50 | #add to history 51 | self.history_data.append({"prompt":prompt,"response":data}) 52 | 53 | return data 54 | 55 | @property 56 | def _identifying_params(self) -> Mapping[str, Any]: 57 | """Get the identifying parameters.""" 58 | return {"model": "HuggingCHAT"} 59 | 60 | 61 | 62 | #llm = HuggingChat(email = "YOUR-EMAIL", psw = "YOUR-PASSWORD") # start a new chat 63 | 64 | 65 | #print(llm("Hello, how are you?")) 66 | #print(llm("what is AI?")) 67 | #print(llm("Can you summarize your previous answer?")) # memory now works well 68 | 69 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 IntelligenzaArtificiale 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MetaPrompt.py: -------------------------------------------------------------------------------- 1 | import json 2 | from dotenv import load_dotenv 3 | from pathlib import Path 4 | from json import JSONDecodeError 5 | from langchain import LLMChain, PromptTemplate 6 | from FreeLLM import ChatGPTAPI # FREE CHATGPT API 7 | from FreeLLM import HuggingChatAPI # FREE HUGGINGCHAT API 8 | from FreeLLM import BingChatAPI # FREE BINGCHAT API 9 | from FreeLLM import BardChatAPI # FREE GOOGLE BARD API 10 | 11 | from langchain.memory import ConversationBufferWindowMemory 12 | import os 13 | 14 | load_dotenv() 15 | 16 | #### LOG IN FOR CHATGPT FREE LLM 17 | select_model = input( 18 | "Select the model you want to use (1, 2, 3 or 4) \n \ 19 | 1) ChatGPT \n \ 20 | 2) HuggingChat \n \ 21 | 3) BingChat \n \ 22 | 4) Google Bard \n \ 23 | >>> " 24 | ) 25 | 26 | if select_model == "1": 27 | CG_TOKEN = os.getenv("CHATGPT_TOKEN", "your-chatgpt-token") 28 | 29 | if CG_TOKEN != "your-chatgpt-token": 30 | os.environ["CHATGPT_TOKEN"] = CG_TOKEN 31 | else: 32 | raise ValueError( 33 | "ChatGPT Token EMPTY. Edit the .env file and put your ChatGPT token" 34 | ) 35 | 36 | start_chat = os.getenv("USE_EXISTING_CHAT", False) 37 | if os.getenv("USE_GPT4") == "True": 38 | model = "gpt4" 39 | else: 40 | model = "default" 41 | 42 | if start_chat: 43 | chat_id = os.getenv("CHAT_ID") 44 | if chat_id == None: 45 | raise ValueError("You have to set up your chat-id in the .env file") 46 | llm = ChatGPTAPI.ChatGPT( 47 | token=os.environ["CHATGPT_TOKEN"], conversation=chat_id, model=model 48 | ) 49 | else: 50 | llm = ChatGPTAPI.ChatGPT(token=os.environ["CHATGPT_TOKEN"], model=model) 51 | 52 | elif select_model == "2": 53 | emailHF = os.getenv("emailHF", "your-emailHF") 54 | pswHF = os.getenv("pswHF", "your-pswHF") 55 | if emailHF != "your-emailHF" or pswHF != "your-pswHF": 56 | os.environ["emailHF"] = emailHF 57 | os.environ["pswHF"] = pswHF 58 | else: 59 | raise ValueError( 60 | "HuggingChat Token EMPTY. Edit the .env file and put your HuggingChat credentials" 61 | ) 62 | 63 | llm = HuggingChatAPI.HuggingChat(email=os.environ["emailHF"], psw=os.environ["pswHF"]) 64 | 65 | elif select_model == "3": 66 | if not os.path.exists("cookiesBing.json"): 67 | raise ValueError( 68 | "File 'cookiesBing.json' not found! Create it and put your cookies in there in the JSON format." 69 | ) 70 | cookie_path = Path() / "cookiesBing.json" 71 | with open("cookiesBing.json", "r") as file: 72 | try: 73 | file_json = json.loads(file.read()) 74 | except JSONDecodeError: 75 | raise ValueError( 76 | "You did not put your cookies inside 'cookiesBing.json'! 
You can find the simple guide to get the cookie file here: https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required." 77 | ) 78 | llm = BingChatAPI.BingChat( 79 | cookiepath=str(cookie_path), conversation_style="creative" 80 | ) 81 | 82 | elif select_model == "4": 83 | GB_TOKEN = os.getenv("BARDCHAT_TOKEN", "your-googlebard-token") 84 | 85 | if GB_TOKEN != "your-googlebard-token": 86 | os.environ["BARDCHAT_TOKEN"] = GB_TOKEN 87 | else: 88 | raise ValueError( 89 | "GoogleBard Token EMPTY. Edit the .env file and put your GoogleBard token" 90 | ) 91 | cookie_path = os.environ["BARDCHAT_TOKEN"] 92 | llm = BardChatAPI.BardChat(cookie=cookie_path) 93 | 94 | 95 | #### 96 | 97 | 98 | def initialize_chain(instructions, memory=None): 99 | if memory is None: 100 | memory = ConversationBufferWindowMemory() 101 | memory.ai_prefix = "Assistant" 102 | 103 | template = f""" 104 | Instructions: {instructions} 105 | {{{memory.memory_key}}} 106 | Human: {{human_input}} 107 | Assistant:""" 108 | 109 | prompt = PromptTemplate( 110 | input_variables=["history", "human_input"], template=template 111 | ) 112 | 113 | chain = LLMChain( 114 | llm=llm, 115 | prompt=prompt, 116 | verbose=True, 117 | memory=memory, # use the memory configured above (with the "Assistant" ai_prefix) rather than a fresh buffer 118 | ) 119 | return chain 120 | 121 | 122 | def initialize_meta_chain(): 123 | meta_template = """ 124 | Assistant has just had the below interactions with a User. Assistant followed their "Instructions" closely. Your job is to critique the Assistant's performance and then revise the Instructions so that Assistant would quickly and correctly respond in the future. 125 | 126 | #### 127 | 128 | {chat_history} 129 | 130 | #### 131 | 132 | Please reflect on these interactions. 133 | 134 | You should first critique Assistant's performance. What could Assistant have done better? What should the Assistant remember about this user? Are there things this user always wants? Indicate this with "Critique: ...". 135 | 136 | You should next revise the Instructions so that Assistant would quickly and correctly respond in the future. Assistant's goal is to satisfy the user in as few interactions as possible. Assistant will only see the new Instructions, not the interaction history, so anything important must be summarized in the Instructions. Don't forget any important details in the current Instructions! Indicate the new Instructions by "Instructions: ...". 
137 | """ 138 | 139 | meta_prompt = PromptTemplate( 140 | input_variables=["chat_history"], template=meta_template 141 | ) 142 | 143 | meta_chain = LLMChain( 144 | llm=llm, 145 | prompt=meta_prompt, 146 | verbose=True, 147 | ) 148 | return meta_chain 149 | 150 | 151 | def get_chat_history(chain_memory): 152 | memory_key = chain_memory.memory_key 153 | chat_history = chain_memory.load_memory_variables(memory_key)[memory_key] 154 | return chat_history 155 | 156 | 157 | def get_new_instructions(meta_output): 158 | delimiter = "Instructions: " 159 | new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter) :] 160 | return new_instructions 161 | 162 | 163 | def main(task, max_iters=3, max_meta_iters=5): 164 | failed_phrase = "task failed" 165 | success_phrase = "task succeeded" 166 | key_phrases = [success_phrase, failed_phrase] 167 | 168 | instructions = "None" 169 | for i in range(max_meta_iters): 170 | print(f"[Episode {i+1}/{max_meta_iters}]") 171 | chain = initialize_chain(instructions, memory=None) 172 | output = chain.predict(human_input=task) 173 | for j in range(max_iters): 174 | print(f"(Step {j+1}/{max_iters})") 175 | print(f"Assistant: {output}") 176 | print(f"Human: ") 177 | human_input = input() 178 | if any(phrase in human_input.lower() for phrase in key_phrases): 179 | break 180 | output = chain.predict(human_input=human_input) 181 | if success_phrase in human_input.lower(): 182 | print(f"You succeeded! Thanks for playing!") 183 | return 184 | meta_chain = initialize_meta_chain() 185 | meta_output = meta_chain.predict(chat_history=get_chat_history(chain.memory)) 186 | print(f"Feedback: {meta_output}") 187 | instructions = get_new_instructions(meta_output) 188 | print(f"New Instructions: {instructions}") 189 | print("\n" + "#" * 80 + "\n") 190 | print(f"You failed! Thanks for playing!") 191 | 192 | 193 | task = input("Enter the objective of the AI system: (Be realistic!) 
") 194 | max_iters = int(input("Enter the maximum number of interactions per episode: ")) 195 | max_meta_iters = int(input("Enter the maximum number of episodes: ")) 196 | main(task, max_iters, max_meta_iters) 197 | -------------------------------------------------------------------------------- /OtherAgent/FreeLLM/BardChatAPI.py: -------------------------------------------------------------------------------- 1 | from Bard import Chatbot 2 | import asyncio 3 | 4 | import requests 5 | from langchain.llms.base import LLM 6 | from typing import Optional, List, Mapping, Any 7 | import pydantic 8 | import os 9 | from langchain import PromptTemplate, LLMChain 10 | from time import sleep 11 | 12 | 13 | 14 | class BardChat(LLM): 15 | 16 | history_data: Optional[List] = [] 17 | cookie : Optional[str] 18 | chatbot : Optional[Chatbot] = None 19 | 20 | 21 | @property 22 | def _llm_type(self) -> str: 23 | return "custom" 24 | 25 | async def call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 26 | if stop is not None: 27 | pass 28 | #raise ValueError("stop kwargs are not permitted.") 29 | #cookie is a must check 30 | if self.chatbot is None: 31 | if self.cookie is None: 32 | raise ValueError("Need a COOKIE , check https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required for get your COOKIE AND SAVE") 33 | else: 34 | #if self.chatbot == None: 35 | self.chatbot = Chatbot(self.cookie) 36 | 37 | response = self.chatbot.ask(prompt) 38 | #print(response) 39 | response_text = response['content'] 40 | #add to history 41 | self.history_data.append({"prompt":prompt,"response":response_text}) 42 | 43 | return response_text 44 | 45 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 46 | return asyncio.run(self.call(prompt=prompt)) 47 | 48 | @property 49 | def _identifying_params(self) -> Mapping[str, Any]: 50 | """Get the identifying parameters.""" 51 | return {"model": "BardCHAT", "cookie": self.cookie} 52 | 53 | 54 | 55 | #llm = BardChat(cookie = "YOURCOOKIE") #for start new chat 56 | 57 | #print(llm("Hello, how are you?")) 58 | #print(llm("what is AI?")) 59 | #print(llm("Can you resume your previus answer?")) #now memory work well 60 | -------------------------------------------------------------------------------- /OtherAgent/FreeLLM/BingChatAPI.py: -------------------------------------------------------------------------------- 1 | from EdgeGPT import Chatbot, ConversationStyle 2 | import asyncio 3 | 4 | import requests 5 | from langchain.llms.base import LLM 6 | from typing import Optional, List, Mapping, Any 7 | import pydantic 8 | import os 9 | from langchain import PromptTemplate, LLMChain 10 | from time import sleep 11 | 12 | 13 | 14 | class BingChat(LLM): 15 | 16 | history_data: Optional[List] = [] 17 | cookiepath : Optional[str] 18 | chatbot : Optional[Chatbot] = None 19 | conversation_style : Optional[str] 20 | conversation_style_on : Optional[ConversationStyle] = ConversationStyle.precise 21 | search_result : Optional[bool] = False 22 | 23 | @property 24 | def _llm_type(self) -> str: 25 | return "custom" 26 | 27 | def select_conversation(self, conversation_style: str): 28 | if conversation_style == "precise": 29 | self.conversation_style_on = ConversationStyle.precise 30 | elif conversation_style == "creative": 31 | self.conversation_style_on = ConversationStyle.creative 32 | elif conversation_style == "balanced": 33 | self.conversation_style_on = ConversationStyle.balanced 34 | else: 35 | raise ValueError("conversation_style must be 
precise, creative or balanced") 36 | self.conversation_style = conversation_style 37 | 38 | async def call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 39 | if stop is not None: 40 | pass 41 | #raise ValueError("stop kwargs are not permitted.") 42 | # a cookie path is required 43 | if self.chatbot is None: 44 | if self.cookiepath is None: 45 | raise ValueError("Need a COOKIE; check https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required to get your cookie and save it") 46 | else: 47 | #if self.chatbot == None: 48 | self.chatbot = await Chatbot.create(cookie_path=self.cookiepath) 49 | 50 | if self.conversation_style is not None: 51 | self.select_conversation(self.conversation_style) # sets self.conversation_style_on (the method returns None, so don't assign its result) 52 | 53 | response = await self.chatbot.ask(prompt=prompt, conversation_style=self.conversation_style, search_result=self.search_result) 54 | response_messages = response.get("item", {}).get("messages", []) 55 | response_text = response_messages[1].get("text", "") 56 | 57 | if response_text == "": 58 | hidden_text = response_messages[1].get("hiddenText", "") 59 | print(">>>> [DEBUGGER] hidden_text = " + str(hidden_text) + " [DEBUGGER] <<<<") 60 | print(">>>> [DEBUGGER] Bing Chat is not as open as ChatGPT; Bing Chat refused to respond. [DEBUGGER] <<<<") 61 | response_text = hidden_text 62 | """ 63 | # reset the chatbot and remake the call 64 | print("[DEBUGGER] Chatbot failed to respond. Resetting and trying again. [DEBUGGER]") 65 | print("[ INFO DEBUGGER ] \n\n" + str(response) + "\n\n\n") 66 | sleep(10) 67 | self.chatbot = await Chatbot.create(cookie_path=self.cookiepath) 68 | sleep(2) 69 | response = await self.chatbot.ask(prompt=prompt) 70 | response_messages = response.get("item", {}).get("messages", []) 71 | response_text = response_messages[1].get("text", "") 72 | """ 73 | 74 | #add to history 75 | self.history_data.append({"prompt":prompt,"response":response_text}) 76 | 77 | return response_text 78 | 79 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 80 | return asyncio.run(self.call(prompt=prompt)) 81 | 82 | @property 83 | def _identifying_params(self) -> Mapping[str, Any]: 84 | """Get the identifying parameters.""" 85 | return {"model": "BingCHAT", "cookiepath": self.cookiepath} 86 | 87 | 88 | 89 | #llm = BingChat(cookiepath = "YOUR-COOKIE") # start a new chat 90 | #llm = BingChat(cookiepath = "YOUR-COOKIE", conversation_style = "precise") # precise, creative or balanced 91 | #llm = BingChat(cookiepath = "YOUR-COOKIE" , conversation_style = "precise" , search_result=True) # with web access 92 | 93 | #print(llm("Hello, how are you?")) 94 | #print(llm("what is AI?")) 95 | #print(llm("Can you summarize your previous answer?")) # memory now works well 96 | -------------------------------------------------------------------------------- /OtherAgent/FreeLLM/ChatGPTAPI.py: -------------------------------------------------------------------------------- 1 | from gpt4_openai import GPT4OpenAI 2 | from langchain.llms.base import LLM 3 | from typing import Optional, List, Mapping, Any 4 | from time import sleep 5 | 6 | 7 | 8 | class ChatGPT(LLM): 9 | 10 | history_data: Optional[List] = [] 11 | token : Optional[str] 12 | chatbot : Optional[GPT4OpenAI] = None 13 | call : int = 0 14 | model : str = "gpt-3" # or gpt-4 15 | plugin_id : Optional[List] = [] 16 | 17 | #### WARNING : for each api call this library will create a new chat on chat.openai.com 18 | 19 | 20 | @property 21 | def _llm_type(self) -> str: 22 | return 
"custom" 23 | 24 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 25 | if stop is not None: 26 | pass 27 | #raise ValueError("stop kwargs are not permitted.") 28 | #token is a must check 29 | if self.chatbot is None: 30 | if self.token is None: 31 | raise ValueError("Need a token , check https://chat.openai.com/api/auth/session for get your token") 32 | else: 33 | try: 34 | if self.plugin_id == []: 35 | self.chatbot = GPT4OpenAI(token=self.token, model=self.model) 36 | else: 37 | self.chatbot = GPT4OpenAI(token=self.token, model=self.model, plugin_ids=self.plugin_id) 38 | except: 39 | raise ValueError("Error on create chatbot, check your token, or your model") 40 | 41 | response = "" 42 | # OpenAI: 50 requests / hour for each account 43 | if (self.call >= 45 and self.model == "default") or (self.call >= 23 and self.model == "gpt4"): 44 | raise ValueError("You have reached the maximum number of requests per hour ! Help me to Improve. Abusing this tool is at your own risk") 45 | else: 46 | sleep(2) 47 | response = self.chatbot(prompt) 48 | 49 | self.call += 1 50 | 51 | #add to history 52 | self.history_data.append({"prompt":prompt,"response":response}) 53 | 54 | return response 55 | 56 | @property 57 | def _identifying_params(self) -> Mapping[str, Any]: 58 | """Get the identifying parameters.""" 59 | return {"model": "ChatGPT", "token": self.token, "model": self.model} 60 | 61 | 62 | 63 | #llm = ChatGPT(token = "YOUR-COOKIE") #for start new chat 64 | 65 | #llm = ChatGPT(token = "YOUR-COOKIE" , model="gpt4") # REQUIRED CHATGPT PLUS subscription 66 | 67 | #llm = ChatGPT(token = "YOUR-COOKIE", conversation = "Add-XXXX-XXXX-Convesation-ID") #for use a chat already started 68 | 69 | #print(llm("Hello, how are you?")) 70 | #print(llm("what is AI?")) 71 | #print(llm("Can you resume your previus answer?")) #now memory work well 72 | -------------------------------------------------------------------------------- /OtherAgent/FreeLLM/HuggingChatAPI.py: -------------------------------------------------------------------------------- 1 | 2 | from hugchat import hugchat 3 | from hugchat.login import Login 4 | from langchain.llms.base import LLM 5 | from typing import Optional, List, Mapping, Any 6 | from time import sleep 7 | 8 | 9 | 10 | 11 | class HuggingChat(LLM): 12 | 13 | history_data: Optional[List] = [] 14 | chatbot : Optional[hugchat.ChatBot] = None 15 | conversation : Optional[str] = "" 16 | email : Optional[str] 17 | psw : Optional[str] 18 | #### WARNING : for each api call this library will create a new chat on chat.openai.com 19 | 20 | 21 | @property 22 | def _llm_type(self) -> str: 23 | return "custom" 24 | 25 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 26 | if stop is not None: 27 | pass 28 | #raise ValueError("stop kwargs are not permitted.") 29 | #token is a must check 30 | if self.chatbot is None: 31 | if self.email is None and self.psw is None: 32 | ValueError("Email and Password is required, pls check the documentation on github") 33 | else: 34 | if self.conversation == "": 35 | sign = Login(self.email, self.psw) 36 | cookies = sign.login() 37 | 38 | # Save cookies to usercookies/.json 39 | sign.saveCookies() 40 | 41 | # Create a ChatBot 42 | self.chatbot = hugchat.ChatBot(cookies=cookies.get_dict()) 43 | else: 44 | raise ValueError("Something went wrong") 45 | 46 | 47 | sleep(2) 48 | data = self.chatbot.chat(prompt, temperature=0.5, stream=False) 49 | 50 | 51 | #add to history 52 | 
self.history_data.append({"prompt":prompt,"response":data}) 53 | 54 | return data 55 | 56 | @property 57 | def _identifying_params(self) -> Mapping[str, Any]: 58 | """Get the identifying parameters.""" 59 | return {"model": "HuggingCHAT"} 60 | 61 | 62 | 63 | #llm = HuggingChat(email="YOUR-EMAIL", psw="YOUR-PASSWORD") # start a new chat 64 | 65 | 66 | #print(llm("Hello, how are you?")) 67 | #print(llm("what is AI?")) 68 | #print(llm("Can you summarize your previous answer?")) # memory now works well 69 | 70 | -------------------------------------------------------------------------------- /OtherAgent/Tool/browserQA.py: -------------------------------------------------------------------------------- 1 | # !pip install playwright 2 | # !playwright install 3 | import os # needed by os.system below 4 | import asyncio # needed by run_async below 5 | from langchain.docstore.document import Document # needed by WebpageQATool._run below 6 | async def async_load_playwright(url: str) -> str: 7 | """Load the specified URLs using Playwright and parse using BeautifulSoup.""" 8 | from bs4 import BeautifulSoup 9 | from playwright.async_api import async_playwright 10 | try: 11 | print(">>> WARNING <<<") 12 | print("If you are running this for the first time, you need to install playwright") 13 | print(">>> AUTO INSTALLING PLAYWRIGHT <<<") 14 | os.system("playwright install") 15 | print(">>> PLAYWRIGHT INSTALLED <<<") 16 | except: 17 | print(">>> PLAYWRIGHT ALREADY INSTALLED <<<") 18 | pass 19 | results = "" 20 | async with async_playwright() as p: 21 | browser = await p.chromium.launch(headless=True) 22 | try: 23 | page = await browser.new_page() 24 | await page.goto(url) 25 | 26 | page_source = await page.content() 27 | soup = BeautifulSoup(page_source, "html.parser") 28 | 29 | for script in soup(["script", "style"]): 30 | script.extract() 31 | 32 | text = soup.get_text() 33 | lines = (line.strip() for line in text.splitlines()) 34 | chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) 35 | results = "\n".join(chunk for chunk in chunks if chunk) 36 | except Exception as e: 37 | results = f"Error: {e}" 38 | await browser.close() 39 | return results 40 | 41 | def run_async(coro): 42 | event_loop = asyncio.get_event_loop() 43 | return event_loop.run_until_complete(coro) 44 | 45 | 46 | def browse_web_page(url: str) -> str: 47 | """Verbose way to scrape a whole webpage. Likely to cause issues parsing.""" 48 | return run_async(async_load_playwright(url)) 49 | 50 | 51 | from langchain.tools import BaseTool 52 | from langchain.tools import DuckDuckGoSearchRun 53 | from langchain.text_splitter import RecursiveCharacterTextSplitter 54 | 55 | from pydantic import Field 56 | from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain, BaseCombineDocumentsChain 57 | 58 | def _get_text_splitter(): 59 | return RecursiveCharacterTextSplitter( 60 | # Set a really small chunk size, just to show. 61 | chunk_size = 500, 62 | chunk_overlap = 20, 63 | length_function = len, 64 | ) 65 | 66 | 67 | class WebpageQATool(BaseTool): 68 | name = "query_webpage" 69 | description = "Browse a webpage and retrieve the information relevant to the question." 
70 | text_splitter: RecursiveCharacterTextSplitter = Field(default_factory=_get_text_splitter) 71 | qa_chain: BaseCombineDocumentsChain 72 | 73 | def _run(self, url: str, question: str) -> str: 74 | """Useful for browsing websites and scraping the text information.""" 75 | result = browse_web_page(url) # browse_web_page is a plain function, not a Tool, so call it directly 76 | docs = [Document(page_content=result, metadata={"source": url})] 77 | web_docs = self.text_splitter.split_documents(docs) 78 | results = [] 79 | # TODO: Handle this with a MapReduceChain 80 | for i in range(0, len(web_docs), 4): 81 | input_docs = web_docs[i:i+4] 82 | window_result = self.qa_chain({"input_documents": input_docs, "question": question}, return_only_outputs=True) 83 | results.append(f"Response from window {i} - {window_result}") 84 | results_docs = [Document(page_content="\n".join(results), metadata={"source": url})] 85 | return self.qa_chain({"input_documents": results_docs, "question": question}, return_only_outputs=True) 86 | 87 | async def _arun(self, url: str, question: str) -> str: 88 | raise NotImplementedError 89 | 90 | -------------------------------------------------------------------------------- /OtherAgent/csvAgent.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | from json import JSONDecodeError 4 | from langchain.agents import create_csv_agent 5 | from FreeLLM import ChatGPTAPI # FREE CHATGPT API 6 | from FreeLLM import HuggingChatAPI # FREE HUGGINGCHAT API 7 | from FreeLLM import BingChatAPI # FREE BINGCHAT API 8 | from FreeLLM import BardChatAPI # FREE GOOGLE BARD API 9 | 10 | from langchain.utilities import PythonREPL 11 | import os 12 | 13 | #### LOG IN FOR CHATGPT FREE LLM 14 | from dotenv import load_dotenv 15 | load_dotenv() 16 | 17 | select_model = input("Select the model you want to use (1, 2, 3 or 4) \n \ 18 | 1) ChatGPT \n \ 19 | 2) HuggingChat \n \ 20 | 3) BingChat \n \ 21 | 4) Google Bard \n \ 22 | >>> ") 23 | 24 | if select_model == "1": 25 | CG_TOKEN = os.getenv("CHATGPT_TOKEN", "your-chatgpt-token") 26 | 27 | if CG_TOKEN != "your-chatgpt-token": 28 | os.environ["CHATGPT_TOKEN"] = CG_TOKEN 29 | else: 30 | raise ValueError( 31 | "ChatGPT Token EMPTY. Edit the .env file and put your ChatGPT token" 32 | ) 33 | 34 | start_chat = os.getenv("USE_EXISTING_CHAT", False) 35 | if os.getenv("USE_GPT4") == "True": 36 | model = "gpt-4" 37 | else: 38 | model = "default" 39 | 40 | llm = ChatGPTAPI.ChatGPT(token=os.environ["CHATGPT_TOKEN"], model=model) 41 | 42 | elif select_model == "2": 43 | emailHF = os.getenv("emailHF", "your-emailHF") 44 | pswHF = os.getenv("pswHF", "your-pswHF") 45 | if emailHF != "your-emailHF" or pswHF != "your-pswHF": 46 | os.environ["emailHF"] = emailHF 47 | os.environ["pswHF"] = pswHF 48 | else: 49 | raise ValueError( 50 | "HuggingChat Token EMPTY. Edit the .env file and put your HuggingChat credentials" 51 | ) 52 | 53 | llm = HuggingChatAPI.HuggingChat(email=os.environ["emailHF"], psw=os.environ["pswHF"]) 54 | 55 | elif select_model == "3": 56 | if not os.path.exists("cookiesBing.json"): 57 | raise ValueError("File 'cookiesBing.json' not found! Create it and put your cookies in there in the JSON format.") 58 | cookie_path = Path() / "cookiesBing.json" 59 | with open("cookiesBing.json", 'r') as file: 60 | try: 61 | file_json = json.loads(file.read()) 62 | except JSONDecodeError: 63 | raise ValueError("You did not put your cookies inside 'cookiesBing.json'! 
You can find the simple guide to get the cookie file here: https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required.") 64 | llm=BingChatAPI.BingChat(cookiepath=str(cookie_path), conversation_style="creative") 65 | 66 | elif select_model == "4": 67 | GB_TOKEN = os.getenv("BARDCHAT_TOKEN", "your-googlebard-token") 68 | 69 | if GB_TOKEN != "your-googlebard-token": 70 | os.environ["BARDCHAT_TOKEN"] = GB_TOKEN 71 | else: 72 | raise ValueError("GoogleBard Token EMPTY. Edit the .env file and put your GoogleBard token") 73 | cookie_path = os.environ["BARDCHAT_TOKEN"] 74 | llm=BardChatAPI.BardChat(cookie=cookie_path) 75 | 76 | #### 77 | 78 | path_csv = input("Enter the path of the csv file: ") or "OtherAgent/startup.csv" 79 | 80 | agent = create_csv_agent(llm=llm, tool=PythonREPL(), path=path_csv, verbose=True) 81 | 82 | #todo : ADD MEMORY 83 | 84 | 85 | print(">> START CSV AGENT") 86 | print("> Digit 'exit' for exit or 'your task or question' for start\n\n") 87 | prompt = input("(Enter your task or question) >> ") 88 | while prompt != "exit": 89 | agent.run(prompt) 90 | prompt = input("(Enter your task or question) >> ") 91 | 92 | -------------------------------------------------------------------------------- /OtherAgent/customAgent.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | from json import JSONDecodeError 4 | from langchain.agents import initialize_agent 5 | from langchain.utilities import PythonREPL 6 | from langchain.utilities import WikipediaAPIWrapper 7 | from langchain.tools import BaseTool, DuckDuckGoSearchRun 8 | from langchain.tools.human.tool import HumanInputRun 9 | from FreeLLM import ChatGPTAPI # FREE CHATGPT API 10 | from FreeLLM import HuggingChatAPI # FREE HUGGINGCHAT API 11 | from FreeLLM import BingChatAPI # FREE BINGCHAT API 12 | from FreeLLM import BardChatAPI # FREE Google BArd API 13 | from langchain.agents import initialize_agent, Tool 14 | 15 | import os 16 | 17 | #### LOG IN FOR CHATGPT FREE LLM 18 | from dotenv import load_dotenv 19 | load_dotenv() 20 | select_model = input("Select the model you want to use (1, 2, 3 or 4) \n \ 21 | 1) ChatGPT \n \ 22 | 2) HuggingChat \n \ 23 | 3) BingChat \n \ 24 | 4) Google Bard \n \ 25 | >>> ") 26 | 27 | if select_model == "1": 28 | CG_TOKEN = os.getenv("CHATGPT_TOKEN", "your-chatgpt-token") 29 | 30 | if CG_TOKEN != "your-chatgpt-token": 31 | os.environ["CHATGPT_TOKEN"] = CG_TOKEN 32 | else: 33 | raise ValueError( 34 | "ChatGPT Token EMPTY. Edit the .env file and put your ChatGPT token" 35 | ) 36 | 37 | start_chat = os.getenv("USE_EXISTING_CHAT", False) 38 | if os.getenv("USE_GPT4") == "True": 39 | model = "gpt-4" 40 | else: 41 | model = "default" 42 | 43 | llm = ChatGPTAPI.ChatGPT(token=os.environ["CHATGPT_TOKEN"], model=model) 44 | 45 | 46 | elif select_model == "2": 47 | emailHF = os.getenv("emailHF", "your-emailHF") 48 | pswHF = os.getenv("pswHF", "your-pswHF") 49 | if emailHF != "your-emailHF" or pswHF != "your-pswHF": 50 | os.environ["emailHF"] = emailHF 51 | os.environ["pswHF"] = pswHF 52 | else: 53 | raise ValueError( 54 | "HuggingChat Token EMPTY. Edit the .env file and put your HuggingChat credentials" 55 | ) 56 | 57 | llm = HuggingChatAPI.HuggingChat(email=os.environ["emailHF"], psw=os.environ["pswHF"]) 58 | 59 | elif select_model == "3": 60 | if not os.path.exists("cookiesBing.json"): 61 | raise ValueError("File 'cookiesBing.json' not found! 
Create it and put your cookies in there in the JSON format.") 62 | cookie_path = Path() / "cookiesBing.json" 63 | with open("cookiesBing.json", 'r') as file: 64 | try: 65 | file_json = json.loads(file.read()) 66 | except JSONDecodeError: 67 | raise ValueError("You did not put your cookies inside 'cookiesBing.json'! You can find the simple guide to get the cookie file here: https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required.") 68 | llm=BingChatAPI.BingChat(cookiepath=str(cookie_path), conversation_style="creative") 69 | 70 | elif select_model == "4": 71 | GB_TOKEN = os.getenv("BARDCHAT_TOKEN", "your-googlebard-token") 72 | 73 | if GB_TOKEN != "your-googlebard-token": 74 | os.environ["BARDCHAT_TOKEN"] = GB_TOKEN 75 | else: 76 | raise ValueError("GoogleBard Token EMPTY. Edit the .env file and put your GoogleBard token") 77 | cookie_path = os.environ["BARDCHAT_TOKEN"] 78 | llm=BardChatAPI.BardChat(cookie=cookie_path) 79 | 80 | #### 81 | 82 | wikipedia = WikipediaAPIWrapper() 83 | python_repl = PythonREPL() 84 | search = DuckDuckGoSearchRun() 85 | 86 | 87 | #from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain, BaseCombineDocumentsChain 88 | #from Tool import browserQA 89 | #query_website_tool = browserQA.WebpageQATool(qa_chain=load_qa_with_sources_chain(llm)) 90 | 91 | 92 | 93 | #define TOOLs 94 | 95 | tools = [ 96 | Tool( 97 | name = "python repl", 98 | func=python_repl.run, 99 | description="useful for when you need to use python to answer a question. You should input python code" 100 | ) 101 | ] 102 | 103 | wikipedia_tool = Tool( 104 | name='wikipedia', 105 | func= wikipedia.run, 106 | description="Useful for when you need to look up a topic, country or person on wikipedia" 107 | ) 108 | 109 | 110 | duckduckgo_tool = Tool( 111 | name='DuckDuckGo Search', 112 | func= search.run, 113 | description="Useful for when you need to do a search on the internet to find information that another tool can't find. be specific with your input." 114 | ) 115 | """ 116 | 117 | queryWebsite_tool = Tool( 118 | name= query_website_tool.name, 119 | func= query_website_tool.run, 120 | description= query_website_tool.description 121 | ) 122 | 123 | """ 124 | #human_input_tool = Tool( 125 | #name='human input', 126 | #func= HumanInputRun.run, 127 | #description="Useful for when you need to ask a human a question. be specific with your input." 128 | #) 129 | 130 | #Add here your tools 131 | #custom_tool = Tool( 132 | #name='custom tool', 133 | #func= custom_tool.run, 134 | #description="My fantasitc tool" 135 | #) 136 | 137 | 138 | tools.append(duckduckgo_tool) 139 | #tools.append(queryWebsite_tool) 140 | tools.append(wikipedia_tool) 141 | #tools.append(human_input_tool) 142 | #tools.append(custom_tool) 143 | 144 | 145 | #Create the Agent 146 | iteration = (int(input("Enter the number of iterations: ")) if input("Do you want to set the number of iterations? 
(y/n): ") == "y" else 3) 147 | 148 | zero_shot_agent = initialize_agent( 149 | agent="zero-shot-react-description", 150 | tools=tools, 151 | llm=llm, 152 | verbose=True, 153 | max_iterations=iteration, 154 | 155 | ) 156 | 157 | 158 | 159 | print(">> START CUSTOM AGENT") 160 | print("> Digit 'exit' for exit or 'your task or question' for start\n\n") 161 | prompt = input("(Enter your task or question) >> ") 162 | while prompt != "exit": 163 | zero_shot_agent.run(prompt) 164 | prompt = input("(Enter your task or question) >> ") 165 | -------------------------------------------------------------------------------- /OtherAgent/pythonAgent.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | from json import JSONDecodeError 4 | from langchain.agents.agent_toolkits import create_python_agent 5 | from langchain.tools.python.tool import PythonREPLTool 6 | from FreeLLM import ChatGPTAPI # FREE CHATGPT API 7 | from FreeLLM import HuggingChatAPI # FREE HUGGINGCHAT API 8 | from FreeLLM import BingChatAPI # FREE BINGCHAT API 9 | from FreeLLM import BardChatAPI # FREE GOOGLE BARD API 10 | 11 | import os 12 | 13 | #### LOG IN FOR CHATGPT FREE LLM 14 | from dotenv import load_dotenv 15 | load_dotenv() 16 | 17 | select_model = input("Select the model you want to use (1, 2, 3 or 4) \n \ 18 | 1) ChatGPT \n \ 19 | 2) HuggingChat \n \ 20 | 3) BingChat \n \ 21 | 4) Google Bard \n \ 22 | >>> ") 23 | 24 | if select_model == "1": 25 | CG_TOKEN = os.getenv("CHATGPT_TOKEN", "your-chatgpt-token") 26 | 27 | if CG_TOKEN != "your-chatgpt-token": 28 | os.environ["CHATGPT_TOKEN"] = CG_TOKEN 29 | else: 30 | raise ValueError( 31 | "ChatGPT Token EMPTY. Edit the .env file and put your ChatGPT token" 32 | ) 33 | 34 | start_chat = os.getenv("USE_EXISTING_CHAT", False) 35 | if os.getenv("USE_GPT4") == "True": 36 | model = "gpt-4" 37 | else: 38 | model = "default" 39 | 40 | llm = ChatGPTAPI.ChatGPT(token=os.environ["CHATGPT_TOKEN"], model=model) 41 | 42 | elif select_model == "2": 43 | emailHF = os.getenv("emailHF", "your-emailHF") 44 | pswHF = os.getenv("pswHF", "your-pswHF") 45 | if emailHF != "your-emailHF" or pswHF != "your-pswHF": 46 | os.environ["emailHF"] = emailHF 47 | os.environ["pswHF"] = pswHF 48 | else: 49 | raise ValueError( 50 | "HuggingChat Token EMPTY. Edit the .env file and put your HuggingChat credentials" 51 | ) 52 | 53 | llm = HuggingChatAPI.HuggingChat(email=os.environ["emailHF"], psw=os.environ["pswHF"]) 54 | 55 | elif select_model == "3": 56 | if not os.path.exists("cookiesBing.json"): 57 | raise ValueError("File 'cookiesBing.json' not found! Create it and put your cookies in there in the JSON format.") 58 | cookie_path = Path() / "cookiesBing.json" 59 | with open("cookiesBing.json", 'r') as file: 60 | try: 61 | file_json = json.loads(file.read()) 62 | except JSONDecodeError: 63 | raise ValueError("You did not put your cookies inside 'cookiesBing.json'! You can find the simple guide to get the cookie file here: https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required.") 64 | llm=BingChatAPI.BingChat(cookiepath=str(cookie_path), conversation_style="creative") 65 | 66 | elif select_model == "4": 67 | GB_TOKEN = os.getenv("BARDCHAT_TOKEN", "your-googlebard-token") 68 | 69 | if GB_TOKEN != "your-googlebard-token": 70 | os.environ["BARDCHAT_TOKEN"] = GB_TOKEN 71 | else: 72 | raise ValueError("GoogleBard Token EMPTY. 
Edit the .env file and put your GoogleBard token") 73 | cookie_path = os.environ["BARDCHAT_TOKEN"] 74 | llm=BardChatAPI.BardChat(cookie=cookie_path) 75 | 76 | 77 | 78 | #### 79 | 80 | 81 | 82 | agent_executor = create_python_agent( 83 | llm=llm, 84 | tool=PythonREPLTool(), 85 | verbose=True 86 | ) 87 | 88 | #todo : ADD MEMORY 89 | 90 | print(">> START Python AGENT") 91 | print("> Digit 'exit' for exit or 'your task or question' for start\n\n") 92 | prompt = input("(Enter your task or question) >> ") 93 | while prompt != "exit": 94 | agent_executor.run(prompt) 95 | prompt = input("(Enter your task or question) >> ") 96 | -------------------------------------------------------------------------------- /OtherAgent/startup.csv: -------------------------------------------------------------------------------- 1 | R&D Spend,Administration,Marketing Spend,State,Profit 2 | 165349.2,136897.8,471784.1,New York,192261.83 3 | 162597.7,151377.59,443898.53,California,191792.06 4 | 153441.51,101145.55,407934.54,Florida,191050.39 5 | 144372.41,118671.85,383199.62,New York,182901.99 6 | 142107.34,91391.77,366168.42,Florida,166187.94 7 | 131876.9,99814.71,362861.36,New York,156991.12 8 | 134615.46,147198.87,127716.82,California,156122.51 9 | 130298.13,145530.06,323876.68,Florida,155752.6 10 | 120542.52,148718.95,311613.29,New York,152211.77 11 | 123334.88,108679.17,304981.62,California,149759.96 12 | 101913.08,110594.11,229160.95,Florida,146121.95 13 | 100671.96,91790.61,249744.55,California,144259.4 14 | 93863.75,127320.38,249839.44,Florida,141585.52 15 | 91992.39,135495.07,252664.93,California,134307.35 16 | 119943.24,156547.42,256512.92,Florida,132602.65 17 | 114523.61,122616.84,261776.23,New York,129917.04 18 | 78013.11,121597.55,264346.06,California,126992.93 19 | 94657.16,145077.58,282574.31,New York,125370.37 20 | 91749.16,114175.79,294919.57,Florida,124266.9 21 | 86419.7,153514.11,0,New York,122776.86 22 | 76253.86,113867.3,298664.47,California,118474.03 23 | 78389.47,153773.43,299737.29,New York,111313.02 24 | 73994.56,122782.75,303319.26,Florida,110352.25 25 | 67532.53,105751.03,304768.73,Florida,108733.99 26 | 77044.01,99281.34,140574.81,New York,108552.04 27 | 64664.71,139553.16,137962.62,California,107404.34 28 | 75328.87,144135.98,134050.07,Florida,105733.54 29 | 72107.6,127864.55,353183.81,New York,105008.31 30 | 66051.52,182645.56,118148.2,Florida,103282.38 31 | 65605.48,153032.06,107138.38,New York,101004.64 32 | 61994.48,115641.28,91131.24,Florida,99937.59 33 | 61136.38,152701.92,88218.23,New York,97483.56 34 | 63408.86,129219.61,46085.25,California,97427.84 35 | 55493.95,103057.49,214634.81,Florida,96778.92 36 | 46426.07,157693.92,210797.67,California,96712.8 37 | 46014.02,85047.44,205517.64,New York,96479.51 38 | 28663.76,127056.21,201126.82,Florida,90708.19 39 | 44069.95,51283.14,197029.42,California,89949.14 40 | 20229.59,65947.93,185265.1,New York,81229.06 41 | 38558.51,82982.09,174999.3,California,81005.76 42 | 28754.33,118546.05,172795.67,California,78239.91 43 | 27892.92,84710.77,164470.71,Florida,77798.83 44 | 23640.93,96189.63,148001.11,California,71498.49 45 | 15505.73,127382.3,35534.17,New York,69758.98 46 | 22177.74,154806.14,28334.72,California,65200.33 47 | 1000.23,124153.04,1903.93,New York,64926.08 48 | 1315.46,115816.21,297114.46,Florida,49490.75 49 | 0,135426.92,0,California,42559.73 50 | 542.05,51743.15,0,New York,35673.41 51 | 0,116983.8,45173.06,California,14681.4 -------------------------------------------------------------------------------- 
/PlugAndPlayStart/README.md: -------------------------------------------------------------------------------- 1 | 2 | For any problem open an ISSUE 🚬, the project is very simple so any help is welcome💸. 3 | 4 | **Are you bored reading😴? Do you want to try our project now⏳? Open the notebook on Colab, everything is ready!** 5 | 6 | **RUN NOW ON COLAB 😮👉** [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/14854fi6oO4lXqR3_mt6tc2Lr2IsA12oq?usp=sharing) 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # USE AI AGENTs, like AUTO-GPT or BABYAGI, without paid APIs😤 **TOTALLY FOR FREE🤑** 3 | 4 | Tired of paying for OPENAI, PINECONE, GOOGLESEARCH APIs to try out the latest developments in the AI field? 5 | Perfect, **this is the repository for you! 🎁** 6 | 7 | For any problem open an ISSUE 🚬, the project is very simple so any help is welcome💸. 8 | 9 | **Are you bored reading😴? Do you want to try our project now⏳? Open the notebook on Colab, everything is ready!** 10 | 11 | **RUN NOW ON COLAB😮** [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/14854fi6oO4lXqR3_mt6tc2Lr2IsA12oq?usp=sharing) 12 | ⚠️ Abusing this tool is at your own risk 13 | 14 | ![intro](https://user-images.githubusercontent.com/108482353/236516034-737e0ca0-7ccb-4629-affb-aff0fb2b6579.png) 15 | 16 | 17 | By the way, thank you so much for [![Stars](https://img.shields.io/github/stars/IntelligenzaArtificiale/Free-AUTOGPT-with-NO-API?style=social)](https://github.com/IntelligenzaArtificiale/Free-AUTOGPT-with-NO-API/stargazers) and all the support!! 18 | 19 | ## WHY THIS REPOSITORY? 🤔 20 | 21 | Hello everyone :smiling_face_with_three_hearts:, 22 | 23 | I wanted to start by **talking about how important it is to democratize AI**. Unfortunately, most new applications or discoveries in this field end up enriching some big companies, leaving behind small businesses or simple projects. One striking example of this is Autogpt, an autonomous AI agent capable of performing tasks. 24 | 25 | Autogpt and similar projects like BabyAGI **only work with paid APIs, which is not fair**. That's why I tried to recreate a simpler but very interesting and, above all, open-source version of Autogpt that **does not require any API and does not need any particular hardware.** 26 | 27 | I believe that by providing free and open-source AI tools, we can give small businesses and individuals the opportunity to create new and innovative projects without the need for significant financial investment. **This will allow for more equitable and diverse access to AI technology, which is essential for advancing society as a whole.** 28 | 29 | 30 | 31 | ----- 32 | 
33 | 34 |
35 | 36 | 37 | ## HOW TO GET Tokens & Cookies totally for FREE 🔑🔐 38 | 39 | 40 | 41 | 42 |
43 | 44 | 45 | #### GET HUGGINGFACE TOKEN 🤗 46 | 47 | 48 | 49 | - **HUGGINGFACE TOKEN** : Visit this simple [official guide](https://huggingface.co/docs/hub/security-tokens) 50 |
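If you want to verify that your HuggingFace token works, here is a minimal sketch (not part of this repository; the model name and endpoint below are our assumptions) that calls the free Inference API for sentence embeddings — the same trick this project uses to replace `OpenAIEmbeddings()`:

```python
import os
import requests

# Hypothetical smoke test: embed one sentence with the free HuggingFace Inference API.
# The model below is an assumption; any feature-extraction model should work.
HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN", "your-huggingface-token")
MODEL = "sentence-transformers/all-MiniLM-L6-v2"
API_URL = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{MODEL}"

response = requests.post(
    API_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
    json={"inputs": ["Hello, free embeddings!"]},
)
embedding = response.json()[0]  # one vector per input sentence
print(len(embedding), "dimensions")
```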
51 | 52 |
53 | 54 | 55 | #### GET HUGGINGCHAT COOKIE🍪 56 | 57 | 58 | 59 | - Copy your email and password into the `.env` file 60 | 61 | 
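Once the credentials are in the `.env` file, the scripts read them with `python-dotenv` and hand them to the HuggingChat wrapper. A minimal sketch of that flow (the variable names are the ones the scripts in this repo expect):

```python
import os
from dotenv import load_dotenv
from FreeLLM import HuggingChatAPI

load_dotenv()  # pulls emailHF / pswHF from the .env file into the environment

llm = HuggingChatAPI.HuggingChat(
    email=os.getenv("emailHF"),
    psw=os.getenv("pswHF"),
)
print(llm("Hello, how are you?"))
```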
62 | 63 | 64 |
65 | 66 | 67 | #### GET CHATGPT COOKIE🍪 68 | 69 | 70 | 71 | - **(OPTIONAL BUT BETTER RESULT) CHATGPT🖥** : 72 | 1. Go to https://chat.openai.com/chat and open the developer tools by `F12`. 73 | 2. Find the `__Secure-next-auth.session-token` cookie in `Application` > `Storage` > `Cookies` > `https://chat.openai.com`. 74 | 3. Copy the value from the `Cookie Value` field into the `.env` file. 75 | 4. If you have a Plus subscription you can use GPT-4: edit this line in the `.env` file: `USE_GPT4 = True` 76 | 77 | ![image](https://user-images.githubusercontent.com/19218518/206170122-61fbe94f-4b0c-4782-a344-e26ac0d4e2a7.png) 78 | 
79 | 80 |
81 | 82 | 83 | #### GET GOOGLE BARD COOKIE🍪 84 | 85 | 86 | 87 | - **(OPTIONAL) Google Bard🖥** : 88 | 1. Go to https://bard.google.com/ and open the developer tools by `F12`. 89 | 2. Find the `__Secure-1PSID` cookie in `Application` > `Storage` > `Cookies` 90 | 3. Copy the value from the `Cookie Value` field into the `.env` file. 91 | 92 | ![Cattura](https://user-images.githubusercontent.com/108482353/236518416-ba0fb89c-080d-4e5e-8514-4ed7ac897b55.PNG) 93 | 
94 | 95 |
96 | 97 | 98 | #### GET MICROSOFT BING COOKIE🍪 99 | 100 | 101 | 102 | - **(OPTIONAL) Bing CHAT👨‍💻** : 103 | 1. Check if you have access to [Bing Chat](https://chrome.google.com/webstore/detail/bing-chat-for-all-browser/jofbglonpbndadajbafmmaklbfbkggpo) 104 | 2. Install the cookie editor extension for [Chrome](https://chrome.google.com/webstore/detail/cookie-editor/hlkenndednhfkekhgcdicdfddnkalmdm) or [Firefox](https://addons.mozilla.org/en-US/firefox/addon/cookie-editor/) 105 | 3. Go to `bing.com` 106 | 4. Open the cookie editor extension 107 | 5. Click "Export" on the bottom right, then "Export as JSON" (this saves your cookies to the clipboard) 108 | 6. Paste your cookies into a file named `cookiesBing.json` 109 | 110 | 111 | ![image](https://user-images.githubusercontent.com/108482353/236259872-faf7946c-5648-4733-8d66-978040eacd85.png) 112 | 
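The scripts in this repo validate `cookiesBing.json` before starting; if Bing refuses to connect, you can run the same check yourself. A minimal sketch of the validation the agents perform:

```python
import json
from json import JSONDecodeError
from pathlib import Path

cookie_path = Path("cookiesBing.json")
if not cookie_path.exists():
    raise ValueError("File 'cookiesBing.json' not found! Create it and put your cookies in there in JSON format.")

try:
    cookies = json.loads(cookie_path.read_text())
    print(f"OK: {len(cookies)} cookies loaded")
except JSONDecodeError:
    raise ValueError("'cookiesBing.json' does not contain valid JSON; re-export your cookies.")
```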
113 | 114 |
115 | 116 | ----- 117 |
118 | 119 | 120 |
121 | 122 | 123 | ## ⚠️ SETUP the .env FILE ⚠️ 124 | 125 | 126 | Open the file called `.env` . 127 | If you don't see the file, open your file manager and enable **`Show hidden files`** . 128 | 129 | Now add your cookies and tokens to the `.env` file . 130 | 131 | 
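The scripts (e.g. `MetaPrompt.py` and the agents in `OtherAgent/`) load this file with `python-dotenv` and refuse to start while a placeholder value is still in place. A minimal sketch of that pattern, distilled from `MetaPrompt.py`:

```python
import os
from dotenv import load_dotenv

load_dotenv()  # copies the values from .env into the process environment

CG_TOKEN = os.getenv("CHATGPT_TOKEN", "your-chatgpt-token")
if CG_TOKEN == "your-chatgpt-token":
    raise ValueError("ChatGPT Token EMPTY. Edit the .env file and put your ChatGPT token")

# GPT-4 is opt-in and requires a ChatGPT Plus subscription
model = "gpt4" if os.getenv("USE_GPT4") == "True" else "default"
```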
132 | 133 | 134 | 135 | ----- 136 |
137 | 138 |
139 | 140 | 141 | ## Local use with a Dev Container in VSCode by [@FlamingFury00](https://github.com/FlamingFury00)🚀 142 | 143 | 144 | 145 | 🚀Added the possibility to use a Docker image via Dev Containers in VSCode. How to run it: 146 | - Install [Docker Desktop](https://docs.docker.com/desktop/) 147 | - Install Visual Studio Code 148 | - Open Visual Studio Code and go to **Extensions -> search for Dev Container -> install it** 149 | - Restart Visual Studio Code 150 | - Go to the project folder, **right click** and **"Open in Visual Studio Code"** 151 | - It will ask you to reopen in a Dev Container 152 | - Click **"Reopen"** and wait for it to complete **(you need to have Docker Desktop open)** 153 | 154 | 
155 | 156 | 157 | 158 | ----- 159 |
160 | 161 |
162 | 163 | 164 | ## HOW TO RUN BABY AGI 👶 165 | 166 | 167 | 168 | 169 | **RUN NOW ON COLAB😮** [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/14854fi6oO4lXqR3_mt6tc2Lr2IsA12oq?usp=sharing) 170 | ⚠️ Abusing this tool is at your own risk 171 | 172 | 173 | **Or use Locally:** 174 | - Download the repository [FREE AUTOGPT REPOSITORY](https://github.com/IntelligenzaArtificiale/Free-AUTOGPT-with-NO-API) 175 | - Install using **Dev Container in VSCode** or `python3 -m pip install -r requirements.txt` 176 | - Insert your tokens in the **.env file** 177 | - If you don't see the **.env file**, enable "Show hidden files" in your file manager 178 | - Usage: **python BABYAGI.py** 179 | 180 | 181 | 182 | https://user-images.githubusercontent.com/108482353/234963635-004adace-36ab-46de-9022-61858cd3dca4.mp4 183 | 184 | 185 | 
186 | 187 | 188 | 189 | 190 | ----- 191 |
192 | 193 |
194 | 195 | 196 | ## HOW TO RUN AUTOGPT 🤖 197 | 198 | 199 | 200 | **RUN NOW ON COLAB😮** [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/14854fi6oO4lXqR3_mt6tc2Lr2IsA12oq?usp=sharing) 201 | ⚠️ Abusing this tool is at your own risk 202 | 203 | **Or use Locally:** 204 | - Download the repository [FREE AUTOGPT REPOSITORY](https://github.com/IntelligenzaArtificiale/Free-AUTOGPT-with-NO-API) 205 | - Install using **Dev Container in VSCode** or `python3 -m pip install -r requirements.txt` 206 | - Insert your tokens in the **.env file** 207 | - If you don't see the **.env file**, enable "Show hidden files" in your file manager 208 | - Usage: **python AUTOGPT.py** 209 | 210 | 211 | https://user-images.githubusercontent.com/108482353/234947600-1df35b1f-6505-40f9-be1d-3257a46eacf3.mp4 212 | 213 | 
214 | 215 | 216 | ----- 217 |
218 | 219 |
220 | 221 | 222 | ## HOW TO RUN Your CUSTOM AGENT 🤖 223 | 224 | 225 | 226 | **RUN NOW ON COLAB😮** [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/14854fi6oO4lXqR3_mt6tc2Lr2IsA12oq?usp=sharing) 227 | ⚠️ Abusing this tool is at your own risk 228 | 229 | **Or use Locally:** 230 | - Download the repository [FREE AUTOGPT REPOSITORY](https://github.com/IntelligenzaArtificiale/Free-AUTOGPT-with-NO-API) 231 | - Install using **Dev Container in VSCode** or `python3 -m pip install -r requirements.txt` 232 | - `cd OtherAgent/` 233 | - Choose or develop your agent [ csvAgent.py ; pythonAgent.py ; customAgent.py ] (a sketch for adding your own tool follows below) 234 | - Usage: **python YourAgent.py** 235 | 236 | 237 | 238 | 239 | https://user-images.githubusercontent.com/108482353/235354639-998f0a40-3d2d-4f33-b187-17a3be8d7899.mp4 240 | 241 | 
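`customAgent.py` already wires up Python, Wikipedia and DuckDuckGo tools, and leaves a commented slot for your own. Adding one just means appending another `Tool` before the agent is created — a minimal sketch (the `shout` function is an invented example):

```python
from langchain.agents import Tool

# Hypothetical example tool: any plain function from string to string works.
def shout(text: str) -> str:
    return text.upper()

custom_tool = Tool(
    name="shout",
    func=shout,
    description="Useful for when you need to return the input text in uppercase.",
)

tools = []  # in customAgent.py this list already exists with the built-in tools
tools.append(custom_tool)  # do this before initialize_agent(...) is called
```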
242 | 243 | ----- 244 |
245 | 246 |
247 | 248 | 249 | ## HOW TO RUN CAMEL 🐫 250 | 251 | 252 | 253 | 254 | **RUN NOW ON COLAB😮** [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/14854fi6oO4lXqR3_mt6tc2Lr2IsA12oq?usp=sharing) 255 | ⚠️ Abusing this tool is at your own risk 256 | 257 | **Or use Locally:** 258 | - Download the repository [FREE AUTOGPT REPOSITORY](https://github.com/IntelligenzaArtificiale/Free-AUTOGPT-with-NO-API) 259 | - `python3 -m pip install -r requirements.txt` 260 | - `streamlit run Camel.py` 261 | 262 | 263 | https://user-images.githubusercontent.com/108482353/235199747-c2dbdd27-80d6-4950-9cc6-7f140890f386.mp4 264 | 265 | 
266 | 267 | 268 | 269 | ----- 270 |
271 | 272 |
273 | 274 | 275 | ## HOW DOES IT WORK? 🔨🔩 276 | 277 | 278 | 279 | 280 | To create an open-source version of Autogpt that does not require paid APIs or specific hardware, **we performed a reverse engineering process on ChatGPT**, a language model developed by OpenAI. By doing so, we were able to use the agents and new technologies of langchain for free. 281 | 282 | We then created a custom LLM wrapper with langchain, **which can be used as a plug-and-play solution with any langchain function or tool 💡**. 283 | 284 | ```python 285 | from FreeLLM import ChatGPTAPI 286 | 287 | # Instantiate a ChatGPT object with your token 288 | llm = ChatGPTAPI.ChatGPT(token="YOURTOKEN") # start a new chat 289 | 290 | # If you have a Plus subscription, you can use the GPT-4 model 291 | llm = ChatGPTAPI.ChatGPT(token="YOURTOKEN", model="gpt4") # REQUIRES a CHATGPT PLUS subscription 292 | 293 | # or, if you would like to start from an existing chat 294 | # llm = ChatGPTAPI.ChatGPT(token = "YOUR-TOKEN", conversation = "Add-XXXX-XXXX-Conversation-ID") 295 | 296 | 297 | # Generate a response based on the given prompt 298 | response = llm("Hello, how are you?") 299 | 300 | # Print the response 301 | print(response) 302 | 303 | ``` 304 | ![exixstingchat](https://user-images.githubusercontent.com/108482353/235359284-c908afe6-1f18-41ff-aa30-8216a1b9f19a.png) 305 | 306 | 307 | The code snippet provided above shows how to use our custom ChatGPT LLM class to interact with the language model. It requires a token from the ChatGPT API, which can be obtained from [https://chat.openai.com/api/auth/session](https://chat.openai.com/api/auth/session). 308 | 309 | Please note that there is a limit of 50 requests per hour for each account on the ChatGPT API 💣. Therefore, we implemented a call counter in our ChatGPT class to prevent exceeding this limit. 
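The counter itself is only a few lines. Here is a standalone sketch of the guard used in `FreeLLM/ChatGPTAPI.py` (the wrapper class name here is ours; the thresholds — 45 calls for the default model, 23 for GPT-4 — are the ones the real class uses, kept a little below OpenAI's limit to stay safe):

```python
class RateLimitedLLM:
    """Minimal sketch of the call counter in FreeLLM/ChatGPTAPI.py."""

    def __init__(self, llm, max_calls_per_hour: int = 45):
        self.llm = llm                       # any callable LLM wrapper
        self.max_calls = max_calls_per_hour  # 45 for the default model, 23 for gpt4
        self.call = 0

    def __call__(self, prompt: str) -> str:
        if self.call >= self.max_calls:
            raise ValueError("You have reached the maximum number of requests per hour!")
        self.call += 1
        return self.llm(prompt)
```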
310 | 311 | 312 | ### Now supports HuggingChat LLM 313 | 314 | ```python 315 | from FreeLLM import HuggingChatAPI 316 | 317 | # Instantiate a HuggingChat object with your credentials 318 | llm = HuggingChatAPI.HuggingChat(email="YOUR-EMAIL", psw="YOUR-PASSWORD") # start a new chat 319 | 320 | # Generate a response based on the given prompt 321 | response = llm("Hello, how are you?") 322 | 323 | # Print the response 324 | print(response) 325 | 326 | ``` 327 | 328 | ### Now supports Bing Chat LLM 329 | 330 | ```python 331 | from FreeLLM import BingChatAPI 332 | 333 | # Instantiate a Bing Chat object with your cookie path 334 | llm = BingChatAPI.BingChat(cookiepath="cookie_path") # start a new chat 335 | 336 | # if you want to set the conversation style 337 | #llm = BingChatAPI.BingChat(cookiepath=cookie_path, conversation_style="creative") # conversation_style must be precise, creative or balanced 338 | 339 | # if you want to give Microsoft Bing internet access 340 | #llm = BingChatAPI.BingChat(cookiepath="YOUR-COOKIE", conversation_style="precise", search_result=True) # with web access 341 | 342 | 343 | # Generate a response based on the given prompt 344 | response = llm("Hello, how are you?") 345 | 346 | # Print the response 347 | print(response) 348 | 349 | ``` 350 | 351 | ### Now supports Google Bard Chat LLM 352 | 353 | ```python 354 | from FreeLLM import BardChatAPI 355 | 356 | # Instantiate a Bard Chat object with your cookie 357 | llm = BardChatAPI.BardChat(cookie="cookie") # start a new chat 358 | 359 | # Generate a response based on the given prompt 360 | response = llm("Hello, how are you?") 361 | 362 | # Print the response 363 | print(response) 364 | 365 | ``` 366 | 367 | We believe that our open-source version of Autogpt will promote equitable and diverse access to AI technology and empower individuals and small businesses to create innovative AI projects without significant financial investment. 368 | 369 | **This is an example of a CUSTOM agent, in less than 60 lines of code and totally for free, with:** 370 | - **Internet** access 371 | - Python **code execution** 372 | - **Wikipedia** knowledge 373 | 374 | ```python 375 | from langchain.agents import initialize_agent # used to create a new agent 376 | from langchain.agents import Tool 377 | from langchain.tools import BaseTool, DuckDuckGoSearchRun 378 | from langchain.utilities import PythonREPL # tool to execute python code 379 | from langchain.utilities import WikipediaAPIWrapper # tool to get wiki info 380 | from langchain.tools import DuckDuckGoSearchTool # tool to get live internet info (langchain==0.0.150) 381 | 382 | from FreeLLM import ChatGPTAPI # FREE CHATGPT API 383 | #or 384 | from FreeLLM import HuggingChatAPI 385 | from FreeLLM import BingChatAPI 386 | from FreeLLM import BardChatAPI 387 | 388 | 389 | # Instantiate a ChatGPT object with your token 390 | llm = ChatGPTAPI.ChatGPT(token="YOURTOKEN") 391 | 392 | # or use Bing CHAT 393 | # llm = BingChatAPI.BingChat(cookiepath="cookie_path") 394 | 395 | # or use Google Bard CHAT 396 | # llm = BardChatAPI.BardChat(cookie="cookie") 397 | 398 | # or use HuggingChatAPI if you don't have a ChatGPT, Bing or Google account 399 | # llm = HuggingChatAPI.HuggingChat(email="YOUR-EMAIL", psw="YOUR-PASSWORD") 400 | 401 | 402 | # Define the tools 403 | wikipedia = WikipediaAPIWrapper() 404 | python_repl = PythonREPL() 405 | search = DuckDuckGoSearchTool() 406 | 407 | tools = [ 408 | Tool( 409 | name = "python repl", 410 | func=python_repl.run, 411 | description="useful for when you need to use python to answer a question. 
You should input Python code."
412 | )
413 | ]
414 | 
415 | wikipedia_tool = Tool(
416 | name='wikipedia',
417 | func=wikipedia.run,
418 | description="Useful for when you need to look up a topic, country or person on Wikipedia"
419 | )
420 | 
421 | duckduckgo_tool = Tool(
422 | name='DuckDuckGo Search',
423 | func=search.run,
424 | description="Useful for when you need to do a search on the internet to find information that another tool can't find. Be specific with your input."
425 | )
426 | 
427 | tools.append(duckduckgo_tool)
428 | tools.append(wikipedia_tool)
429 | 
430 | 
431 | # Create the agent
432 | iteration = (int(input("Enter the number of iterations: ")) if input("Do you want to set the number of iterations? (y/n): ") == "y" else 3)
433 | 
434 | zero_shot_agent = initialize_agent(
435 | agent="zero-shot-react-description",
436 | tools=tools,
437 | llm=llm,
438 | verbose=True,
439 | max_iterations=iteration,
440 | )
441 | 
442 | # Start your custom agent in a loop
443 | print(">> START CUSTOM AGENT")
444 | print("> Type 'exit' to quit, or enter your task or question to start\n\n")
445 | prompt = input("(Enter your task or question) >> ")
446 | while prompt.lower() != "exit":
447 | zero_shot_agent.run(prompt)
448 | prompt = input("(Enter your task or question) >> ")
449 | 
450 | # SO EASY :)
451 | 
452 | ```
453 | 
454 | ![Schermata del 2023-04-30 16-25-11](https://user-images.githubusercontent.com/108482353/235358379-dfd7dbba-74ff-48a1-b23c-c51b63d4c181.png)
455 | 
456 | 
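The same pattern extends to any capability you want to give the agent. As a hedged sketch (the `write_file` helper and its `path::text` input format below are our own illustration, not code from the repository), this is how a simple file-writer tool, like the one ticked off in the TODO list further down, could be appended to the same `tools` list:

```python
from langchain.agents import Tool

def write_file(payload: str) -> str:
    """Illustrative helper: expects '<path>::<text>' and writes the text to the path."""
    path, _, text = payload.partition("::")
    with open(path.strip(), "w", encoding="utf-8") as f:
        f.write(text)
    return f"Wrote {len(text)} characters to {path.strip()}"

file_writer_tool = Tool(
    name="file writer",
    func=write_file,
    description="Useful for saving text to disk. Input format: '<file path>::<text to write>'"
)

tools.append(file_writer_tool)
```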
457 | 458 | 459 | ----- 460 |
461 | 462 | # **🤗 Democratize AI 🤗** 463 | 464 | [![Star History Chart](https://api.star-history.com/svg?repos=IntelligenzaArtificiale/Free-AUTOGPT-with-NO-API&type=Date)](https://star-history.com/#IntelligenzaArtificiale/Free-AUTOGPT-with-NO-API) 465 | 466 | 467 | By the way, thank you so much for [![Stars](https://img.shields.io/github/stars/IntelligenzaArtificiale/Free-AUTOGPT-with-NO-API?style=social)](https://github.com/IntelligenzaArtificiale/Free-AUTOGPT-with-NO-API/stargazers) and all the support!! 468 | 469 | 470 | 471 | ----- 472 |
473 | 
474 | ## TODO: I NEED YOUR HELP 👥👨‍💻
475 | 
476 | - [x] Create a free LLM langchain wrapper based on the [reverse-engineered ChatGPT API by OpenAI](https://github.com/terry3041/pyChatGPT)
477 | - [x] Create a free LLM langchain wrapper based on the [reverse-engineered HUGGING CHAT API by HuggingFace](https://github.com/Soulter/hugging-chat-api)
478 | - [x] Create a free LLM langchain wrapper based on the [reverse-engineered Bing CHAT API by Microsoft](https://github.com/acheong08/EdgeGPT)
479 | - [x] Create a free LLM langchain wrapper based on the [reverse-engineered Bard CHAT API by Google](https://github.com/acheong08/Bard)
480 | - [x] Find a way to replace OpenAIEmbeddings() using the HuggingFace Embeddings inference API
481 | - [x] Create a simple version of CAMEL based on [Camel theory](https://arxiv.org/pdf/2303.17760.pdf)
482 | - [x] Create a simple version of BABYAGI based on [Baby AGI](https://alumni.media.mit.edu/~kris/ftp/SafeBabyAGI-J.BiegerEtAl2015.pdf)
483 | - [x] Add web search Tool
484 | - [x] Add file writer Tool
485 | - [x] Add Wikipedia Tool
486 | - [x] Add QA web page Tool
487 | - [x] Finally, AUTOGPT without paid APIs
488 | - [x] Make a Colab notebook to make this repository accessible to anyone
489 | - [x] Local use with a Dev Container in VSCode by [@FlamingFury00](https://github.com/FlamingFury00)
490 | - [ ] Add other free custom LLM wrappers [Add this](https://github.com/xtekky/gpt4free)
491 | - [ ] Add long-term memory
492 | - [ ] Find a way to replace the PINECONE API (see the FAISS sketch below)
493 | - [ ] Find a way to replace the official Google API
494 | 
495 | ## We are hungry for PULL REQUESTS 😋
496 | 
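If you want to pick up the PINECONE item, one possible direction is to pair a local FAISS index (already shipped in requirements.txt as `faiss-gpu`/`faiss-cpu`) with free HuggingFace embeddings. A hedged sketch: the sample texts are made up, and `HuggingFaceEmbeddings` additionally needs the `sentence-transformers` package, which is not in requirements.txt:

```python
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

# Free, local embeddings instead of OpenAIEmbeddings()
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# A local FAISS index instead of a hosted Pinecone index
texts = ["AutoGPT runs agents in a loop", "BabyAGI prioritizes tasks"]  # sample data
index = FAISS.from_texts(texts, embeddings)

# Query it the same way you would query Pinecone through langchain
print(index.similarity_search("how are tasks prioritized?", k=1))
```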
497 | -----
498 | 
499 | 
500 | 
501 | 
502 | #### Useful LINKS 👥
503 | 
504 | 
505 | 
506 | - [VIDEO DEMO](https://watch.screencastify.com/v/vSDUBdhfvh9yEwclHUyw)
507 | - [FREE AUTOGPT REPOSITORY](https://github.com/IntelligenzaArtificiale/Free-AUTOGPT-with-NO-API)
508 | - [Camel project](https://www.camel-ai.org/)
509 | - [BABY AGI](https://python.langchain.com/en/latest/use_cases/agents/baby_agi_with_agent.html)
510 | - [AutoGPT](https://python.langchain.com/en/latest/use_cases/autonomous_agents/autogpt.html?highlight=autogpt#setup-model-and-autogpt)
511 | - [langchain for custom LLM wrappers](https://python.langchain.com/en/latest/modules/models/llms/examples/custom_llm.html)
512 | 
513 | 
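For reference, every wrapper in `FreeLLM` follows the same minimal custom-LLM pattern from the last link above: subclass langchain's `LLM` and implement `_call`, `_llm_type`, and `_identifying_params`. A bare-bones sketch (the `EchoLLM` class is illustrative only):

```python
from typing import Any, List, Mapping, Optional

from langchain.llms.base import LLM

class EchoLLM(LLM):
    """Illustrative custom LLM: wire any free backend into _call."""

    @property
    def _llm_type(self) -> str:
        return "custom"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        # Replace this line with a call to your free backend (ChatGPT, Bard, Bing, ...)
        return f"echo: {prompt}"

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {"model": "EchoLLM"}

# llm = EchoLLM()
# print(llm("Hello"))  # works anywhere a langchain LLM is accepted
```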
514 | 515 | ----- 516 | 517 | 518 |
519 | 520 | 521 | ### Inspiration and Credits 🤗 522 | 523 | 524 | 525 | - [https://github.com/hwchase17/langchain](https://github.com/hwchase17/langchain) 526 | - [https://github.com/terry3041/pyChatGPT](https://github.com/terry3041/pyChatGPT) 527 | - [https://github.com/Soulter/hugging-chat-api](https://github.com/Soulter/hugging-chat-api) 528 | - [https://github.com/Significant-Gravitas/Auto-GPT](https://github.com/Significant-Gravitas/Auto-GPT) 529 | 530 |
531 | 532 |
533 | 
534 | 
535 | ### Legal Notice 🧑‍⚖️
536 | 
537 | 
538 | 
539 | This repository is _not_ associated with or endorsed by the providers of the APIs it uses. This project is intended **for educational purposes only**. It is just a personal project.
540 | Please note the following:
541 | 
542 | 1. **Disclaimer**: The APIs, services, and trademarks mentioned in this repository belong to their respective owners. This project is _not_ claiming any right over them nor is it affiliated with or endorsed by any of the providers mentioned.
543 | 
544 | 2. **Responsibility**: The author of this repository is _not_ responsible for any consequences, damages, or losses arising from the use or misuse of this repository or the content provided by the third-party APIs. Users are solely responsible for their actions and any repercussions that may follow. We strongly recommend that users follow the Terms of Service of each website.
545 | 
546 | 3. **Educational Purposes Only**: This repository and its content are provided strictly for educational purposes. By using the information and code provided, users acknowledge that they are using the APIs and models at their own risk and agree to comply with any applicable laws and regulations.
547 | 
548 | 4. **Indemnification**: Users agree to indemnify, defend, and hold harmless the author of this repository from and against any and all claims, liabilities, damages, losses, or expenses, including legal fees and costs, arising out of or in any way connected with their use or misuse of this repository, its content, or related third-party APIs.
549 | 
550 | 5. **Updates and Changes**: The author reserves the right to modify, update, or remove any content, information, or features in this repository at any time without prior notice. Users are responsible for regularly reviewing the content and any changes made to this repository.
551 | 
552 | By using this repository or any code related to it, you agree to these terms. The author is not responsible for any copies, forks, or reuploads made by other users. This is the author's only account and repository. To prevent impersonation or irresponsible actions, you must comply with the MIT license this repository uses.
553 | 
554 | 
555 | -------------------------------------------------------------------------------- /TransformersAgent.py: -------------------------------------------------------------------------------- 1 | from hfAgent import agents 2 | from dotenv import load_dotenv 3 | 4 | import os 5 | import json 6 | 7 | from json import JSONDecodeError 8 | from pathlib import Path 9 | 10 | import huggingface_hub 11 | 12 | load_dotenv() 13 | 14 | select_model = input( 15 | "Select the model you want to use (1, 2, 3, 4, 5, 6) \n \ 16 | 1) ChatGPT \n \ 17 | 2) HuggingChat (NOT GOOD RESULT)\n \ 18 | 3) BingChat (NOT GOOD RESULT)\n \ 19 | 4) BardChat \n \ 20 | 5) StarCoder\n \ 21 | 6) OpenAssistant\n \ 22 | >>> " 23 | ) 24 | 25 | if select_model == "1": 26 | CG_TOKEN = os.getenv("CHATGPT_TOKEN", "your-chatgpt-token") 27 | 28 | if CG_TOKEN != "your-chatgpt-token": 29 | os.environ["CHATGPT_TOKEN"] = CG_TOKEN 30 | else: 31 | raise ValueError( 32 | "ChatGPT Token EMPTY. Edit the .env file and put your ChatGPT token" 33 | ) 34 | 35 | start_chat = os.getenv("USE_EXISTING_CHAT", False) 36 | if os.getenv("USE_GPT4") == "True": 37 | model = "gpt-4" 38 | else: 39 | model = "default" 40 | 41 | agent = agents.ChatGPTAgent(token=os.environ["CHATGPT_TOKEN"], model=model) 42 | 43 | 44 | elif select_model == "2": 45 | emailHF = os.getenv("emailHF", "your-emailHF") 46 | pswHF = os.getenv("pswHF", "your-pswHF") 47 | if emailHF != "your-emailHF" or pswHF != "your-pswHF": 48 | os.environ["emailHF"] = emailHF 49 | os.environ["pswHF"] = pswHF 50 | else: 51 | raise ValueError( 52 | "HuggingChat Token EMPTY. Edit the .env file and put your HuggingChat credentials" 53 | ) 54 | 55 | agent = agents.HuggingChatAgent() 56 | 57 | elif select_model == "3": 58 | if not os.path.exists("cookiesBing.json"): 59 | raise ValueError( 60 | "File 'cookiesBing.json' not found! Create it and put your cookies in there in the JSON format." 61 | ) 62 | cookie_path = "cookiesBing.json" 63 | with open("cookiesBing.json", "r") as file: 64 | try: 65 | file_json = json.loads(file.read()) 66 | except JSONDecodeError: 67 | raise ValueError( 68 | "You did not put your cookies inside 'cookiesBing.json'! You can find the simple guide to get the cookie file here: https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required." 69 | ) 70 | agent = agents.BingChatAgent(cookiepath=cookie_path, conversation="balanced") 71 | 72 | elif select_model == "4": 73 | GB_TOKEN = os.getenv("BARDCHAT_TOKEN", "your-googlebard-token") 74 | 75 | if GB_TOKEN != "your-googlebard-token": 76 | os.environ["BARDCHAT_TOKEN"] = GB_TOKEN 77 | else: 78 | raise ValueError( 79 | "GoogleBard Token EMPTY. Edit the .env file and put your GoogleBard token" 80 | ) 81 | cookie = os.environ["BARDCHAT_TOKEN"] 82 | agent = agents.BardChatAgent(token=cookie) 83 | elif select_model == "5": 84 | HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN", "your-huggingface-token") 85 | if HF_TOKEN != "your-huggingface-token": 86 | os.environ["HUGGINGFACEHUB_API_TOKEN"] = HF_TOKEN 87 | huggingface_hub.login(token=HF_TOKEN) 88 | else: 89 | raise ValueError( 90 | "HuggingFace Token EMPTY. 
Edit the .env file and put your HuggingFace token"
91 | )
92 | 
93 | from transformers.tools import HfAgent
94 | 
95 | agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
96 | 
97 | elif select_model == "6":
98 | HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN", "your-huggingface-token")
99 | if HF_TOKEN != "your-huggingface-token":
100 | os.environ["HUGGINGFACEHUB_API_TOKEN"] = HF_TOKEN
101 | huggingface_hub.login(token=HF_TOKEN)
102 | else:
103 | raise ValueError(
104 | "HuggingFace Token EMPTY. Edit the .env file and put your HuggingFace token"
105 | )
106 | 
107 | from transformers.tools import HfAgent
108 | 
109 | # Option 6 uses the OpenAssistant inference endpoint below
110 | 
111 | 
112 | 
113 | agent = HfAgent(
114 | url_endpoint="https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"
115 | )
116 | 
117 | 
118 | prompt = input(">>> Input prompt:\n>")
119 | while prompt != "exit":
120 | agent.run(prompt, remote=True)
121 | prompt = input(">>> Input prompt:\n>")
122 | 
-------------------------------------------------------------------------------- /cookiesBing.json: -------------------------------------------------------------------------------- 1 | Copy your BingCHAT cookie HERE 2 | 
-------------------------------------------------------------------------------- /hfAgent/FreeLLM/BardChatAPI.py: --------------------------------------------------------------------------------
1 | from Bard import Chatbot
2 | import asyncio
3 | 
4 | import requests
5 | from langchain.llms.base import LLM
6 | from typing import Optional, List, Mapping, Any
7 | import pydantic
8 | import os
9 | from langchain import PromptTemplate, LLMChain
10 | from time import sleep
11 | 
12 | 
13 | 
14 | class BardChat(LLM):
15 | 
16 | history_data: Optional[List] = []
17 | cookie : Optional[str]
18 | chatbot : Optional[Chatbot] = None
19 | 
20 | 
21 | @property
22 | def _llm_type(self) -> str:
23 | return "custom"
24 | 
25 | async def call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
26 | if stop is not None:
27 | pass
28 | #raise ValueError("stop kwargs are not permitted.")
29 | # a cookie is required
30 | if self.chatbot is None:
31 | if self.cookie is None:
32 | raise ValueError("Need a COOKIE; check https://github.com/acheong08/Bard for how to get and save your cookie")
33 | else:
34 | #if self.chatbot == None:
35 | self.chatbot = Chatbot(self.cookie)
36 | 
37 | response = self.chatbot.ask(prompt)
38 | #print(response)
39 | response_text = response['content']
40 | # add to history
41 | self.history_data.append({"prompt":prompt,"response":response_text})
42 | 
43 | return response_text
44 | 
45 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
46 | return asyncio.run(self.call(prompt=prompt))
47 | 
48 | @property
49 | def _identifying_params(self) -> Mapping[str, Any]:
50 | """Get the identifying parameters."""
51 | return {"model": "BardCHAT", "cookie": self.cookie}
52 | 
53 | 
54 | 
55 | #llm = BardChat(cookie = "YOURCOOKIE") # to start a new chat
56 | 
57 | #print(llm("Hello, how are you?"))
58 | #print(llm("what is AI?"))
59 | #print(llm("Can you resume your previous answer?")) # memory now works well
60 | 
-------------------------------------------------------------------------------- /hfAgent/FreeLLM/BingChatAPI.py: --------------------------------------------------------------------------------
1 | from EdgeGPT import Chatbot,
ConversationStyle 2 | import asyncio 3 | 4 | import requests 5 | from langchain.llms.base import LLM 6 | from typing import Optional, List, Mapping, Any 7 | import pydantic 8 | import os 9 | from langchain import PromptTemplate, LLMChain 10 | from time import sleep 11 | 12 | 13 | 14 | class BingChat(LLM): 15 | 16 | history_data: Optional[List] = [] 17 | cookiepath : Optional[str] 18 | chatbot : Optional[Chatbot] = None 19 | conversation_style : Optional[str] 20 | conversation_style_on : Optional[ConversationStyle] = ConversationStyle.precise 21 | search_result : Optional[bool] = False 22 | 23 | @property 24 | def _llm_type(self) -> str: 25 | return "custom" 26 | 27 | def select_conversation(self, conversation_style: str): 28 | if conversation_style == "precise": 29 | self.conversation_style_on = ConversationStyle.precise 30 | elif conversation_style == "creative": 31 | self.conversation_style_on = ConversationStyle.creative 32 | elif conversation_style == "balanced": 33 | self.conversation_style_on = ConversationStyle.balanced 34 | else: 35 | raise ValueError("conversation_style must be precise, creative or balaced") 36 | self.conversation_style = conversation_style 37 | 38 | async def call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 39 | if stop is not None: 40 | raise ValueError("stop kwargs are not permitted.") 41 | #cookiepath is a must check 42 | if self.chatbot is None: 43 | if self.cookiepath is None: 44 | raise ValueError("Need a COOKIE , check https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required for get your COOKIE AND SAVE") 45 | else: 46 | #if self.chatbot == None: 47 | self.chatbot = await Chatbot.create(cookie_path=self.cookiepath) 48 | 49 | if self.conversation_style is not None: 50 | self.conversation_style_on = self.select_conversation(self.conversation_style) 51 | 52 | response = await self.chatbot.ask(prompt=prompt, conversation_style=self.conversation_style, search_result=self.search_result) 53 | """ 54 | this is a sample response. 55 | {'type': 2, 'invocationId': '0', 56 | 'item': {'messages': [{'text': 'Hello, how are you?', 'author': 'user', 'from': {'id': '985157152860707', 'name': None}, 'createdAt': '2023-05-03T19:51:39.5491558+00:00', 'timestamp': '2023-05-03T19:51:39.5455787+00:00', 'locale': 'en-us', 'market': 'en-us', 'region': 'us', 'messageId': '87f90c57-b2ad-4b3a-b24f-99f633f5332f', 'requestId': '87f90c57-b2ad-4b3a-b24f-99f633f5332f', 'nlu': {'scoredClassification': {'classification': 'CHAT_GPT', 'score': None}, 'classificationRanking': [{'classification': 'CHAT_GPT', 'score': None}], 'qualifyingClassifications': None, 'ood': None, 'metaData': None, 'entities': None}, 'offense': 'None', 'feedback': {'tag': None, 'updatedOn': None, 'type': 'None'}, 'contentOrigin': 'cib', 'privacy': None, 'inputMethod': 'Keyboard'}, {'text': "Hello! I'm doing well, thank you. How can I assist you today?", 'author': 'bot', 'createdAt': '2023-05-03T19:51:41.5176164+00:00', 'timestamp': '2023-05-03T19:51:41.5176164+00:00', 'messageId': '1d013e71-408b-4031-a131-2f5c009fe938', 'requestId': '87f90c57-b2ad-4b3a-b24f-99f633f5332f', 'offense': 'None', 'adaptiveCards': [{'type': 'AdaptiveCard', 'version': '1.0', 'body': [{'type': 'TextBlock', 'text': "Hello! I'm doing well, thank you. 
How can I assist you today?\n", 'wrap': True}]}], 57 | 'sourceAttributions': [], 58 | 'feedback': {'tag': None, 'updatedOn': None, 'type': 'None'}, 59 | 'contentOrigin': 'DeepLeo', 60 | 'privacy': None, 61 | 'suggestedResponses': [{'text': 'What is the weather like today?', 'author': 'user', 'createdAt': '2023-05-03T19:51:42.7502696+00:00', 'timestamp': '2023-05-03T19:51:42.7502696+00:00', 'messageId': 'cd7a84d3-f9bc-47ff-9897-077b2de12e21', 'messageType': 'Suggestion', 'offense': 'Unknown', 'feedback': {'tag': None, 'updatedOn': None, 'type': 'None'}, 'contentOrigin': 'DeepLeo', 'privacy': None}, {'text': 'What is the latest news?', 'author': 'user', 'createdAt': '2023-05-03T19:51:42.7502739+00:00', 'timestamp': '2023-05-03T19:51:42.7502739+00:00', 'messageId': 'b611632a-9a8e-42de-86eb-8eb3b7b8ddbb', 'messageType': 'Suggestion', 'offense': 'Unknown', 'feedback': {'tag': None, 'updatedOn': None, 'type': 'None'}, 'contentOrigin': 'DeepLeo', 'privacy': None}, {'text': 'Tell me a joke.', 'author': 'user', 'createdAt': '2023-05-03T19:51:42.7502743+00:00', 'timestamp': '2023-05-03T19:51:42.7502743+00:00', 'messageId': '70232e45-d7e8-4d77-83fc-752b3cd3355c', 'messageType': 'Suggestion', 'offense': 'Unknown', 'feedback': {'tag': None, 'updatedOn': None, 'type': 'None'}, 'contentOrigin': 'DeepLeo', 'privacy': None}], 'spokenText': 'How can I assist you today?'}], 'firstNewMessageIndex': 1, 'defaultChatName': None, 'conversationId': '51D|BingProd|3E1274E188350D7BE273FFE95E02DD2984DAB52F95260300D0A2937162F98FDA', 'requestId': '87f90c57-b2ad-4b3a-b24f-99f633f5332f', 'conversationExpiryTime': '2023-05-04T01:51:42.8260286Z', 'shouldInitiateConversation': True, 'telemetry': {'metrics': None, 'startTime': '2023-05-03T19:51:39.5456555Z'}, 'throttling': {'maxNumUserMessagesInConversation': 20, 'numUserMessagesInConversation': 1}, 'result': {'value': 'Success', 'serviceVersion': '20230501.30'}}} 62 | """ 63 | response_messages = response.get("item", {}).get("messages", []) 64 | response_text = response_messages[1].get("text", "") 65 | 66 | if response_text == "": 67 | hidden_text = response_messages[1].get("hiddenText", "") 68 | print(">>>> [DEBBUGGER] hidden_text = " + str(hidden_text) + " [DEBBUGGER] <<<<") 69 | print(">>>> [DEBBUGGER] BING CHAT dont is open Like CHATGPT , BingCHAT have refused to respond. [DEBBUGGER] <<<<") 70 | response_text = hidden_text 71 | """ 72 | # reset the chatbot and remake the call 73 | print("[DEBUGGER] Chatbot failed to respond. Resetting and trying again. 
[DEBUGGER]") 74 | print("[ INFO DEBUGGER ] \n\n" + str(response) + "\n\n\n") 75 | sleep(10) 76 | self.chatbot = await Chatbot.create(cookie_path=self.cookiepath) 77 | sleep(2) 78 | response = await self.chatbot.ask(prompt=prompt) 79 | response_messages = response.get("item", {}).get("messages", []) 80 | response_text = response_messages[1].get("text", "") 81 | """ 82 | 83 | #add to history 84 | self.history_data.append({"prompt":prompt,"response":response_text}) 85 | 86 | return response_text 87 | 88 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 89 | return asyncio.run(self.call(prompt=prompt)) 90 | 91 | @property 92 | def _identifying_params(self) -> Mapping[str, Any]: 93 | """Get the identifying parameters.""" 94 | return {"model": "BingCHAT", "cookiepath": self.cookiepath} 95 | 96 | 97 | 98 | #llm = BingChat(cookiepath = "YOUR-COOKIE") #for start new chat 99 | #llm = BingChat(cookiepath = "YOUR-COOKIE", conversation_style = "precise") #precise, creative or balaced 100 | #llm = BingChat(cookiepath = "YOUR-COOKIE" , conversation_style = "precise" , search_result=True) #with web access 101 | 102 | #print(llm("Hello, how are you?")) 103 | #print(llm("what is AI?")) 104 | #print(llm("Can you resume your previus answer?")) #now memory work well 105 | -------------------------------------------------------------------------------- /hfAgent/FreeLLM/ChatGPTAPI.py: -------------------------------------------------------------------------------- 1 | from gpt4_openai import GPT4OpenAI 2 | from langchain.llms.base import LLM 3 | from typing import Optional, List, Mapping, Any 4 | from time import sleep 5 | 6 | 7 | 8 | class ChatGPT(LLM): 9 | 10 | history_data: Optional[List] = [] 11 | token : Optional[str] 12 | chatbot : Optional[GPT4OpenAI] = None 13 | call : int = 0 14 | model : str = "gpt-3" # or gpt-4 15 | plugin_id : Optional[List] = [] 16 | 17 | #### WARNING : for each api call this library will create a new chat on chat.openai.com 18 | 19 | 20 | @property 21 | def _llm_type(self) -> str: 22 | return "custom" 23 | 24 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: 25 | if stop is not None: 26 | pass 27 | #raise ValueError("stop kwargs are not permitted.") 28 | #token is a must check 29 | if self.chatbot is None: 30 | if self.token is None: 31 | raise ValueError("Need a token , check https://chat.openai.com/api/auth/session for get your token") 32 | else: 33 | try: 34 | if self.plugin_id == []: 35 | self.chatbot = GPT4OpenAI(token=self.token, model=self.model) 36 | else: 37 | self.chatbot = GPT4OpenAI(token=self.token, model=self.model, plugin_ids=self.plugin_id) 38 | except: 39 | raise ValueError("Error on create chatbot, check your token, or your model") 40 | 41 | response = "" 42 | # OpenAI: 50 requests / hour for each account 43 | if (self.call >= 45 and self.model == "default") or (self.call >= 23 and self.model == "gpt4"): 44 | raise ValueError("You have reached the maximum number of requests per hour ! Help me to Improve. 
Abusing this tool is at your own risk")
45 | else:
46 | sleep(2)
47 | response = self.chatbot(prompt)
48 | 
49 | self.call += 1
50 | 
51 | # add to history
52 | self.history_data.append({"prompt":prompt,"response":response})
53 | 
54 | return response
55 | 
56 | @property
57 | def _identifying_params(self) -> Mapping[str, Any]:
58 | """Get the identifying parameters."""
59 | return {"model": self.model, "token": self.token}
60 | 
61 | 
62 | 
63 | #llm = ChatGPT(token = "YOUR-COOKIE") # to start a new chat
64 | 
65 | #llm = ChatGPT(token = "YOUR-COOKIE" , model="gpt4") # REQUIRES a ChatGPT PLUS subscription
66 | 
67 | #llm = ChatGPT(token = "YOUR-COOKIE", conversation = "Add-XXXX-XXXX-Conversation-ID") # to reuse a chat already started
68 | 
69 | #print(llm("Hello, how are you?"))
70 | #print(llm("what is AI?"))
71 | #print(llm("Can you resume your previous answer?")) # memory now works well
72 | 
-------------------------------------------------------------------------------- /hfAgent/FreeLLM/HuggingChatAPI.py: --------------------------------------------------------------------------------
1 | 
2 | from hugchat import hugchat
3 | from hugchat.login import Login
4 | from langchain.llms.base import LLM
5 | from typing import Optional, List, Mapping, Any
6 | from time import sleep
7 | 
8 | 
9 | 
10 | 
11 | class HuggingChat(LLM):
12 | 
13 | history_data: Optional[List] = []
14 | chatbot : Optional[hugchat.ChatBot] = None
15 | conversation : Optional[str] = ""
16 | email : Optional[str]
17 | psw : Optional[str]
18 | #### NOTE: this wrapper logs in to huggingface.co with the credentials from your .env file
19 | 
20 | 
21 | @property
22 | def _llm_type(self) -> str:
23 | return "custom"
24 | 
25 | def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
26 | if stop is not None:
27 | pass
28 | #raise ValueError("stop kwargs are not permitted.")
29 | # credentials are required
30 | if self.chatbot is None:
31 | if self.email is None and self.psw is None:
32 | raise ValueError("Email and password are required, please check the documentation on GitHub")
33 | else:
34 | if self.conversation == "":
35 | sign = Login(self.email, self.psw)
36 | cookies = sign.login()
37 | 
38 | # Save cookies to usercookies/<email>.json
39 | sign.saveCookies()
40 | 
41 | # Create a ChatBot
42 | self.chatbot = hugchat.ChatBot(cookies=cookies.get_dict())
43 | else:
44 | raise ValueError("Something went wrong")
45 | 
46 | 
47 | sleep(2)
48 | data = self.chatbot.chat(prompt, temperature=0.5, stream=False)
49 | 
50 | 
51 | # add to history
52 | self.history_data.append({"prompt":prompt,"response":data})
53 | 
54 | return data
55 | 
56 | @property
57 | def _identifying_params(self) -> Mapping[str, Any]:
58 | """Get the identifying parameters."""
59 | return {"model": "HuggingCHAT"}
60 | 
61 | 
62 | 
63 | #llm = HuggingChat(email = "YOUR-EMAIL", psw = "YOUR-PASSWORD") # to start a new chat
64 | 
65 | 
66 | #print(llm("Hello, how are you?"))
67 | #print(llm("what is AI?"))
68 | #print(llm("Can you resume your previous answer?")) # memory now works well
69 | 
70 | 
-------------------------------------------------------------------------------- /hfAgent/FreeLLM/thanks.txt: -------------------------------------------------------------------------------- 1 | thank you for trying this project 2 | 
-------------------------------------------------------------------------------- /hfAgent/agents.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | 
4 | # Copyright 2023 The HuggingFace Inc. team.
All rights reserved. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | import importlib.util 18 | import json 19 | import os 20 | import time 21 | from dataclasses import dataclass 22 | from typing import Dict 23 | 24 | import requests 25 | from huggingface_hub import HfFolder, hf_hub_download, list_spaces 26 | 27 | from transformers.utils import logging 28 | from transformers.tools.base import TASK_MAPPING, TOOL_CONFIG_FILE, Tool, load_tool, supports_remote 29 | from transformers.tools.prompts import CHAT_MESSAGE_PROMPT, CHAT_PROMPT_TEMPLATE, RUN_PROMPT_TEMPLATE 30 | from transformers.tools.python_interpreter import evaluate 31 | 32 | 33 | logger = logging.get_logger(__name__) 34 | 35 | 36 | #if is_openai_available(): 37 | import openai 38 | 39 | _tools_are_initialized = False 40 | 41 | 42 | BASE_PYTHON_TOOLS = { 43 | "print": print, 44 | "float": float, 45 | "int": int, 46 | "bool": bool, 47 | "str": str, 48 | } 49 | 50 | 51 | @dataclass 52 | class PreTool: 53 | task: str 54 | description: str 55 | repo_id: str 56 | 57 | 58 | HUGGINGFACE_DEFAULT_TOOLS = {} 59 | 60 | 61 | HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB = [ 62 | "image-transformation", 63 | "text-download", 64 | "text-to-image", 65 | "text-to-video", 66 | ] 67 | 68 | 69 | def get_remote_tools(organization="huggingface-tools"): 70 | spaces = list_spaces(author=organization) 71 | tools = {} 72 | for space_info in spaces: 73 | repo_id = space_info.id 74 | resolved_config_file = hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="space") 75 | with open(resolved_config_file, encoding="utf-8") as reader: 76 | config = json.load(reader) 77 | 78 | task = repo_id.split("/")[-1] 79 | tools[config["name"]] = PreTool(task=task, description=config["description"], repo_id=repo_id) 80 | 81 | return tools 82 | 83 | 84 | def _setup_default_tools(): 85 | global HUGGINGFACE_DEFAULT_TOOLS 86 | global _tools_are_initialized 87 | 88 | if _tools_are_initialized: 89 | return 90 | 91 | main_module = importlib.import_module("transformers") 92 | tools_module = main_module.tools 93 | 94 | remote_tools = get_remote_tools() 95 | for task_name in TASK_MAPPING: 96 | tool_class_name = TASK_MAPPING.get(task_name) 97 | tool_class = getattr(tools_module, tool_class_name) 98 | description = tool_class.description 99 | HUGGINGFACE_DEFAULT_TOOLS[tool_class.name] = PreTool(task=task_name, description=description, repo_id=None) 100 | 101 | for task_name in HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB: 102 | found = False 103 | for tool_name, tool in remote_tools.items(): 104 | if tool.task == task_name: 105 | HUGGINGFACE_DEFAULT_TOOLS[tool_name] = tool 106 | found = True 107 | break 108 | 109 | if not found: 110 | raise ValueError(f"{task_name} is not implemented on the Hub.") 111 | 112 | _tools_are_initialized = True 113 | 114 | 115 | def resolve_tools(code, toolbox, remote=False, cached_tools=None): 116 | if cached_tools is None: 117 | resolved_tools = BASE_PYTHON_TOOLS.copy() 118 | else: 119 | resolved_tools = cached_tools 120 | for name, 
tool in toolbox.items(): 121 | if name not in code or name in resolved_tools: 122 | continue 123 | 124 | if isinstance(tool, Tool): 125 | resolved_tools[name] = tool 126 | else: 127 | task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id 128 | _remote = remote and supports_remote(task_or_repo_id) 129 | resolved_tools[name] = load_tool(task_or_repo_id, remote=_remote) 130 | 131 | return resolved_tools 132 | 133 | 134 | def get_tool_creation_code(code, toolbox, remote=False): 135 | code_lines = ["from transformers import load_tool", ""] 136 | for name, tool in toolbox.items(): 137 | if name not in code or isinstance(tool, Tool): 138 | continue 139 | 140 | task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id 141 | line = f'{name} = load_tool("{task_or_repo_id}"' 142 | if remote: 143 | line += ", remote=True" 144 | line += ")" 145 | code_lines.append(line) 146 | 147 | return "\n".join(code_lines) + "\n" 148 | 149 | 150 | def clean_code_for_chat(result): 151 | lines = result.split("\n") 152 | idx = 0 153 | while idx < len(lines) and not lines[idx].lstrip().startswith("```"): 154 | idx += 1 155 | explanation = "\n".join(lines[:idx]).strip() 156 | if idx == len(lines): 157 | return explanation, None 158 | 159 | idx += 1 160 | start_idx = idx 161 | while not lines[idx].lstrip().startswith("```"): 162 | idx += 1 163 | code = "\n".join(lines[start_idx:idx]).strip() 164 | 165 | #if code start with "py`" or "python`" 166 | if code.startswith("py`"): 167 | code = code[3:] 168 | elif code.startswith("python`"): 169 | code = code[7:] 170 | elif code.startswith("`"): 171 | code = code[1:] 172 | 173 | if code.endswith("`"): 174 | code = code[:-1] 175 | 176 | return explanation, code 177 | 178 | 179 | def clean_code_for_run(result): 180 | result = f"I will use the following {result}" 181 | try: 182 | explanation, code = result.split("Answer:") 183 | except ValueError: 184 | explanation = result 185 | code = "#Problem with the code" 186 | 187 | explanation = explanation.strip() 188 | code = code.strip() 189 | 190 | code_lines = code.split("\n") 191 | if code_lines[0] in ["```", "```py", "```python", "py`", "`"]: 192 | code_lines = code_lines[1:] 193 | if code_lines[-1] == "```": 194 | code_lines = code_lines[:-1] 195 | code = "\n".join(code_lines) 196 | 197 | #if code start with "py`" or "python`" 198 | if code.startswith("py`"): 199 | code = code[3:] 200 | elif code.startswith("python`"): 201 | code = code[7:] 202 | elif code.startswith("`"): 203 | code = code[1:] 204 | 205 | if code.endswith("`"): 206 | code = code[:-1] 207 | 208 | 209 | return explanation, code 210 | 211 | 212 | class Agent: 213 | """ 214 | Base class for all agents which contains the main API methods. 215 | 216 | Args: 217 | chat_prompt_template (`str`, *optional*): 218 | Pass along your own prompt if you want to override the default template for the `chat` method. 219 | run_prompt_template (`str`, *optional*): 220 | Pass along your own prompt if you want to override the default template for the `run` method. 221 | additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): 222 | Any additional tools to include on top of the default ones. If you pass along a tool with the same name as 223 | one of the default tools, that default tool will be overridden. 
224 | """ 225 | 226 | def __init__(self, chat_prompt_template=None, run_prompt_template=None, additional_tools=None): 227 | _setup_default_tools() 228 | 229 | self.chat_prompt_template = CHAT_MESSAGE_PROMPT if chat_prompt_template is None else chat_prompt_template 230 | self.run_prompt_template = RUN_PROMPT_TEMPLATE if run_prompt_template is None else run_prompt_template 231 | self._toolbox = HUGGINGFACE_DEFAULT_TOOLS.copy() 232 | if additional_tools is not None: 233 | if isinstance(additional_tools, (list, tuple)): 234 | additional_tools = {t.name: t for t in additional_tools} 235 | elif not isinstance(additional_tools, dict): 236 | additional_tools = {additional_tools.name: additional_tools} 237 | 238 | replacements = {name: tool for name, tool in additional_tools.items() if name in HUGGINGFACE_DEFAULT_TOOLS} 239 | self._toolbox.update(additional_tools) 240 | if len(replacements) > 1: 241 | names = "\n".join([f"- {n}: {t}" for n, t in replacements.items()]) 242 | logger.warn( 243 | f"The following tools have been replaced by the ones provided in `additional_tools`:\n{names}." 244 | ) 245 | elif len(replacements) == 1: 246 | name = list(replacements.keys())[0] 247 | logger.warn(f"{name} has been replaced by {replacements[name]} as provided in `additional_tools`.") 248 | 249 | self.prepare_for_new_chat() 250 | 251 | @property 252 | def toolbox(self) -> Dict[str, Tool]: 253 | """Get all tool currently available to the agent""" 254 | return self._toolbox 255 | 256 | def format_prompt(self, task, chat_mode=False): 257 | description = "\n".join([f"- {name}: {tool.description}" for name, tool in self.toolbox.items()]) 258 | if chat_mode: 259 | if self.chat_history is None: 260 | prompt = CHAT_PROMPT_TEMPLATE.replace("<>", description) 261 | else: 262 | prompt = self.chat_history 263 | prompt += CHAT_MESSAGE_PROMPT.replace("<>", task) 264 | else: 265 | prompt = self.run_prompt_template.replace("<>", description) 266 | prompt = prompt.replace("<>", task) 267 | return prompt 268 | 269 | def chat(self, task, *, return_code=False, remote=False, **kwargs): 270 | """ 271 | Sends a new request to the agent in a chat. Will use the previous ones in its history. 272 | 273 | Args: 274 | task (`str`): The task to perform 275 | return_code (`bool`, *optional*, defaults to `False`): 276 | Whether to just return code and not evaluate it. 277 | remote (`bool`, *optional*, defaults to `False`): 278 | Whether or not to use remote tools (inference endpoints) instead of local ones. 279 | kwargs: 280 | Any keyword argument to send to the agent when evaluating the code. 
281 | 282 | Example: 283 | 284 | ```py 285 | from transformers import HfAgent 286 | 287 | agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") 288 | agent.chat("Draw me a picture of rivers and lakes") 289 | 290 | agent.chat("Transform the picture so that there is a rock in there") 291 | ``` 292 | """ 293 | prompt = self.format_prompt(task, chat_mode=True) 294 | result = self.generate_one(prompt, stop=["Human:", "====="]) 295 | self.chat_history = prompt + result.strip() + "\n" 296 | explanation, code = clean_code_for_chat(result) 297 | 298 | print(f"==Explanation from the agent==\n{explanation}") 299 | 300 | if code is not None: 301 | print(f"\n\n==Code generated by the agent==\n{code}") 302 | if not return_code: 303 | print("\n\n==Result==") 304 | self.cached_tools = resolve_tools(code, self.toolbox, remote=remote, cached_tools=self.cached_tools) 305 | self.chat_state.update(kwargs) 306 | return evaluate(code, self.cached_tools, self.chat_state, chat_mode=True) 307 | else: 308 | tool_code = get_tool_creation_code(code, self.toolbox, remote=remote) 309 | return f"{tool_code}\n{code}" 310 | 311 | def prepare_for_new_chat(self): 312 | """ 313 | Clears the history of prior calls to [`~Agent.chat`]. 314 | """ 315 | self.chat_history = None 316 | self.chat_state = {} 317 | self.cached_tools = None 318 | 319 | def run(self, task, *, return_code=False, remote=False, **kwargs): 320 | """ 321 | Sends a request to the agent. 322 | 323 | Args: 324 | task (`str`): The task to perform 325 | return_code (`bool`, *optional*, defaults to `False`): 326 | Whether to just return code and not evaluate it. 327 | remote (`bool`, *optional*, defaults to `False`): 328 | Whether or not to use remote tools (inference endpoints) instead of local ones. 329 | kwargs: 330 | Any keyword argument to send to the agent when evaluating the code. 331 | 332 | Example: 333 | 334 | ```py 335 | from transformers import HfAgent 336 | 337 | agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") 338 | agent.run("Draw me a picture of rivers and lakes") 339 | ``` 340 | """ 341 | prompt = self.format_prompt(task) 342 | result = self.generate_one(prompt, stop=["Task:"]) 343 | explanation, code = clean_code_for_run(result) 344 | 345 | print(f"==Explanation from the agent==\n{explanation}") 346 | 347 | print(f"\n\n==Code generated by the agent==\n{code}") 348 | if not return_code: 349 | print("\n\n==Result==") 350 | self.cached_tools = resolve_tools(code, self.toolbox, remote=remote, cached_tools=self.cached_tools) 351 | return evaluate(code, self.cached_tools, state=kwargs.copy()) 352 | else: 353 | tool_code = get_tool_creation_code(code, self.toolbox, remote=remote) 354 | return f"{tool_code}\n{code}" 355 | 356 | def generate_one(self, prompt, stop): 357 | # This is the method to implement in your custom agent. 358 | raise NotImplementedError 359 | 360 | def generate_many(self, prompts, stop): 361 | # Override if you have a way to do batch generation faster than one by one 362 | return [self.generate_one(prompt, stop) for prompt in prompts] 363 | 364 | 365 | class OpenAiAgent(Agent): 366 | """ 367 | Agent that uses the openai API to generate code. 368 | 369 | 370 | 371 | The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like 372 | `"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a next version. 
373 | 374 | 375 | 376 | Args: 377 | model (`str`, *optional*, defaults to `"text-davinci-003"`): 378 | The name of the OpenAI model to use. 379 | api_key (`str`, *optional*): 380 | The API key to use. If unset, will look for the environment variable `"OPENAI_API_KEY"`. 381 | chat_prompt_template (`str`, *optional*): 382 | Pass along your own prompt if you want to override the default template for the `chat` method. 383 | run_prompt_template (`str`, *optional*): 384 | Pass along your own prompt if you want to override the default template for the `run` method. 385 | additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): 386 | Any additional tools to include on top of the default ones. If you pass along a tool with the same name as 387 | one of the default tools, that default tool will be overridden. 388 | 389 | Example: 390 | 391 | ```py 392 | from transformers import OpenAiAgent 393 | 394 | agent = OpenAiAgent(model="text-davinci-003", api_key=xxx) 395 | agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!") 396 | ``` 397 | """ 398 | 399 | def __init__( 400 | self, 401 | model="text-davinci-003", 402 | api_key=None, 403 | chat_prompt_template=None, 404 | run_prompt_template=None, 405 | additional_tools=None, 406 | ): 407 | #if not is_openai_available(): 408 | #raise ImportError("Using `OpenAiAgent` requires `openai`: `pip install openai`.") 409 | 410 | if api_key is None: 411 | api_key = os.environ.get("OPENAI_API_KEY", None) 412 | if api_key is None: 413 | raise ValueError( 414 | "You need an openai key to use `OpenAIAgent`. You can get one here: Get one here " 415 | "https://openai.com/api/`. If you have one, set it in your env with `os.environ['OPENAI_API_KEY'] = " 416 | "xxx." 417 | ) 418 | else: 419 | openai.api_key = api_key 420 | self.model = model 421 | super().__init__( 422 | chat_prompt_template=chat_prompt_template, 423 | run_prompt_template=run_prompt_template, 424 | additional_tools=additional_tools, 425 | ) 426 | 427 | def generate_many(self, prompts, stop): 428 | if "gpt" in self.model: 429 | return [self._chat_generate(prompt, stop) for prompt in prompts] 430 | else: 431 | return self._completion_generate(prompts, stop) 432 | 433 | def generate_one(self, prompt, stop): 434 | if "gpt" in self.model: 435 | return self._chat_generate(prompt, stop) 436 | else: 437 | return self._completion_generate([prompt], stop)[0] 438 | 439 | def _chat_generate(self, prompt, stop): 440 | result = openai.ChatCompletion.create( 441 | model=self.model, 442 | messages=[{"role": "user", "content": prompt}], 443 | temperature=0, 444 | stop=stop, 445 | ) 446 | return result["choices"][0]["message"]["content"] 447 | 448 | def _completion_generate(self, prompts, stop): 449 | result = openai.Completion.create( 450 | model=self.model, 451 | prompt=prompts, 452 | temperature=0, 453 | stop=stop, 454 | max_tokens=200, 455 | ) 456 | return [answer["text"] for answer in result["choices"]] 457 | 458 | 459 | class HfAgent(Agent): 460 | """ 461 | Agent that uses and inference endpoint to generate code. 462 | 463 | Args: 464 | url_endpoint (`str`): 465 | The name of the url endpoint to use. 466 | token (`str`, *optional*): 467 | The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when 468 | running `huggingface-cli login` (stored in `~/.huggingface`). 
469 | chat_prompt_template (`str`, *optional*):
470 | Pass along your own prompt if you want to override the default template for the `chat` method.
471 | run_prompt_template (`str`, *optional*):
472 | Pass along your own prompt if you want to override the default template for the `run` method.
473 | additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
474 | Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
475 | one of the default tools, that default tool will be overridden.
476 | 
477 | Example:
478 | 
479 | ```py
480 | from transformers import HfAgent
481 | 
482 | agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
483 | agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
484 | ```
485 | """
486 | 
487 | def __init__(
488 | self, url_endpoint, token=None, chat_prompt_template=None, run_prompt_template=None, additional_tools=None
489 | ):
490 | self.url_endpoint = url_endpoint
491 | if token is None:
492 | self.token = f"Bearer {HfFolder().get_token()}"
493 | elif token.startswith("Bearer") or token.startswith("Basic"):
494 | self.token = token
495 | else:
496 | self.token = f"Bearer {token}"
497 | super().__init__(
498 | chat_prompt_template=chat_prompt_template,
499 | run_prompt_template=run_prompt_template,
500 | additional_tools=additional_tools,
501 | )
502 | 
503 | def generate_one(self, prompt, stop):
504 | headers = {"Authorization": self.token}
505 | inputs = {
506 | "inputs": prompt,
507 | "parameters": {"max_new_tokens": 200, "return_full_text": False, "stop": stop},
508 | }
509 | 
510 | response = requests.post(self.url_endpoint, json=inputs, headers=headers)
511 | if response.status_code == 429:
512 | print("Getting rate-limited, waiting a tiny bit before trying again.")
513 | time.sleep(1)
514 | return self.generate_one(prompt, stop)
515 | elif response.status_code != 200:
516 | raise ValueError(f"Error {response.status_code}: {response.json()}")
517 | 
518 | result = response.json()[0]["generated_text"]
519 | # Inference API returns the stop sequence
520 | for stop_seq in stop:
521 | if result.endswith(stop_seq):
522 | result = result[: -len(stop_seq)]
523 | return result
524 | 
525 | 
526 | 
527 | 
528 | class ChatGPTAgent(Agent):
529 | """
530 | Agent that uses an inference endpoint of CHATGPT by IntelligenzaArtificialeItalia.net
531 | 
532 | Args:
533 | token (`str`):
534 | The ChatGPT session token, obtained from https://chat.openai.com/api/auth/session (see the README).
535 | 
536 | chat_prompt_template (`str`, *optional*):
537 | Pass along your own prompt if you want to override the default template for the `chat` method.
538 | run_prompt_template (`str`, *optional*):
539 | Pass along your own prompt if you want to override the default template for the `run` method.
540 | additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
541 | Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
542 | one of the default tools, that default tool will be overridden.
543 | 
544 | Example:
545 | 
546 | ```py
547 | from hfAgent import ChatGPTAgent
548 | 
549 | agent = ChatGPTAgent("TOKEN")
550 | agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
551 | agent.chat("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
552 | ```
553 | """
554 | 
555 | def __init__(
556 | self, token, chat_prompt_template=None, run_prompt_template=None, additional_tools=None, llm=None, model=None
557 | ):
558 | 
559 | if token is None:
560 | raise ValueError("You must provide a ChatGPT token")
561 | else:
562 | from .FreeLLM import ChatGPTAPI
563 | import asyncio
564 | self.token = token
565 | if model is not None:
566 | self.llm = ChatGPTAPI.ChatGPT(token = self.token, model = model) # pass the requested model through (e.g. "gpt-4")
567 | else:
568 | self.llm = ChatGPTAPI.ChatGPT(token = self.token)
569 | 
570 | 
571 | super().__init__(
572 | chat_prompt_template=chat_prompt_template,
573 | run_prompt_template=run_prompt_template,
574 | additional_tools=additional_tools,
575 | )
576 | 
577 | def generate_one(self, prompt, stop):
578 | 
579 | result = self.llm(prompt + "\nRemember to use the following stop sequence: " + str(stop))
580 | 
581 | # Inference API returns the stop sequence
582 | for stop_seq in stop:
583 | if result.endswith(stop_seq):
584 | result = result[: -len(stop_seq)]
585 | return result
586 | 
587 | 
588 | 
589 | 
590 | 
591 | class HuggingChatAgent(Agent):
592 | """
593 | Agent that uses an inference endpoint of HuggingCHAT by IntelligenzaArtificialeItalia.net
594 | 
595 | Args:
596 | chat_prompt_template (`str`, *optional*):
597 | Pass along your own prompt if you want to override the default template for the `chat` method.
598 | run_prompt_template (`str`, *optional*):
599 | Pass along your own prompt if you want to override the default template for the `run` method.
600 | additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
601 | Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
602 | one of the default tools, that default tool will be overridden.
603 | 
604 | Example:
605 | 
606 | ```py
607 | from hfAgent import HuggingChatAgent
608 | 
609 | agent = HuggingChatAgent()
610 | agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
611 | agent.chat("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
612 | ```
613 | """
614 | 
615 | def __init__(
616 | self, chat_prompt_template=None, run_prompt_template=None, additional_tools=None, llm=None, model=None
617 | ):
618 | 
619 | import json
620 | from pathlib import Path
621 | from json import JSONDecodeError
622 | 
623 | emailHF = os.getenv("emailHF", "your-emailHF")
624 | pswHF = os.getenv("pswHF", "your-pswHF")
625 | if emailHF != "your-emailHF" or pswHF != "your-pswHF":
626 | os.environ["emailHF"] = emailHF
627 | os.environ["pswHF"] = pswHF
628 | else:
629 | raise ValueError(
630 | "HuggingChat Token EMPTY. 
Edit the .env file and put your HuggingChat credentials"
631 | )
632 | 
633 | from .FreeLLM import HuggingChatAPI
634 | self.llm = HuggingChatAPI.HuggingChat(email=emailHF, psw=pswHF)
635 | 
636 | 
637 | 
638 | 
639 | 
640 | super().__init__(
641 | chat_prompt_template=chat_prompt_template,
642 | run_prompt_template=run_prompt_template,
643 | additional_tools=additional_tools,
644 | )
645 | 
646 | def generate_one(self, prompt, stop):
647 | 
648 | result = self.llm(prompt + "\nRemember to use the following stop sequence: " + str(stop))
649 | # Inference API returns the stop sequence
650 | for stop_seq in stop:
651 | if result.endswith(stop_seq):
652 | result = result[: -len(stop_seq)]
653 | return result
654 | 
655 | 
656 | class BingChatAgent(Agent):
657 | """
658 | Agent that uses an inference endpoint of BingCHAT by IntelligenzaArtificialeItalia.net
659 | 
660 | Args:
661 | cookiepath (`str`): Path to your Bing cookies JSON file (see cookiesBing.json).
662 | chat_prompt_template (`str`, *optional*):
663 | Pass along your own prompt if you want to override the default template for the `chat` method.
664 | run_prompt_template (`str`, *optional*):
665 | Pass along your own prompt if you want to override the default template for the `run` method.
666 | additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
667 | Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
668 | one of the default tools, that default tool will be overridden.
669 | 
670 | Example:
671 | 
672 | ```py
673 | from hfAgent import BingChatAgent
674 | 
675 | agent = BingChatAgent("cookie-path")
676 | agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
677 | agent.chat("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
678 | ```
679 | """
680 | 
681 | def __init__(
682 | self, cookiepath, chat_prompt_template=None, run_prompt_template=None, additional_tools=None, llm=None, model=None, conversation="balanced"
683 | ):
684 | 
685 | 
686 | from .FreeLLM import BingChatAPI
687 | 
688 | if cookiepath is None:
689 | raise ValueError("You must provide a cookie path")
690 | else:
691 | self.cookiepath = cookiepath
692 | if conversation == "balanced":
693 | self.llm = BingChatAPI.BingChat(cookiepath = self.cookiepath, conversation_style = "balanced")
694 | elif conversation == "creative":
695 | self.llm = BingChatAPI.BingChat(cookiepath = self.cookiepath, conversation_style = "creative")
696 | elif conversation == "precise":
697 | self.llm = BingChatAPI.BingChat(cookiepath = self.cookiepath, conversation_style = "precise")
698 | 
699 | 
700 | super().__init__(
701 | chat_prompt_template=chat_prompt_template,
702 | run_prompt_template=run_prompt_template,
703 | additional_tools=additional_tools,
704 | )
705 | 
706 | def generate_one(self, prompt, stop):
707 | 
708 | result = self.llm(prompt + "\nRemember to use the following stop sequence: " + str(stop))
709 | # Inference API returns the stop sequence
710 | for stop_seq in stop:
711 | if result.endswith(stop_seq):
712 | result = result[: -len(stop_seq)]
713 | return result
714 | 
715 | 
716 | 
717 | class BardChatAgent(Agent):
718 | """
719 | Agent that uses an inference endpoint of Bard Chat by IntelligenzaArtificialeItalia.net
720 | 
721 | Args:
722 | token (`str`): The Google Bard session cookie (the __Secure-1PSID value, see the .env instructions).
723 | chat_prompt_template (`str`, *optional*):
724 | Pass along your own prompt if you want to override the default template for the `chat` method.
725 | run_prompt_template (`str`, *optional*):
726 | Pass along your own prompt if you want to override the default template for the `run` method.
727 | additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
728 | Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
729 | one of the default tools, that default tool will be overridden.
730 | 
731 | Example:
732 | 
733 | ```py
734 | from hfAgent import BardChatAgent
735 | 
736 | agent = BardChatAgent("token")
737 | agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
738 | agent.chat("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
739 | ```
740 | """
741 | 
742 | def __init__(
743 | self, token, chat_prompt_template=None, run_prompt_template=None, additional_tools=None, llm=None, model=None, conversation="balanced"
744 | ):
745 | 
746 | 
747 | from .FreeLLM import BardChatAPI
748 | 
749 | if token is None:
750 | raise ValueError("You must provide a Bard token")
751 | else:
752 | self.token = token
753 | self.llm = BardChatAPI.BardChat(cookie = self.token)
754 | 
755 | 
756 | super().__init__(
757 | chat_prompt_template=chat_prompt_template,
758 | run_prompt_template=run_prompt_template,
759 | additional_tools=additional_tools,
760 | )
761 | 
762 | def generate_one(self, prompt, stop):
763 | 
764 | result = self.llm(prompt + "\nRemember to use the following stop sequence: " + str(stop))
765 | 
766 | # Inference API returns the stop sequence
767 | for stop_seq in stop:
768 | if result.endswith(stop_seq):
769 | result = result[: -len(stop_seq)]
770 | return result
771 | 
-------------------------------------------------------------------------------- /requirements.txt: --------------------------------------------------------------------------------
1 | requests
2 | langchain
3 | streamlit
4 | streamlit-chat-media
5 | numpy
6 | retry
7 | duckduckgo-search
8 | transformers
9 | tabulate
10 | wikipedia
11 | faiss-gpu # if you have a GPU
12 | # faiss-cpu # if you don't have a GPU
13 | nest_asyncio
14 | torch
15 | # tensorflow >= 2.0 -- For other future models
16 | # flax -- For other future models
17 | pytest-playwright
18 | undetected-chromedriver>=3.2.1
19 | markdownify>=0.11.6
20 | hugchat
21 | python-dotenv
22 | EdgeGPT
23 | GoogleBard
24 | lark
25 | accelerate
26 | diffusers
27 | gpt4-openai-api
--------------------------------------------------------------------------------