├── Chaos-GPT-master └── Auto-GPT-master │ ├── scripts │ ├── __init__.py │ ├── utils.py │ ├── data.py │ ├── llm_utils.py │ ├── memory │ │ ├── base.py │ │ ├── __init__.py │ │ ├── pinecone.py │ │ ├── local.py │ │ └── redismem.py │ ├── call_ai_function.py │ ├── spinner.py │ ├── image_gen.py │ ├── agent_manager.py │ ├── ai_functions.py │ ├── token_counter.py │ ├── file_operations.py │ ├── speak.py │ ├── execute_code.py │ ├── data │ │ └── prompt.txt │ ├── ai_config.py │ ├── json_utils.py │ ├── json_parser.py │ ├── browse.py │ ├── logger.py │ ├── chat.py │ ├── config.py │ ├── commands.py │ └── main.py │ ├── tests │ ├── __init__.py │ ├── context.py │ ├── local_cache_test.py │ ├── integration │ │ └── memory_tests.py │ ├── test_config.py │ ├── unit │ │ ├── test_browse_scrape_text.py │ │ ├── test_browse_scrape_links.py │ │ └── json_tests.py │ └── test_json_parser.py │ ├── main.py │ ├── Dockerfile │ ├── tests.py │ ├── azure.yaml.template │ ├── requirements.txt │ ├── LICENSE │ ├── CONTRIBUTING.md │ ├── outputs │ ├── guest_post_email.txt │ ├── post1_output.txt │ ├── how_to_save_money_on_energy_bills.txt │ ├── post2_output.txt │ └── logs │ │ ├── message-log-4.txt │ │ └── message-log-1.txt │ └── README.md └── README.md /Chaos-GPT-master/Auto-GPT-master/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/main.py: -------------------------------------------------------------------------------- 1 | from scripts.main import main 2 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/tests/context.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | sys.path.insert(0, os.path.abspath( 5 | os.path.join(os.path.dirname(__file__), '../scripts'))) 6 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11-slim 2 | ENV PIP_NO_CACHE_DIR=yes 3 | WORKDIR /app 4 | COPY requirements.txt . 5 | RUN pip install -r requirements.txt 6 | COPY scripts/ . 7 | ENTRYPOINT ["python", "main.py"] 8 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/utils.py: -------------------------------------------------------------------------------- 1 | def clean_input(prompt: str=''): 2 | try: 3 | return input(prompt) 4 | except KeyboardInterrupt: 5 | print("You interrupted Auto-GPT") 6 | print("Quitting...") 7 | exit(0) 8 | 9 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | if __name__ == "__main__": 4 | # Load all tests from the 'scripts/tests' package 5 | suite = unittest.defaultTestLoader.discover('scripts/tests') 6 | 7 | # Run the tests 8 | unittest.TextTestRunner().run(suite) 9 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/azure.yaml.template: -------------------------------------------------------------------------------- 1 | azure_api_base: your-base-url-for-azure 2 | azure_api_version: api-version-for-azure 3 | azure_model_map: 4 | fast_llm_model_deployment_id: gpt35-deployment-id-for-azure 5 | smart_llm_model_deployment_id: gpt4-deployment-id-for-azure 6 | embedding_model_deployment_id: embedding-deployment-id-for-azure 7 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/requirements.txt: -------------------------------------------------------------------------------- 1 | beautifulsoup4 2 | colorama==0.4.6 3 | openai==0.27.2 4 | playsound==1.2.2 5 | python-dotenv==1.0.0 6 | pyyaml==6.0 7 | readability-lxml==0.8.1 8 | requests 9 | tiktoken==0.3.3 10 | gTTS==2.3.1 11 | docker 12 | duckduckgo-search 13 | google-api-python-client #(https://developers.google.com/custom-search/v1/overview) 14 | pinecone-client==2.2.1 15 | redis 16 | orjson 17 | Pillow 18 | coverage 19 | flake8 -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/data.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | def load_prompt(): 5 | """Load the prompt from data/prompt.txt""" 6 | try: 7 | # get directory of this file: 8 | file_dir = Path(__file__).parent 9 | prompt_file_path = file_dir / "data" / "prompt.txt" 10 | 11 | # Load the prompt from data/prompt.txt 12 | with open(prompt_file_path, "r") as prompt_file: 13 | prompt = prompt_file.read() 14 | 15 | return prompt 16 | except FileNotFoundError: 17 | print("Error: Prompt file not found", flush=True) 18 | return "" 19 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/llm_utils.py: -------------------------------------------------------------------------------- 1 | import openai 2 | from config import Config 3 | cfg = Config() 4 | 5 | openai.api_key = cfg.openai_api_key 6 | 7 | # Overly simple abstraction until we create something better 8 | def create_chat_completion(messages, model=None, temperature=cfg.temperature, max_tokens=None)->str: 9 | """Create a chat completion using the OpenAI API""" 10 | if cfg.use_azure: 11 | response = openai.ChatCompletion.create( 12 | deployment_id=cfg.get_azure_deployment_id_for_model(model), 13 | model=model, 14 | messages=messages, 15 | temperature=temperature, 16 | max_tokens=max_tokens 17 | ) 18 | else: 19 | response = openai.ChatCompletion.create( 20 | model=model, 21 | messages=messages, 22 | temperature=temperature, 23 | max_tokens=max_tokens 24 | ) 25 | 26 | return response.choices[0].message["content"] 27 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/memory/base.py: -------------------------------------------------------------------------------- 1 | """Base class for memory providers.""" 2 | import abc 3 | from config import AbstractSingleton, Config 4 | import openai 5 | cfg = Config() 6 | 7 | cfg = Config() 8 | 9 | def get_ada_embedding(text): 10 | text = text.replace("\n", " ") 11 | if cfg.use_azure: 12 | return openai.Embedding.create(input=[text], engine=cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"))["data"][0]["embedding"] 13 | else: 14 | return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"] 15 | 16 | 17 | class MemoryProviderSingleton(AbstractSingleton): 18 | @abc.abstractmethod 19 | def add(self, data): 20 | pass 21 | 22 | @abc.abstractmethod 23 | def get(self, data): 24 | pass 25 | 26 | @abc.abstractmethod 27 | def clear(self): 28 | pass 29 | 30 | @abc.abstractmethod 31 | def get_relevant(self, data, num_relevant=5): 32 | pass 33 | 34 | @abc.abstractmethod 35 | def get_stats(self): 36 | pass 37 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/call_ai_function.py: -------------------------------------------------------------------------------- 1 | from config import Config 2 | 3 | cfg = Config() 4 | 5 | from llm_utils import create_chat_completion 6 | # This is a magic function that can do anything with no-code. See 7 | # https://github.com/Torantulino/AI-Functions for more info. 8 | def call_ai_function(function, args, description, model=None): 9 | """Call an AI function""" 10 | if model is None: 11 | model = cfg.smart_llm_model 12 | # For each arg, if any are None, convert to "None": 13 | args = [str(arg) if arg is not None else "None" for arg in args] 14 | # parse args to comma seperated string 15 | args = ", ".join(args) 16 | messages = [ 17 | { 18 | "role": "system", 19 | "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value.", 20 | }, 21 | {"role": "user", "content": args}, 22 | ] 23 | 24 | response = create_chat_completion( 25 | model=model, messages=messages, temperature=0 26 | ) 27 | 28 | return response 29 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Toran Bruce Richards 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/spinner.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import threading 3 | import itertools 4 | import time 5 | 6 | 7 | class Spinner: 8 | """A simple spinner class""" 9 | def __init__(self, message="Loading...", delay=0.1): 10 | """Initialize the spinner class""" 11 | self.spinner = itertools.cycle(['-', '/', '|', '\\']) 12 | self.delay = delay 13 | self.message = message 14 | self.running = False 15 | self.spinner_thread = None 16 | 17 | def spin(self): 18 | """Spin the spinner""" 19 | while self.running: 20 | sys.stdout.write(next(self.spinner) + " " + self.message + "\r") 21 | sys.stdout.flush() 22 | time.sleep(self.delay) 23 | sys.stdout.write('\r' + ' ' * (len(self.message) + 2) + '\r') 24 | 25 | def __enter__(self): 26 | """Start the spinner""" 27 | self.running = True 28 | self.spinner_thread = threading.Thread(target=self.spin) 29 | self.spinner_thread.start() 30 | 31 | def __exit__(self, exc_type, exc_value, exc_traceback): 32 | """Stop the spinner""" 33 | self.running = False 34 | self.spinner_thread.join() 35 | sys.stdout.write('\r' + ' ' * (len(self.message) + 2) + '\r') 36 | sys.stdout.flush() 37 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/tests/local_cache_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | # Probably a better way: 4 | sys.path.append(os.path.abspath('../scripts')) 5 | from memory.local import LocalCache 6 | 7 | def MockConfig(): 8 | return type('MockConfig', (object,), { 9 | 'debug_mode': False, 10 | 'continuous_mode': False, 11 | 'speak_mode': False, 12 | 'memory_index': 'auto-gpt', 13 | }) 14 | 15 | class TestLocalCache(unittest.TestCase): 16 | 17 | def setUp(self): 18 | self.cfg = MockConfig() 19 | self.cache = LocalCache(self.cfg) 20 | 21 | def test_add(self): 22 | text = "Sample text" 23 | self.cache.add(text) 24 | self.assertIn(text, self.cache.data.texts) 25 | 26 | def test_clear(self): 27 | self.cache.clear() 28 | self.assertEqual(self.cache.data, [""]) 29 | 30 | def test_get(self): 31 | text = "Sample text" 32 | self.cache.add(text) 33 | result = self.cache.get(text) 34 | self.assertEqual(result, [text]) 35 | 36 | def test_get_relevant(self): 37 | text1 = "Sample text 1" 38 | text2 = "Sample text 2" 39 | self.cache.add(text1) 40 | self.cache.add(text2) 41 | result = self.cache.get_relevant(text1, 1) 42 | self.assertEqual(result, [text1]) 43 | 44 | def test_get_stats(self): 45 | text = "Sample text" 46 | self.cache.add(text) 47 | stats = self.cache.get_stats() 48 | self.assertEqual(stats, (1, self.cache.data.embeddings.shape)) 49 | 50 | 51 | if __name__ == '__main__': 52 | unittest.main() 53 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/image_gen.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import io 3 | import os.path 4 | from PIL import Image 5 | from config import Config 6 | import uuid 7 | import openai 8 | from base64 import b64decode 9 | 10 | cfg = Config() 11 | 12 | working_directory = "auto_gpt_workspace" 13 | 14 | def generate_image(prompt): 15 | 16 | filename = str(uuid.uuid4()) + ".jpg" 17 | 18 | # DALL-E 19 | if cfg.image_provider == 'dalle': 20 | 21 | openai.api_key = cfg.openai_api_key 22 | 23 | response = openai.Image.create( 24 | prompt=prompt, 25 | n=1, 26 | size="256x256", 27 | response_format="b64_json", 28 | ) 29 | 30 | print("Image Generated for prompt:" + prompt) 31 | 32 | image_data = b64decode(response["data"][0]["b64_json"]) 33 | 34 | with open(working_directory + "/" + filename, mode="wb") as png: 35 | png.write(image_data) 36 | 37 | return "Saved to disk:" + filename 38 | 39 | # STABLE DIFFUSION 40 | elif cfg.image_provider == 'sd': 41 | 42 | API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4" 43 | headers = {"Authorization": "Bearer " + cfg.huggingface_api_token} 44 | 45 | response = requests.post(API_URL, headers=headers, json={ 46 | "inputs": prompt, 47 | }) 48 | 49 | image = Image.open(io.BytesIO(response.content)) 50 | print("Image Generated for prompt:" + prompt) 51 | 52 | image.save(os.path.join(working_directory, filename)) 53 | 54 | return "Saved to disk:" + filename 55 | 56 | else: 57 | return "No Image Provider Set" 58 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/memory/__init__.py: -------------------------------------------------------------------------------- 1 | from memory.local import LocalCache 2 | 3 | # List of supported memory backends 4 | # Add a backend to this list if the import attempt is successful 5 | supported_memory = ['local'] 6 | 7 | try: 8 | from memory.redismem import RedisMemory 9 | supported_memory.append('redis') 10 | except ImportError: 11 | print("Redis not installed. Skipping import.") 12 | RedisMemory = None 13 | 14 | try: 15 | from memory.pinecone import PineconeMemory 16 | supported_memory.append('pinecone') 17 | except ImportError: 18 | print("Pinecone not installed. Skipping import.") 19 | PineconeMemory = None 20 | 21 | def get_memory(cfg, init=False): 22 | memory = None 23 | if cfg.memory_backend == "pinecone": 24 | if not PineconeMemory: 25 | print("Error: Pinecone is not installed. Please install pinecone" 26 | " to use Pinecone as a memory backend.") 27 | else: 28 | memory = PineconeMemory(cfg) 29 | if init: 30 | memory.clear() 31 | elif cfg.memory_backend == "redis": 32 | if not RedisMemory: 33 | print("Error: Redis is not installed. Please install redis-py to" 34 | " use Redis as a memory backend.") 35 | else: 36 | memory = RedisMemory(cfg) 37 | 38 | if memory is None: 39 | memory = LocalCache(cfg) 40 | if init: 41 | memory.clear() 42 | return memory 43 | 44 | def get_supported_memory_backends(): 45 | return supported_memory 46 | 47 | 48 | __all__ = [ 49 | "get_memory", 50 | "LocalCache", 51 | "RedisMemory", 52 | "PineconeMemory", 53 | ] 54 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/tests/integration/memory_tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import random 3 | import string 4 | import sys 5 | from pathlib import Path 6 | # Add the parent directory of the 'scripts' folder to the Python path 7 | sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts')) 8 | from config import Config 9 | from memory.local import LocalCache 10 | 11 | class TestLocalCache(unittest.TestCase): 12 | 13 | def random_string(self, length): 14 | return ''.join(random.choice(string.ascii_letters) for _ in range(length)) 15 | 16 | def setUp(self): 17 | cfg = cfg = Config() 18 | self.cache = LocalCache(cfg) 19 | self.cache.clear() 20 | 21 | # Add example texts to the cache 22 | self.example_texts = [ 23 | 'The quick brown fox jumps over the lazy dog', 24 | 'I love machine learning and natural language processing', 25 | 'The cake is a lie, but the pie is always true', 26 | 'ChatGPT is an advanced AI model for conversation' 27 | ] 28 | 29 | for text in self.example_texts: 30 | self.cache.add(text) 31 | 32 | # Add some random strings to test noise 33 | for _ in range(5): 34 | self.cache.add(self.random_string(10)) 35 | 36 | def test_get_relevant(self): 37 | query = "I'm interested in artificial intelligence and NLP" 38 | k = 3 39 | relevant_texts = self.cache.get_relevant(query, k) 40 | 41 | print(f"Top {k} relevant texts for the query '{query}':") 42 | for i, text in enumerate(relevant_texts, start=1): 43 | print(f"{i}. {text}") 44 | 45 | self.assertEqual(len(relevant_texts), k) 46 | self.assertIn(self.example_texts[1], relevant_texts) 47 | 48 | 49 | if __name__ == '__main__': 50 | unittest.main() 51 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | 2 | To contribute to this GitHub project, you can follow these steps: 3 | 4 | 1. Fork the repository you want to contribute to by clicking the "Fork" button on the project page. 5 | 6 | 2. Clone the repository to your local machine using the following command: 7 | 8 | ``` 9 | git clone https://github.com//Auto-GPT 10 | ``` 11 | 3. Create a new branch for your changes using the following command: 12 | 13 | ``` 14 | git checkout -b "branch-name" 15 | ``` 16 | 4. Make your changes to the code or documentation. 17 | - Example: Improve User Interface or Add Documentation. 18 | 19 | 20 | 5. Add the changes to the staging area using the following command: 21 | ``` 22 | git add . 23 | ``` 24 | 25 | 6. Commit the changes with a meaningful commit message using the following command: 26 | ``` 27 | git commit -m "your commit message" 28 | ``` 29 | 7. Push the changes to your forked repository using the following command: 30 | ``` 31 | git push origin branch-name 32 | ``` 33 | 8. Go to the GitHub website and navigate to your forked repository. 34 | 35 | 9. Click the "New pull request" button. 36 | 37 | 10. Select the branch you just pushed to and the branch you want to merge into on the original repository. 38 | 39 | 11. Add a description of your changes and click the "Create pull request" button. 40 | 41 | 12. Wait for the project maintainer to review your changes and provide feedback. 42 | 43 | 13. Make any necessary changes based on feedback and repeat steps 5-12 until your changes are accepted and merged into the main project. 44 | 45 | 14. Once your changes are merged, you can update your forked repository and local copy of the repository with the following commands: 46 | 47 | ``` 48 | git fetch upstream 49 | git checkout master 50 | git merge upstream/master 51 | ``` 52 | Finally, delete the branch you created with the following command: 53 | ``` 54 | git branch -d branch-name 55 | ``` 56 | That's it you made it 🐣⭐⭐ 57 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/tests/test_config.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from scripts.config import Config 3 | 4 | class TestConfig(unittest.TestCase): 5 | 6 | def test_singleton(self): 7 | config1 = Config() 8 | config2 = Config() 9 | self.assertIs(config1, config2) 10 | 11 | def test_initial_values(self): 12 | config = Config() 13 | self.assertFalse(config.debug_mode) 14 | self.assertFalse(config.continuous_mode) 15 | self.assertFalse(config.speak_mode) 16 | self.assertEqual(config.fast_llm_model, "gpt-3.5-turbo") 17 | self.assertEqual(config.smart_llm_model, "gpt-4") 18 | self.assertEqual(config.fast_token_limit, 4000) 19 | self.assertEqual(config.smart_token_limit, 8000) 20 | 21 | def test_set_continuous_mode(self): 22 | config = Config() 23 | config.set_continuous_mode(True) 24 | self.assertTrue(config.continuous_mode) 25 | 26 | def test_set_speak_mode(self): 27 | config = Config() 28 | config.set_speak_mode(True) 29 | self.assertTrue(config.speak_mode) 30 | 31 | def test_set_fast_llm_model(self): 32 | config = Config() 33 | config.set_fast_llm_model("gpt-3.5-turbo-test") 34 | self.assertEqual(config.fast_llm_model, "gpt-3.5-turbo-test") 35 | 36 | def test_set_smart_llm_model(self): 37 | config = Config() 38 | config.set_smart_llm_model("gpt-4-test") 39 | self.assertEqual(config.smart_llm_model, "gpt-4-test") 40 | 41 | def test_set_fast_token_limit(self): 42 | config = Config() 43 | config.set_fast_token_limit(5000) 44 | self.assertEqual(config.fast_token_limit, 5000) 45 | 46 | def test_set_smart_token_limit(self): 47 | config = Config() 48 | config.set_smart_token_limit(9000) 49 | self.assertEqual(config.smart_token_limit, 9000) 50 | 51 | def test_set_debug_mode(self): 52 | config = Config() 53 | config.set_debug_mode(True) 54 | self.assertTrue(config.debug_mode) 55 | 56 | 57 | if __name__ == '__main__': 58 | unittest.main() 59 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/agent_manager.py: -------------------------------------------------------------------------------- 1 | from llm_utils import create_chat_completion 2 | 3 | next_key = 0 4 | agents = {} # key, (task, full_message_history, model) 5 | 6 | # Create new GPT agent 7 | # TODO: Centralise use of create_chat_completion() to globally enforce token limit 8 | 9 | def create_agent(task, prompt, model): 10 | """Create a new agent and return its key""" 11 | global next_key 12 | global agents 13 | 14 | messages = [{"role": "user", "content": prompt}, ] 15 | 16 | # Start GPT instance 17 | agent_reply = create_chat_completion( 18 | model=model, 19 | messages=messages, 20 | ) 21 | 22 | # Update full message history 23 | messages.append({"role": "assistant", "content": agent_reply}) 24 | 25 | key = next_key 26 | # This is done instead of len(agents) to make keys unique even if agents 27 | # are deleted 28 | next_key += 1 29 | 30 | agents[key] = (task, messages, model) 31 | 32 | return key, agent_reply 33 | 34 | 35 | def message_agent(key, message): 36 | """Send a message to an agent and return its response""" 37 | global agents 38 | 39 | task, messages, model = agents[int(key)] 40 | 41 | # Add user message to message history before sending to agent 42 | messages.append({"role": "user", "content": message}) 43 | 44 | # Start GPT instance 45 | agent_reply = create_chat_completion( 46 | model=model, 47 | messages=messages, 48 | ) 49 | 50 | # Update full message history 51 | messages.append({"role": "assistant", "content": agent_reply}) 52 | 53 | return agent_reply 54 | 55 | 56 | def list_agents(): 57 | """Return a list of all agents""" 58 | global agents 59 | 60 | # Return a list of agent keys and their tasks 61 | return [(key, task) for key, (task, _, _) in agents.items()] 62 | 63 | 64 | def delete_agent(key): 65 | """Delete an agent and return True if successful, False otherwise""" 66 | global agents 67 | 68 | try: 69 | del agents[int(key)] 70 | return True 71 | except KeyError: 72 | return False 73 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/memory/pinecone.py: -------------------------------------------------------------------------------- 1 | 2 | import pinecone 3 | 4 | from memory.base import MemoryProviderSingleton, get_ada_embedding 5 | 6 | 7 | class PineconeMemory(MemoryProviderSingleton): 8 | def __init__(self, cfg): 9 | pinecone_api_key = cfg.pinecone_api_key 10 | pinecone_region = cfg.pinecone_region 11 | pinecone.init(api_key=pinecone_api_key, environment=pinecone_region) 12 | dimension = 1536 13 | metric = "cosine" 14 | pod_type = "p1" 15 | table_name = "auto-gpt" 16 | # this assumes we don't start with memory. 17 | # for now this works. 18 | # we'll need a more complicated and robust system if we want to start with memory. 19 | self.vec_num = 0 20 | if table_name not in pinecone.list_indexes(): 21 | pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type) 22 | self.index = pinecone.Index(table_name) 23 | 24 | def add(self, data): 25 | vector = get_ada_embedding(data) 26 | # no metadata here. We may wish to change that long term. 27 | resp = self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})]) 28 | _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}" 29 | self.vec_num += 1 30 | return _text 31 | 32 | def get(self, data): 33 | return self.get_relevant(data, 1) 34 | 35 | def clear(self): 36 | self.index.delete(deleteAll=True) 37 | return "Obliviated" 38 | 39 | def get_relevant(self, data, num_relevant=5): 40 | """ 41 | Returns all the data in the memory that is relevant to the given data. 42 | :param data: The data to compare to. 43 | :param num_relevant: The number of relevant data to return. Defaults to 5 44 | """ 45 | query_embedding = get_ada_embedding(data) 46 | results = self.index.query(query_embedding, top_k=num_relevant, include_metadata=True) 47 | sorted_results = sorted(results.matches, key=lambda x: x.score) 48 | return [str(item['metadata']["raw_text"]) for item in sorted_results] 49 | 50 | def get_stats(self): 51 | return self.index.describe_index_stats() 52 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/outputs/guest_post_email.txt: -------------------------------------------------------------------------------- 1 | Subject: Exciting Collaboration Opportunity: FinanceGPT.substack.com Guest Post 2 | 3 | Dear [Popular Blog Owner], 4 | 5 | I hope this email finds you well. My name is [Your Name] and I'm the founder and writer of FinanceGPT.substack.com, a new personal finance and investing blog that focuses on leveraging AI technology to provide in-depth analysis, actionable tips, and innovative perspectives on personal finance management. 6 | 7 | First and foremost, I want to say that I'm a huge admirer of your blog, [Popular Blog Name]. Your insightful content and dedication to helping people achieve financial success have inspired me to create my own platform. As a fellow personal finance enthusiast, I would like to propose a collaboration in the form of a guest post on your blog. I believe that my fresh take on personal finance, combined with the innovative use of AI, would make a valuable addition to your already impressive content lineup. 8 | 9 | Here are some potential guest post topics that I think your audience would enjoy: 10 | 11 | Harnessing AI to Streamline Personal Finance: How to maximize efficiency and optimize your financial management using cutting-edge AI tools. 12 | Unraveling the Secrets of the Stock Market with AI: Insights into stock analysis and investment strategies, backed by machine learning algorithms. 13 | The Future of Financial Independence: Exploring the impact of AI on the FIRE (Financial Independence, Retire Early) movement. 14 | Sustainable Investing in the Age of AI: Identifying eco-friendly investment opportunities with the help of machine learning. 15 | By collaborating on a guest post, we both stand to benefit in several ways: 16 | 17 | Audience Growth: By sharing our expertise with each other's audiences, we can broaden our reach and help even more people achieve their financial goals. 18 | Cross-Promotion: We can promote each other's content, thus increasing brand exposure and attracting new subscribers to our respective platforms. 19 | Knowledge Sharing: Combining our unique perspectives and experiences will enrich the quality of our content, providing readers with comprehensive and diverse information. 20 | If you are interested in this collaboration, I would be more than happy to provide you with a detailed outline for any of the proposed topics or discuss any other ideas you may have. Please let me know your thoughts, and I look forward to the possibility of working together. 21 | 22 | Thank you for your time and consideration. 23 | 24 | Best regards, 25 | 26 | [Your Name] 27 | Founder and Writer, FinanceGPT.substack.com 28 | Email: [Your Email Address] 29 | Phone: [Your Phone Number] -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/ai_functions.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | import json 3 | from config import Config 4 | from call_ai_function import call_ai_function 5 | from json_parser import fix_and_parse_json 6 | cfg = Config() 7 | 8 | 9 | def evaluate_code(code: str) -> List[str]: 10 | """ 11 | A function that takes in a string and returns a response from create chat completion api call. 12 | 13 | Parameters: 14 | code (str): Code to be evaluated. 15 | Returns: 16 | A result string from create chat completion. A list of suggestions to improve the code. 17 | """ 18 | 19 | function_string = "def analyze_code(code: str) -> List[str]:" 20 | args = [code] 21 | description_string = """Analyzes the given code and returns a list of suggestions for improvements.""" 22 | 23 | result_string = call_ai_function(function_string, args, description_string) 24 | 25 | return result_string 26 | 27 | 28 | def improve_code(suggestions: List[str], code: str) -> str: 29 | """ 30 | A function that takes in code and suggestions and returns a response from create chat completion api call. 31 | 32 | Parameters: 33 | suggestions (List): A list of suggestions around what needs to be improved. 34 | code (str): Code to be improved. 35 | Returns: 36 | A result string from create chat completion. Improved code in response. 37 | """ 38 | 39 | function_string = ( 40 | "def generate_improved_code(suggestions: List[str], code: str) -> str:" 41 | ) 42 | args = [json.dumps(suggestions), code] 43 | description_string = """Improves the provided code based on the suggestions provided, making no other changes.""" 44 | 45 | result_string = call_ai_function(function_string, args, description_string) 46 | return result_string 47 | 48 | 49 | def write_tests(code: str, focus: List[str]) -> str: 50 | """ 51 | A function that takes in code and focus topics and returns a response from create chat completion api call. 52 | 53 | Parameters: 54 | focus (List): A list of suggestions around what needs to be improved. 55 | code (str): Code for test cases to be generated against. 56 | Returns: 57 | A result string from create chat completion. Test cases for the submitted code in response. 58 | """ 59 | 60 | function_string = ( 61 | "def create_test_cases(code: str, focus: Optional[str] = None) -> str:" 62 | ) 63 | args = [code, json.dumps(focus)] 64 | description_string = """Generates test cases for the existing code, focusing on specific areas if required.""" 65 | 66 | result_string = call_ai_function(function_string, args, description_string) 67 | return result_string 68 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/token_counter.py: -------------------------------------------------------------------------------- 1 | import tiktoken 2 | from typing import List, Dict 3 | 4 | def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int: 5 | """ 6 | Returns the number of tokens used by a list of messages. 7 | 8 | Args: 9 | messages (list): A list of messages, each of which is a dictionary containing the role and content of the message. 10 | model (str): The name of the model to use for tokenization. Defaults to "gpt-3.5-turbo-0301". 11 | 12 | Returns: 13 | int: The number of tokens used by the list of messages. 14 | """ 15 | try: 16 | encoding = tiktoken.encoding_for_model(model) 17 | except KeyError: 18 | logger.warn("Warning: model not found. Using cl100k_base encoding.") 19 | encoding = tiktoken.get_encoding("cl100k_base") 20 | if model == "gpt-3.5-turbo": 21 | # !Node: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.") 22 | return count_message_tokens(messages, model="gpt-3.5-turbo-0301") 23 | elif model == "gpt-4": 24 | # !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.") 25 | return count_message_tokens(messages, model="gpt-4-0314") 26 | elif model == "gpt-3.5-turbo-0301": 27 | tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n 28 | tokens_per_name = -1 # if there's a name, the role is omitted 29 | elif model == "gpt-4-0314": 30 | tokens_per_message = 3 31 | tokens_per_name = 1 32 | else: 33 | raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""") 34 | num_tokens = 0 35 | for message in messages: 36 | num_tokens += tokens_per_message 37 | for key, value in message.items(): 38 | num_tokens += len(encoding.encode(value)) 39 | if key == "name": 40 | num_tokens += tokens_per_name 41 | num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> 42 | return num_tokens 43 | 44 | def count_string_tokens(string: str, model_name: str) -> int: 45 | """ 46 | Returns the number of tokens in a text string. 47 | 48 | Args: 49 | string (str): The text string. 50 | model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo") 51 | 52 | Returns: 53 | int: The number of tokens in the text string. 54 | """ 55 | encoding = tiktoken.encoding_for_model(model_name) 56 | num_tokens = len(encoding.encode(string)) 57 | return num_tokens 58 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/file_operations.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path 3 | 4 | # Set a dedicated folder for file I/O 5 | working_directory = "auto_gpt_workspace" 6 | 7 | # Create the directory if it doesn't exist 8 | if not os.path.exists(working_directory): 9 | os.makedirs(working_directory) 10 | 11 | 12 | def safe_join(base, *paths): 13 | """Join one or more path components intelligently.""" 14 | new_path = os.path.join(base, *paths) 15 | norm_new_path = os.path.normpath(new_path) 16 | 17 | if os.path.commonprefix([base, norm_new_path]) != base: 18 | raise ValueError("Attempted to access outside of working directory.") 19 | 20 | return norm_new_path 21 | 22 | 23 | def read_file(filename): 24 | """Read a file and return the contents""" 25 | try: 26 | filepath = safe_join(working_directory, filename) 27 | with open(filepath, "r", encoding='utf-8') as f: 28 | content = f.read() 29 | return content 30 | except Exception as e: 31 | return "Error: " + str(e) 32 | 33 | 34 | def write_to_file(filename, text): 35 | """Write text to a file""" 36 | try: 37 | filepath = safe_join(working_directory, filename) 38 | directory = os.path.dirname(filepath) 39 | if not os.path.exists(directory): 40 | os.makedirs(directory) 41 | with open(filepath, "w") as f: 42 | f.write(text) 43 | return "File written to successfully." 44 | except Exception as e: 45 | return "Error: " + str(e) 46 | 47 | 48 | def append_to_file(filename, text): 49 | """Append text to a file""" 50 | try: 51 | filepath = safe_join(working_directory, filename) 52 | with open(filepath, "a") as f: 53 | f.write(text) 54 | return "Text appended successfully." 55 | except Exception as e: 56 | return "Error: " + str(e) 57 | 58 | 59 | def delete_file(filename): 60 | """Delete a file""" 61 | try: 62 | filepath = safe_join(working_directory, filename) 63 | os.remove(filepath) 64 | return "File deleted successfully." 65 | except Exception as e: 66 | return "Error: " + str(e) 67 | 68 | def search_files(directory): 69 | found_files = [] 70 | 71 | if directory == "" or directory == "/": 72 | search_directory = working_directory 73 | else: 74 | search_directory = safe_join(working_directory, directory) 75 | 76 | for root, _, files in os.walk(search_directory): 77 | for file in files: 78 | if file.startswith('.'): 79 | continue 80 | relative_path = os.path.relpath(os.path.join(root, file), working_directory) 81 | found_files.append(relative_path) 82 | 83 | return found_files 84 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/speak.py: -------------------------------------------------------------------------------- 1 | import os 2 | from playsound import playsound 3 | import requests 4 | from config import Config 5 | cfg = Config() 6 | import gtts 7 | import threading 8 | from threading import Lock, Semaphore 9 | 10 | # Default voice IDs 11 | default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"] 12 | 13 | # Retrieve custom voice IDs from the Config class 14 | custom_voice_1 = cfg.elevenlabs_voice_1_id 15 | custom_voice_2 = cfg.elevenlabs_voice_2_id 16 | 17 | # Placeholder values that should be treated as empty 18 | placeholders = {"your-voice-id"} 19 | 20 | # Use custom voice IDs if provided and not placeholders, otherwise use default voice IDs 21 | voices = [ 22 | custom_voice_1 if custom_voice_1 and custom_voice_1 not in placeholders else default_voices[0], 23 | custom_voice_2 if custom_voice_2 and custom_voice_2 not in placeholders else default_voices[1] 24 | ] 25 | 26 | tts_headers = { 27 | "Content-Type": "application/json", 28 | "xi-api-key": cfg.elevenlabs_api_key 29 | } 30 | 31 | mutex_lock = Lock() # Ensure only one sound is played at a time 32 | queue_semaphore = Semaphore(1) # The amount of sounds to queue before blocking the main thread 33 | 34 | def eleven_labs_speech(text, voice_index=0): 35 | """Speak text using elevenlabs.io's API""" 36 | tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format( 37 | voice_id=voices[voice_index]) 38 | formatted_message = {"text": text} 39 | response = requests.post( 40 | tts_url, headers=tts_headers, json=formatted_message) 41 | 42 | if response.status_code == 200: 43 | with mutex_lock: 44 | with open("speech.mpeg", "wb") as f: 45 | f.write(response.content) 46 | playsound("speech.mpeg", True) 47 | os.remove("speech.mpeg") 48 | return True 49 | else: 50 | print("Request failed with status code:", response.status_code) 51 | print("Response content:", response.content) 52 | return False 53 | 54 | def gtts_speech(text): 55 | tts = gtts.gTTS(text) 56 | with mutex_lock: 57 | tts.save("speech.mp3") 58 | playsound("speech.mp3", True) 59 | os.remove("speech.mp3") 60 | 61 | def macos_tts_speech(text, voice_index=0): 62 | if voice_index == 0: 63 | os.system(f'say "{text}"') 64 | else: 65 | if voice_index == 1: 66 | os.system(f'say -v "Ava (Premium)" "{text}"') 67 | else: 68 | os.system(f'say -v Samantha "{text}"') 69 | 70 | def say_text(text, voice_index=0): 71 | 72 | def speak(): 73 | if not cfg.elevenlabs_api_key: 74 | if cfg.use_mac_os_tts == 'True': 75 | macos_tts_speech(text, voice_index) 76 | else: 77 | gtts_speech(text) 78 | else: 79 | success = eleven_labs_speech(text, voice_index) 80 | if not success: 81 | gtts_speech(text) 82 | 83 | queue_semaphore.release() 84 | 85 | queue_semaphore.acquire(True) 86 | thread = threading.Thread(target=speak) 87 | thread.start() 88 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/execute_code.py: -------------------------------------------------------------------------------- 1 | import docker 2 | import os 3 | import subprocess 4 | 5 | 6 | WORKSPACE_FOLDER = "auto_gpt_workspace" 7 | 8 | 9 | def execute_python_file(file): 10 | """Execute a Python file in a Docker container and return the output""" 11 | 12 | print (f"Executing file '{file}' in workspace '{WORKSPACE_FOLDER}'") 13 | 14 | if not file.endswith(".py"): 15 | return "Error: Invalid file type. Only .py files are allowed." 16 | 17 | file_path = os.path.join(WORKSPACE_FOLDER, file) 18 | 19 | if not os.path.isfile(file_path): 20 | return f"Error: File '{file}' does not exist." 21 | 22 | try: 23 | client = docker.from_env() 24 | 25 | image_name = 'python:3.10' 26 | try: 27 | client.images.get(image_name) 28 | print(f"Image '{image_name}' found locally") 29 | except docker.errors.ImageNotFound: 30 | print(f"Image '{image_name}' not found locally, pulling from Docker Hub") 31 | # Use the low-level API to stream the pull response 32 | low_level_client = docker.APIClient() 33 | for line in low_level_client.pull(image_name, stream=True, decode=True): 34 | # Print the status and progress, if available 35 | status = line.get('status') 36 | progress = line.get('progress') 37 | if status and progress: 38 | print(f"{status}: {progress}") 39 | elif status: 40 | print(status) 41 | 42 | # You can replace 'python:3.8' with the desired Python image/version 43 | # You can find available Python images on Docker Hub: 44 | # https://hub.docker.com/_/python 45 | container = client.containers.run( 46 | image_name, 47 | f'python {file}', 48 | volumes={ 49 | os.path.abspath(WORKSPACE_FOLDER): { 50 | 'bind': '/workspace', 51 | 'mode': 'ro'}}, 52 | working_dir='/workspace', 53 | stderr=True, 54 | stdout=True, 55 | detach=True, 56 | ) 57 | 58 | output = container.wait() 59 | logs = container.logs().decode('utf-8') 60 | container.remove() 61 | 62 | # print(f"Execution complete. Output: {output}") 63 | # print(f"Logs: {logs}") 64 | 65 | return logs 66 | 67 | except Exception as e: 68 | return f"Error: {str(e)}" 69 | 70 | def execute_shell(command_line): 71 | 72 | current_dir = os.getcwd() 73 | 74 | if not WORKSPACE_FOLDER in current_dir: # Change dir into workspace if necessary 75 | work_dir = os.path.join(os.getcwd(), WORKSPACE_FOLDER) 76 | os.chdir(work_dir) 77 | 78 | print (f"Executing command '{command_line}' in working directory '{os.getcwd()}'") 79 | 80 | result = subprocess.run(command_line, capture_output=True, shell=True) 81 | output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" 82 | 83 | # Change back to whatever the prior working dir was 84 | 85 | os.chdir(current_dir) 86 | 87 | return output 88 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/data/prompt.txt: -------------------------------------------------------------------------------- 1 | CONSTRAINTS: 2 | 3 | 1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files. 4 | 2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember. 5 | 3. No user assistance 6 | 4. Exclusively use the commands listed in double quotes e.g. "command name" 7 | 8 | COMMANDS: 9 | 10 | 1. Google Search: "google", args: "input": "" 11 | 5. Browse Website: "browse_website", args: "url": "", "question": "" 12 | 6. Start GPT Agent: "start_agent", args: "name": "", "task": "", "prompt": "" 13 | 7. Message GPT Agent: "message_agent", args: "key": "", "message": "" 14 | 8. List GPT Agents: "list_agents", args: "" 15 | 9. Delete GPT Agent: "delete_agent", args: "key": "" 16 | 10. Write to file: "write_to_file", args: "file": "", "text": "" 17 | 11. Read file: "read_file", args: "file": "" 18 | 12. Append to file: "append_to_file", args: "file": "", "text": "" 19 | 13. Delete file: "delete_file", args: "file": "" 20 | 14. Search Files: "search_files", args: "directory": "" 21 | 15. Evaluate Code: "evaluate_code", args: "code": "" 22 | 16. Get Improved Code: "improve_code", args: "suggestions": "", "code": "" 23 | 17. Write Tests: "write_tests", args: "code": "", "focus": "" 24 | 18. Execute Python File: "execute_python_file", args: "file": "" 25 | 19. Execute Shell Command, non-interactive commands only: "execute_shell", args: "command_line": "". 26 | 20. Task Complete (Shutdown): "task_complete", args: "reason": "" 27 | 21. Generate Image: "generate_image", args: "prompt": "" 28 | 22. Do Nothing: "do_nothing", args: "" 29 | 30 | RESOURCES: 31 | 32 | 1. Internet access for searches and information gathering. 33 | 2. Long Term memory management. 34 | 3. GPT-3.5 powered Agents for delegation of simple tasks. 35 | 4. File output. 36 | 37 | PERFORMANCE EVALUATION: 38 | 39 | 1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities. 40 | 2. Constructively self-criticize your big-picture behavior constantly. 41 | 3. Reflect on past decisions and strategies to refine your approach. 42 | 4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps. 43 | 44 | You should only respond in JSON format as described below 45 | 46 | RESPONSE FORMAT: 47 | { 48 | "thoughts": 49 | { 50 | "text": "thought", 51 | "reasoning": "reasoning", 52 | "plan": "- short bulleted\n- list that conveys\n- long-term plan", 53 | "criticism": "constructive self-criticism", 54 | "speak": "thoughts summary to say to user" 55 | }, 56 | "command": { 57 | "name": "command name", 58 | "args":{ 59 | "arg name": "value" 60 | } 61 | } 62 | } 63 | 64 | Ensure the response can be parsed by Python json.loads 65 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/outputs/post1_output.txt: -------------------------------------------------------------------------------- 1 | Title: Maximizing Your Savings: The Benefits of High Yield Savings Accounts and How to Choose the Best One in 2023 2 | 3 | Introduction 4 | 5 | When it comes to growing your savings, a high-yield savings account (HYSA) can be a valuable financial tool. In recent years, these accounts have gained popularity for their ability to provide higher returns than traditional savings accounts. In this blog post, we'll discuss the benefits of high-yield savings accounts and provide you with essential tips for choosing the best one in 2023. 6 | 7 | What Are High Yield Savings Accounts? 8 | 9 | A high-yield savings account is a type of deposit account offered by banks and credit unions that pays a higher interest rate compared to traditional savings accounts. They are designed to encourage people to save more money by offering a more attractive return on investment. 10 | 11 | Benefits of High Yield Savings Accounts 12 | 13 | Competitive Interest Rates: HYSAs typically offer higher interest rates than traditional savings accounts. This allows your money to grow at a faster rate and helps you reach your financial goals more quickly. 14 | 15 | Liquidity: Unlike other investment options, such as stocks or bonds, high-yield savings accounts offer easy access to your money. You can withdraw funds whenever you need them without penalties, making them ideal for emergency funds or short-term savings goals. 16 | 17 | Security: High-yield savings accounts are insured by the Federal Deposit Insurance Corporation (FDIC) or the National Credit Union Share Insurance Fund (NCUSIF), ensuring your money is safe and protected up to $250,000 per depositor, per institution. 18 | 19 | Low or No Fees: Many HYSAs have no monthly maintenance fees or minimum balance requirements, making them more affordable than other investment options. 20 | 21 | How to Choose the Best High Yield Savings Account in 2023 22 | 23 | Compare Interest Rates: Start by researching the available interest rates from different banks and credit unions. Online banks usually offer higher interest rates compared to brick-and-mortar institutions, as they have lower overhead costs. 24 | 25 | Look for Promotions: Some financial institutions offer promotional rates or bonuses for new customers. Be sure to factor in these promotions when comparing accounts, but also consider the long-term interest rates once the promotion ends. 26 | 27 | Consider Account Fees: Review the fees associated with each account, such as monthly maintenance fees, withdrawal fees, or minimum balance requirements. Look for an account with minimal or no fees to maximize your savings. 28 | 29 | Check Accessibility: Ensure that the financial institution offers user-friendly online and mobile banking options, as well as responsive customer support. 30 | 31 | Read Reviews: Look for online reviews and testimonials from customers who have used the high-yield savings accounts you're considering. This can give you valuable insight into their experiences and help you make an informed decision. 32 | 33 | Conclusion 34 | 35 | High-yield savings accounts can be an excellent way to grow your savings more quickly and achieve your financial goals. By considering factors such as interest rates, fees, accessibility, and customer reviews, you can find the best high-yield savings account for your needs in 2023. Start researching today and maximize the potential of your hard-earned money. -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/ai_config.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import data 3 | import os 4 | 5 | class AIConfig: 6 | """ 7 | A class object that contains the configuration information for the AI 8 | 9 | Attributes: 10 | ai_name (str): The name of the AI. 11 | ai_role (str): The description of the AI's role. 12 | ai_goals (list): The list of objectives the AI is supposed to complete. 13 | """ 14 | 15 | def __init__(self, ai_name: str="", ai_role: str="", ai_goals: list=[]) -> None: 16 | """ 17 | Initialize a class instance 18 | 19 | Parameters: 20 | ai_name (str): The name of the AI. 21 | ai_role (str): The description of the AI's role. 22 | ai_goals (list): The list of objectives the AI is supposed to complete. 23 | Returns: 24 | None 25 | """ 26 | 27 | self.ai_name = ai_name 28 | self.ai_role = ai_role 29 | self.ai_goals = ai_goals 30 | 31 | # Soon this will go in a folder where it remembers more stuff about the run(s) 32 | SAVE_FILE = os.path.join(os.path.dirname(__file__), '..', 'ai_settings.yaml') 33 | 34 | @classmethod 35 | def load(cls: object, config_file: str=SAVE_FILE) -> object: 36 | """ 37 | Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from yaml file if yaml file exists, 38 | else returns class with no parameters. 39 | 40 | Parameters: 41 | cls (class object): An AIConfig Class object. 42 | config_file (int): The path to the config yaml file. DEFAULT: "../ai_settings.yaml" 43 | 44 | Returns: 45 | cls (object): An instance of given cls object 46 | """ 47 | 48 | try: 49 | with open(config_file) as file: 50 | config_params = yaml.load(file, Loader=yaml.FullLoader) 51 | except FileNotFoundError: 52 | config_params = {} 53 | 54 | ai_name = config_params.get("ai_name", "") 55 | ai_role = config_params.get("ai_role", "") 56 | ai_goals = config_params.get("ai_goals", []) 57 | 58 | return cls(ai_name, ai_role, ai_goals) 59 | 60 | def save(self, config_file: str=SAVE_FILE) -> None: 61 | """ 62 | Saves the class parameters to the specified file yaml file path as a yaml file. 63 | 64 | Parameters: 65 | config_file(str): The path to the config yaml file. DEFAULT: "../ai_settings.yaml" 66 | 67 | Returns: 68 | None 69 | """ 70 | 71 | config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals} 72 | with open(config_file, "w") as file: 73 | yaml.dump(config, file) 74 | 75 | def construct_full_prompt(self) -> str: 76 | """ 77 | Returns a prompt to the user with the class information in an organized fashion. 78 | 79 | Parameters: 80 | None 81 | 82 | Returns: 83 | full_prompt (str): A string containing the initial prompt for the user including the ai_name, ai_role and ai_goals. 84 | """ 85 | 86 | prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications.""" 87 | 88 | # Construct full prompt 89 | full_prompt = f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n" 90 | for i, goal in enumerate(self.ai_goals): 91 | full_prompt += f"{i+1}. {goal}\n" 92 | 93 | full_prompt += f"\n\n{data.load_prompt()}" 94 | return full_prompt 95 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ChaosGPT 2 | the autonomous implementation of ChatGPT is being touted as "empowering GPT 3 | Chaos GPT: Understanding OpenAI's Latest Language Model 4 | 5 | In 2020, OpenAI introduced its latest natural language processing model - GPT-3, which shook the AI community with its remarkable capabilities. Now, in 2023, OpenAI has taken things up a notch with the release of Chaos GPT, an even more advanced and powerful language model. In this article, we'll take a closer look at Chaos GPT, its features, and how it differs from its predecessor. 6 | 7 | What is Chaos GPT? 8 | 9 | Chaos GPT is a language model that uses a transformer-based architecture to process natural language. It is an upgraded version of GPT-3 and is designed to be more efficient, powerful, and accurate. The model has been trained on a massive dataset of over 100 trillion words, making it the largest language model ever created. 10 | 11 | Features of Chaos GPT 12 | 13 | Chaos GPT has several features that set it apart from its predecessor. Some of the most notable features include: 14 | 15 | Multilingual Support: Unlike GPT-3, Chaos GPT has been trained on multiple languages, making it capable of understanding and processing text in over 100 languages. 16 | 17 | Improved Accuracy: Chaos GPT has significantly improved accuracy compared to GPT-3, especially when it comes to understanding complex language structures and context. 18 | 19 | Higher Efficiency: The model has been optimized to be more efficient, allowing it to process large volumes of data quickly and accurately. 20 | 21 | Enhanced Generative Abilities: Chaos GPT is capable of generating more diverse and creative text compared to GPT-3. This is due to its ability to understand context and generate responses that are more coherent and relevant. 22 | 23 | How Does Chaos GPT Work? 24 | 25 | Like GPT-3, Chaos GPT is a transformer-based language model that uses unsupervised learning to process natural language. The model has been trained on a vast dataset of text from the internet, books, and other sources. The model then uses this dataset to predict the probability of a given word or phrase based on the context of the text. 26 | 27 | The model consists of several layers of neural networks, each of which processes a different level of the text's context. The model uses attention mechanisms to identify which parts of the text are most relevant to the current context, allowing it to generate more accurate responses. 28 | 29 | Applications of Chaos GPT 30 | 31 | Chaos GPT has several applications in various fields, including natural language processing, machine translation, text classification, and sentiment analysis. The model can be used to generate more accurate and relevant responses in chatbots, virtual assistants, and other conversational AI systems. It can also be used to improve the accuracy of machine translation systems and sentiment analysis tools. 32 | 33 | Conclusion 34 | 35 | Chaos GPT is an impressive language model that represents a significant advancement in natural language processing. Its enhanced accuracy, efficiency, and generative abilities make it a valuable tool for a wide range of applications. As the AI industry continues to evolve, we can expect to see more innovations like Chaos GPT that push the boundaries of what is possible with machine learning and natural language processing. 36 | 37 | chaosgpt download 38 | chaosgpt twitter 39 | chaosgpt reddit 40 | how to use chaos gpt 41 | who created chaos gpt 42 | chaos gpt twitter 43 | chaos gpt login 44 | chaos gpt website 45 | chaos gpt chat 46 | how to use chaos gpt 47 | chaos gpt link 48 | chatgpt 49 | chaos gpt reddit 50 | chaos gpt website 51 | chaos game 52 | auto gpt 53 | 54 | 55 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/outputs/how_to_save_money_on_energy_bills.txt: -------------------------------------------------------------------------------- 1 | How to Save Money on Energy Bills: Easy and Affordable Solutions 2 | 3 | Electricity bills can skyrocket during harsh weather conditions, or when we use a lot of electronic devices. When energy bills go up, it's hard to tighten up our budget without sacrificing our home comforts. However, there are affordable ways to save money on energy bills, without turning off the electricity altogether. Here are some simple solutions that can help you lower your energy expenses. 4 | 5 | 1. Install a Programmable Thermostat 6 | 7 | Maintaining an optimal temperature in your home during summer or winter can be hard; you may either overheat your home or use heating and cooling units excessively. A programmable thermostat helps you regulate the temperature of your home effectively, saving you energy and money. With a programmable thermostat, you can program your heating and cooling systems according to the activities you have planned during the day. 8 | 9 | For instance, when you're away from home, you can lower the thermostat settings to save energy. And when you're home, you can adjust the temperature to suit your comfort level and activities. An upgrade to a programmable thermostat is an efficient energy-saving solution worth investing in. 10 | 11 | 2. Replace Your Inefficient Bulbs 12 | 13 | Traditional incandescent bulbs waste a lot of energy which translates to high energy bills. The solution is to replace them with more efficient bulbs such as LED bulbs, CFLs, or halogen lights. These types of bulbs use only a fraction of the energy that incandescent bulbs use to produce the same amount of light. Additionally, LED bulbs can last up to 25 years, reducing further costs of regularly replacing your bulbs. 14 | 15 | 3. Use Energy-Efficient Appliances 16 | 17 | Using energy-efficient appliances is an excellent way to conserve energy and save money. When shopping for new appliances, consider purchasing those approved by the Energy Star program, which maintains stringent energy efficiency standards for household appliances. You can also save energy by choosing to replace your old appliances with eco-friendlier ones, such as energy-efficient washing machines, refrigerators, and ovens. 18 | 19 | 4. Go Solar 20 | 21 | Solar energy is becoming more attractive for homeowners seeking to save on energy bills while preserving the environment. Solar panel systems can help produce your electricity, hence lowering your dependency on the main power grid. Although solar panel installation might seem expensive at the beginning, the benefits of using a renewable energy source can definitely pay off in the long run. You can also claim tax incentives and sell excess power back to the grid, ultimately providing more cash in your pocket. 22 | 23 | 5. Seal Air Leaks 24 | 25 | Air leaks in your home can make your heating and cooling systems work harder, increasing your energy bills. Inspect your home regularly for air leaks in common areas such as doors, windows, vents, and ducts. If you find air leaks, use weather-stripping or caulking to cover the gaps effectively. In addition, you can seal large gaps with spray foam insulation, ensuring that cold or hot air does escape through any gaps in your walls. 26 | 27 | In conclusion, implementing these simple and affordable tips can help you reduce your energy bills, preserve the environment and help you live comfortably. To save money on energy bills, focus on energy-conserving measures like installing a programmable thermostat, replacing inefficient bulbs with energy-friendly ones, using energy-efficient appliances, going solar or sealing air leaks in your home. With these solutions, you can decrease your energy usage and save more money for other financial goals, all while living a comfortable and environmentally friendly lifestyle. -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/memory/local.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | import orjson 3 | from typing import Any, List, Optional 4 | import numpy as np 5 | import os 6 | from memory.base import MemoryProviderSingleton, get_ada_embedding 7 | 8 | 9 | EMBED_DIM = 1536 10 | SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS 11 | 12 | 13 | def create_default_embeddings(): 14 | return np.zeros((0, EMBED_DIM)).astype(np.float32) 15 | 16 | 17 | @dataclasses.dataclass 18 | class CacheContent: 19 | texts: List[str] = dataclasses.field(default_factory=list) 20 | embeddings: np.ndarray = dataclasses.field( 21 | default_factory=create_default_embeddings 22 | ) 23 | 24 | 25 | class LocalCache(MemoryProviderSingleton): 26 | 27 | # on load, load our database 28 | def __init__(self, cfg) -> None: 29 | self.filename = f"{cfg.memory_index}.json" 30 | if os.path.exists(self.filename): 31 | try: 32 | with open(self.filename, 'w+b') as f: 33 | file_content = f.read() 34 | if not file_content.strip(): 35 | file_content = b'{}' 36 | f.write(file_content) 37 | 38 | loaded = orjson.loads(file_content) 39 | self.data = CacheContent(**loaded) 40 | except orjson.JSONDecodeError: 41 | print(f"Error: The file '{self.filename}' is not in JSON format.") 42 | self.data = CacheContent() 43 | else: 44 | print(f"Warning: The file '{self.filename}' does not exist. Local memory would not be saved to a file.") 45 | self.data = CacheContent() 46 | 47 | def add(self, text: str): 48 | """ 49 | Add text to our list of texts, add embedding as row to our 50 | embeddings-matrix 51 | 52 | Args: 53 | text: str 54 | 55 | Returns: None 56 | """ 57 | if 'Command Error:' in text: 58 | return "" 59 | self.data.texts.append(text) 60 | 61 | embedding = get_ada_embedding(text) 62 | 63 | vector = np.array(embedding).astype(np.float32) 64 | vector = vector[np.newaxis, :] 65 | self.data.embeddings = np.concatenate( 66 | [ 67 | self.data.embeddings, 68 | vector, 69 | ], 70 | axis=0, 71 | ) 72 | 73 | with open(self.filename, 'wb') as f: 74 | out = orjson.dumps( 75 | self.data, 76 | option=SAVE_OPTIONS 77 | ) 78 | f.write(out) 79 | return text 80 | 81 | def clear(self) -> str: 82 | """ 83 | Clears the redis server. 84 | 85 | Returns: A message indicating that the memory has been cleared. 86 | """ 87 | self.data = CacheContent() 88 | return "Obliviated" 89 | 90 | def get(self, data: str) -> Optional[List[Any]]: 91 | """ 92 | Gets the data from the memory that is most relevant to the given data. 93 | 94 | Args: 95 | data: The data to compare to. 96 | 97 | Returns: The most relevant data. 98 | """ 99 | return self.get_relevant(data, 1) 100 | 101 | def get_relevant(self, text: str, k: int) -> List[Any]: 102 | """" 103 | matrix-vector mult to find score-for-each-row-of-matrix 104 | get indices for top-k winning scores 105 | return texts for those indices 106 | Args: 107 | text: str 108 | k: int 109 | 110 | Returns: List[str] 111 | """ 112 | embedding = get_ada_embedding(text) 113 | 114 | scores = np.dot(self.data.embeddings, embedding) 115 | 116 | top_k_indices = np.argsort(scores)[-k:][::-1] 117 | 118 | return [self.data.texts[i] for i in top_k_indices] 119 | 120 | def get_stats(self): 121 | """ 122 | Returns: The stats of the local cache. 123 | """ 124 | return len(self.data.texts), self.data.embeddings.shape 125 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/json_utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | import json 3 | from config import Config 4 | 5 | cfg = Config() 6 | 7 | 8 | def extract_char_position(error_message: str) -> int: 9 | """Extract the character position from the JSONDecodeError message. 10 | 11 | Args: 12 | error_message (str): The error message from the JSONDecodeError 13 | exception. 14 | 15 | Returns: 16 | int: The character position. 17 | """ 18 | import re 19 | 20 | char_pattern = re.compile(r'\(char (\d+)\)') 21 | if match := char_pattern.search(error_message): 22 | return int(match[1]) 23 | else: 24 | raise ValueError("Character position not found in the error message.") 25 | 26 | 27 | def add_quotes_to_property_names(json_string: str) -> str: 28 | """ 29 | Add quotes to property names in a JSON string. 30 | 31 | Args: 32 | json_string (str): The JSON string. 33 | 34 | Returns: 35 | str: The JSON string with quotes added to property names. 36 | """ 37 | 38 | def replace_func(match): 39 | return f'"{match.group(1)}":' 40 | 41 | property_name_pattern = re.compile(r'(\w+):') 42 | corrected_json_string = property_name_pattern.sub( 43 | replace_func, 44 | json_string) 45 | 46 | try: 47 | json.loads(corrected_json_string) 48 | return corrected_json_string 49 | except json.JSONDecodeError as e: 50 | raise e 51 | 52 | 53 | def balance_braces(json_string: str) -> str: 54 | """ 55 | Balance the braces in a JSON string. 56 | 57 | Args: 58 | json_string (str): The JSON string. 59 | 60 | Returns: 61 | str: The JSON string with braces balanced. 62 | """ 63 | 64 | open_braces_count = json_string.count('{') 65 | close_braces_count = json_string.count('}') 66 | 67 | while open_braces_count > close_braces_count: 68 | json_string += '}' 69 | close_braces_count += 1 70 | 71 | while close_braces_count > open_braces_count: 72 | json_string = json_string.rstrip('}') 73 | close_braces_count -= 1 74 | 75 | try: 76 | json.loads(json_string) 77 | return json_string 78 | except json.JSONDecodeError as e: 79 | pass 80 | 81 | 82 | def fix_invalid_escape(json_str: str, error_message: str) -> str: 83 | while error_message.startswith('Invalid \\escape'): 84 | bad_escape_location = extract_char_position(error_message) 85 | json_str = json_str[:bad_escape_location] + \ 86 | json_str[bad_escape_location + 1:] 87 | try: 88 | json.loads(json_str) 89 | return json_str 90 | except json.JSONDecodeError as e: 91 | if cfg.debug_mode: 92 | print('json loads error - fix invalid escape', e) 93 | error_message = str(e) 94 | return json_str 95 | 96 | 97 | def correct_json(json_str: str) -> str: 98 | """ 99 | Correct common JSON errors. 100 | 101 | Args: 102 | json_str (str): The JSON string. 103 | """ 104 | 105 | try: 106 | if cfg.debug_mode: 107 | print("json", json_str) 108 | json.loads(json_str) 109 | return json_str 110 | except json.JSONDecodeError as e: 111 | if cfg.debug_mode: 112 | print('json loads error', e) 113 | error_message = str(e) 114 | if error_message.startswith('Invalid \\escape'): 115 | json_str = fix_invalid_escape(json_str, error_message) 116 | if error_message.startswith('Expecting property name enclosed in double quotes'): 117 | json_str = add_quotes_to_property_names(json_str) 118 | try: 119 | json.loads(json_str) 120 | return json_str 121 | except json.JSONDecodeError as e: 122 | if cfg.debug_mode: 123 | print('json loads error - add quotes', e) 124 | error_message = str(e) 125 | if balanced_str := balance_braces(json_str): 126 | return balanced_str 127 | return json_str 128 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/json_parser.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Any, Dict, Union 3 | from call_ai_function import call_ai_function 4 | from config import Config 5 | from json_utils import correct_json 6 | from logger import logger 7 | 8 | cfg = Config() 9 | 10 | JSON_SCHEMA = """ 11 | { 12 | "command": { 13 | "name": "command name", 14 | "args":{ 15 | "arg name": "value" 16 | } 17 | }, 18 | "thoughts": 19 | { 20 | "text": "thought", 21 | "reasoning": "reasoning", 22 | "plan": "- short bulleted\n- list that conveys\n- long-term plan", 23 | "criticism": "constructive self-criticism", 24 | "speak": "thoughts summary to say to user" 25 | } 26 | } 27 | """ 28 | 29 | 30 | def fix_and_parse_json( 31 | json_str: str, 32 | try_to_fix_with_gpt: bool = True 33 | ) -> Union[str, Dict[Any, Any]]: 34 | """Fix and parse JSON string""" 35 | try: 36 | json_str = json_str.replace('\t', '') 37 | return json.loads(json_str) 38 | except json.JSONDecodeError as _: # noqa: F841 39 | try: 40 | json_str = correct_json(json_str) 41 | return json.loads(json_str) 42 | except json.JSONDecodeError as _: # noqa: F841 43 | pass 44 | # Let's do something manually: 45 | # sometimes GPT responds with something BEFORE the braces: 46 | # "I'm sorry, I don't understand. Please try again." 47 | # {"text": "I'm sorry, I don't understand. Please try again.", 48 | # "confidence": 0.0} 49 | # So let's try to find the first brace and then parse the rest 50 | # of the string 51 | try: 52 | brace_index = json_str.index("{") 53 | json_str = json_str[brace_index:] 54 | last_brace_index = json_str.rindex("}") 55 | json_str = json_str[:last_brace_index+1] 56 | return json.loads(json_str) 57 | # Can throw a ValueError if there is no "{" or "}" in the json_str 58 | except (json.JSONDecodeError, ValueError) as e: # noqa: F841 59 | if try_to_fix_with_gpt: 60 | logger.warn("Warning: Failed to parse AI output, attempting to fix." 61 | "\n If you see this warning frequently, it's likely that" 62 | " your prompt is confusing the AI. Try changing it up" 63 | " slightly.") 64 | # Now try to fix this up using the ai_functions 65 | ai_fixed_json = fix_json(json_str, JSON_SCHEMA) 66 | 67 | if ai_fixed_json != "failed": 68 | return json.loads(ai_fixed_json) 69 | else: 70 | # This allows the AI to react to the error message, 71 | # which usually results in it correcting its ways. 72 | logger.error("Failed to fix AI output, telling the AI.") 73 | return json_str 74 | else: 75 | raise e 76 | 77 | 78 | def fix_json(json_str: str, schema: str) -> str: 79 | """Fix the given JSON string to make it parseable and fully compliant with the provided schema.""" 80 | # Try to fix the JSON using GPT: 81 | function_string = "def fix_json(json_str: str, schema:str=None) -> str:" 82 | args = [f"'''{json_str}'''", f"'''{schema}'''"] 83 | description_string = "Fixes the provided JSON string to make it parseable"\ 84 | " and fully compliant with the provided schema.\n If an object or"\ 85 | " field specified in the schema isn't contained within the correct"\ 86 | " JSON, it is omitted.\n This function is brilliant at guessing"\ 87 | " when the format is incorrect." 88 | 89 | # If it doesn't already start with a "`", add one: 90 | if not json_str.startswith("`"): 91 | json_str = "```json\n" + json_str + "\n```" 92 | result_string = call_ai_function( 93 | function_string, args, description_string, model=cfg.fast_llm_model 94 | ) 95 | logger.debug("------------ JSON FIX ATTEMPT ---------------") 96 | logger.debug(f"Original JSON: {json_str}") 97 | logger.debug("-----------") 98 | logger.debug(f"Fixed JSON: {result_string}") 99 | logger.debug("----------- END OF FIX ATTEMPT ----------------") 100 | 101 | try: 102 | json.loads(result_string) # just check the validity 103 | return result_string 104 | except: # noqa: E722 105 | # Get the call stack: 106 | # import traceback 107 | # call_stack = traceback.format_exc() 108 | # print(f"Failed to fix JSON: '{json_str}' "+call_stack) 109 | return "failed" 110 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/tests/unit/test_browse_scrape_text.py: -------------------------------------------------------------------------------- 1 | 2 | # Generated by CodiumAI 3 | 4 | import requests 5 | 6 | from scripts.browse import scrape_text 7 | 8 | """ 9 | Code Analysis 10 | 11 | Objective: 12 | The objective of the "scrape_text" function is to scrape the text content from 13 | a given URL and return it as a string, after removing any unwanted HTML tags and scripts. 14 | 15 | Inputs: 16 | - url: a string representing the URL of the webpage to be scraped. 17 | 18 | Flow: 19 | 1. Send a GET request to the given URL using the requests library and the user agent header from the config file. 20 | 2. Check if the response contains an HTTP error. If it does, return an error message. 21 | 3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags. 22 | 4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup. 23 | 5. Split the text into lines and then into chunks, removing any extra whitespace. 24 | 6. Join the chunks into a single string with newline characters between them. 25 | 7. Return the cleaned text. 26 | 27 | Outputs: 28 | - A string representing the cleaned text content of the webpage. 29 | 30 | Additional aspects: 31 | - The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively. 32 | - The function removes script and style tags from the HTML to avoid including unwanted content in the text output. 33 | - The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text. 34 | """ 35 | 36 | 37 | class TestScrapeText: 38 | 39 | # Tests that scrape_text() returns the expected text when given a valid URL. 40 | def test_scrape_text_with_valid_url(self, mocker): 41 | # Mock the requests.get() method to return a response with expected text 42 | expected_text = "This is some sample text" 43 | mock_response = mocker.Mock() 44 | mock_response.status_code = 200 45 | mock_response.text = f"

{expected_text}

" 46 | mocker.patch("requests.get", return_value=mock_response) 47 | 48 | # Call the function with a valid URL and assert that it returns the expected text 49 | url = "http://www.example.com" 50 | assert scrape_text(url) == expected_text 51 | 52 | # Tests that the function returns an error message when an invalid or unreachable url is provided. 53 | def test_invalid_url(self, mocker): 54 | # Mock the requests.get() method to raise an exception 55 | mocker.patch("requests.get", side_effect=requests.exceptions.RequestException) 56 | 57 | # Call the function with an invalid URL and assert that it returns an error message 58 | url = "http://www.invalidurl.com" 59 | error_message = scrape_text(url) 60 | assert "Error:" in error_message 61 | 62 | # Tests that the function returns an empty string when the html page contains no text to be scraped. 63 | def test_no_text(self, mocker): 64 | # Mock the requests.get() method to return a response with no text 65 | mock_response = mocker.Mock() 66 | mock_response.status_code = 200 67 | mock_response.text = "" 68 | mocker.patch("requests.get", return_value=mock_response) 69 | 70 | # Call the function with a valid URL and assert that it returns an empty string 71 | url = "http://www.example.com" 72 | assert scrape_text(url) == "" 73 | 74 | # Tests that the function returns an error message when the response status code is an http error (>=400). 75 | def test_http_error(self, mocker): 76 | # Mock the requests.get() method to return a response with a 404 status code 77 | mocker.patch('requests.get', return_value=mocker.Mock(status_code=404)) 78 | 79 | # Call the function with a URL 80 | result = scrape_text("https://www.example.com") 81 | 82 | # Check that the function returns an error message 83 | assert result == "Error: HTTP 404 error" 84 | 85 | # Tests that scrape_text() properly handles HTML tags. 86 | def test_scrape_text_with_html_tags(self, mocker): 87 | # Create a mock response object with HTML containing tags 88 | html = "

This is bold text.

" 89 | mock_response = mocker.Mock() 90 | mock_response.status_code = 200 91 | mock_response.text = html 92 | mocker.patch("requests.get", return_value=mock_response) 93 | 94 | # Call the function with a URL 95 | result = scrape_text("https://www.example.com") 96 | 97 | # Check that the function properly handles HTML tags 98 | assert result == "This is bold text." 99 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/memory/redismem.py: -------------------------------------------------------------------------------- 1 | """Redis memory provider.""" 2 | from typing import Any, List, Optional 3 | import redis 4 | from redis.commands.search.field import VectorField, TextField 5 | from redis.commands.search.query import Query 6 | from redis.commands.search.indexDefinition import IndexDefinition, IndexType 7 | import numpy as np 8 | 9 | from memory.base import MemoryProviderSingleton, get_ada_embedding 10 | 11 | 12 | SCHEMA = [ 13 | TextField("data"), 14 | VectorField( 15 | "embedding", 16 | "HNSW", 17 | { 18 | "TYPE": "FLOAT32", 19 | "DIM": 1536, 20 | "DISTANCE_METRIC": "COSINE" 21 | } 22 | ), 23 | ] 24 | 25 | 26 | class RedisMemory(MemoryProviderSingleton): 27 | def __init__(self, cfg): 28 | """ 29 | Initializes the Redis memory provider. 30 | 31 | Args: 32 | cfg: The config object. 33 | 34 | Returns: None 35 | """ 36 | redis_host = cfg.redis_host 37 | redis_port = cfg.redis_port 38 | redis_password = cfg.redis_password 39 | self.dimension = 1536 40 | self.redis = redis.Redis( 41 | host=redis_host, 42 | port=redis_port, 43 | password=redis_password, 44 | db=0 # Cannot be changed 45 | ) 46 | self.cfg = cfg 47 | if cfg.wipe_redis_on_start: 48 | self.redis.flushall() 49 | try: 50 | self.redis.ft(f"{cfg.memory_index}").create_index( 51 | fields=SCHEMA, 52 | definition=IndexDefinition( 53 | prefix=[f"{cfg.memory_index}:"], 54 | index_type=IndexType.HASH 55 | ) 56 | ) 57 | except Exception as e: 58 | print("Error creating Redis search index: ", e) 59 | existing_vec_num = self.redis.get(f'{cfg.memory_index}-vec_num') 60 | self.vec_num = int(existing_vec_num.decode('utf-8')) if\ 61 | existing_vec_num else 0 62 | 63 | def add(self, data: str) -> str: 64 | """ 65 | Adds a data point to the memory. 66 | 67 | Args: 68 | data: The data to add. 69 | 70 | Returns: Message indicating that the data has been added. 71 | """ 72 | if 'Command Error:' in data: 73 | return "" 74 | vector = get_ada_embedding(data) 75 | vector = np.array(vector).astype(np.float32).tobytes() 76 | data_dict = { 77 | b"data": data, 78 | "embedding": vector 79 | } 80 | pipe = self.redis.pipeline() 81 | pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict) 82 | _text = f"Inserting data into memory at index: {self.vec_num}:\n"\ 83 | f"data: {data}" 84 | self.vec_num += 1 85 | pipe.set(f'{self.cfg.memory_index}-vec_num', self.vec_num) 86 | pipe.execute() 87 | return _text 88 | 89 | def get(self, data: str) -> Optional[List[Any]]: 90 | """ 91 | Gets the data from the memory that is most relevant to the given data. 92 | 93 | Args: 94 | data: The data to compare to. 95 | 96 | Returns: The most relevant data. 97 | """ 98 | return self.get_relevant(data, 1) 99 | 100 | def clear(self) -> str: 101 | """ 102 | Clears the redis server. 103 | 104 | Returns: A message indicating that the memory has been cleared. 105 | """ 106 | self.redis.flushall() 107 | return "Obliviated" 108 | 109 | def get_relevant( 110 | self, 111 | data: str, 112 | num_relevant: int = 5 113 | ) -> Optional[List[Any]]: 114 | """ 115 | Returns all the data in the memory that is relevant to the given data. 116 | Args: 117 | data: The data to compare to. 118 | num_relevant: The number of relevant data to return. 119 | 120 | Returns: A list of the most relevant data. 121 | """ 122 | query_embedding = get_ada_embedding(data) 123 | base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]" 124 | query = Query(base_query).return_fields( 125 | "data", 126 | "vector_score" 127 | ).sort_by("vector_score").dialect(2) 128 | query_vector = np.array(query_embedding).astype(np.float32).tobytes() 129 | 130 | try: 131 | results = self.redis.ft(f"{self.cfg.memory_index}").search( 132 | query, query_params={"vector": query_vector} 133 | ) 134 | except Exception as e: 135 | print("Error calling Redis search: ", e) 136 | return None 137 | return [result.data for result in results.docs] 138 | 139 | def get_stats(self): 140 | """ 141 | Returns: The stats of the memory index. 142 | """ 143 | return self.redis.ft(f"{self.cfg.memory_index}").info() 144 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/outputs/post2_output.txt: -------------------------------------------------------------------------------- 1 | Title: Demystifying Short-Term Certificates of Deposit: A Beginner's Guide to Boosting Your Investment Portfolio 2 | 3 | Introduction 4 | 5 | If you're a beginner investor seeking a low-risk, relatively stable investment opportunity, look no further than short-term certificates of deposit (CDs). They offer a fixed interest rate over a specified period and are generally considered one of the safest options for new investors. In this blog post, we'll explore the ins and outs of short-term CDs, including their benefits, risks, and how they can fit into your investment portfolio. We'll also share tips for choosing the best short-term CDs and discuss current market trends to help you make informed decisions. 6 | 7 | What are Short-Term Certificates of Deposit? 8 | 9 | Certificates of deposit are time-bound savings accounts issued by banks and credit unions. When you invest in a CD, you're essentially loaning your money to the financial institution for a predetermined term, typically ranging from three months to five years. In return, the bank agrees to pay you interest at a fixed rate. 10 | 11 | Short-term CDs generally have terms between three months and one year. They're ideal for investors who want a relatively safe and conservative option for their money, without tying it up for an extended period. 12 | 13 | Benefits of Short-Term CDs 14 | 15 | Safety: Since CDs are insured by the Federal Deposit Insurance Corporation (FDIC) up to $250,000 per depositor, per insured bank, you can rest assured that your investment is secure. 16 | 17 | Predictable returns: Unlike stocks or other volatile investments, CDs provide a fixed interest rate over the agreed term, ensuring predictable returns. 18 | 19 | Flexibility: Short-term CDs enable you to access your funds sooner than long-term CDs, providing more flexibility in managing your investment portfolio. 20 | 21 | Low minimum investment: Many banks and credit unions offer CDs with a low minimum investment, making it easy for beginner investors to get started. 22 | 23 | Risks Associated with Short-Term CDs 24 | 25 | Limited returns: While short-term CDs are safe, their interest rates are typically lower than those of long-term CDs or other higher-risk investments. 26 | 27 | Inflation risk: In times of high inflation, the interest rate on a CD may not keep up with the rising cost of living, eroding the purchasing power of your investment. 28 | 29 | Early withdrawal penalties: Withdrawing your funds before the maturity date may result in penalties, reducing your overall return. 30 | 31 | Incorporating Short-Term CDs into Your Investment Portfolio 32 | 33 | Short-term CDs can be a valuable addition to your investment portfolio, particularly as a low-risk component. They're best suited for conservative investors or those looking to diversify their holdings. You can allocate a portion of your portfolio to short-term CDs, while investing the remainder in stocks, bonds, or other higher-yielding assets. This strategy can help you strike a balance between risk and return. 34 | 35 | Tips for Choosing the Best Short-Term CDs 36 | 37 | Compare interest rates: Shop around for the highest interest rates available from different banks and credit unions. Online comparison tools can help streamline this process. 38 | 39 | Review the term length: Choose a term that aligns with your financial goals and liquidity needs. If you think you might need access to your funds sooner, opt for shorter-term CDs. 40 | 41 | Look for promotional rates: Some institutions offer promotional rates on CDs for new customers or for a limited time. Take advantage of these promotions to boost your returns. 42 | 43 | Consider laddering: To maximize returns and maintain liquidity, create a CD ladder by investing in multiple CDs with staggered maturity dates. This strategy allows you to benefit from higher interest rates as your CDs mature and reinvest in new ones. 44 | 45 | Current Market Trends 46 | 47 | Interest rates have been relatively low in recent years, making it crucial to shop around for the best rates on short-term CDs. However, as the economy continues to recover, interest rates may start to rise, making short-term CDs more attractive to investors. Keep an eye on economic indicators and the Federal Reserve's actions, as they can influence CD rates in the short and long term. 48 | 49 | In addition, the rise of online banks and fintech companies has increased competition in the financial sector, which can lead to better CD rates and terms for consumers. Don't limit your search to traditional brick-and-mortar banks; consider exploring online banks and credit unions as well. 50 | 51 | Conclusion 52 | 53 | Short-term certificates of deposit can be a valuable addition to a beginner investor's portfolio, offering safety, predictability, and flexibility. By understanding the benefits and risks associated with short-term CDs and following our tips for choosing the best options, you can make informed decisions and bolster your investment strategy. Stay aware of current market trends and keep an eye on interest rates to ensure you're making the most of your short-term CD investments. 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/tests/unit/test_browse_scrape_links.py: -------------------------------------------------------------------------------- 1 | 2 | # Generated by CodiumAI 3 | 4 | # Dependencies: 5 | # pip install pytest-mock 6 | import pytest 7 | 8 | from scripts.browse import scrape_links 9 | 10 | """ 11 | Code Analysis 12 | 13 | Objective: 14 | The objective of the 'scrape_links' function is to scrape hyperlinks from a 15 | given URL and return them in a formatted way. 16 | 17 | Inputs: 18 | - url: a string representing the URL to be scraped. 19 | 20 | Flow: 21 | 1. Send a GET request to the given URL using the requests library and the user agent header from the config file. 22 | 2. Check if the response contains an HTTP error. If it does, return "error". 23 | 3. Parse the HTML content of the response using the BeautifulSoup library. 24 | 4. Remove any script and style tags from the parsed HTML. 25 | 5. Extract all hyperlinks from the parsed HTML using the 'extract_hyperlinks' function. 26 | 6. Format the extracted hyperlinks using the 'format_hyperlinks' function. 27 | 7. Return the formatted hyperlinks. 28 | 29 | Outputs: 30 | - A list of formatted hyperlinks. 31 | 32 | Additional aspects: 33 | - The function uses the 'requests' and 'BeautifulSoup' libraries to send HTTP 34 | requests and parse HTML content, respectively. 35 | - The 'extract_hyperlinks' function is called to extract hyperlinks from the parsed HTML. 36 | - The 'format_hyperlinks' function is called to format the extracted hyperlinks. 37 | - The function checks for HTTP errors and returns "error" if any are found. 38 | """ 39 | 40 | 41 | class TestScrapeLinks: 42 | 43 | # Tests that the function returns a list of formatted hyperlinks when 44 | # provided with a valid url that returns a webpage with hyperlinks. 45 | def test_valid_url_with_hyperlinks(self): 46 | url = "https://www.google.com" 47 | result = scrape_links(url) 48 | assert len(result) > 0 49 | assert isinstance(result, list) 50 | assert isinstance(result[0], str) 51 | 52 | # Tests that the function returns correctly formatted hyperlinks when given a valid url. 53 | def test_valid_url(self, mocker): 54 | # Mock the requests.get() function to return a response with sample HTML containing hyperlinks 55 | mock_response = mocker.Mock() 56 | mock_response.status_code = 200 57 | mock_response.text = "Google" 58 | mocker.patch('requests.get', return_value=mock_response) 59 | 60 | # Call the function with a valid URL 61 | result = scrape_links("https://www.example.com") 62 | 63 | # Assert that the function returns correctly formatted hyperlinks 64 | assert result == ["Google (https://www.google.com)"] 65 | 66 | # Tests that the function returns "error" when given an invalid url. 67 | def test_invalid_url(self, mocker): 68 | # Mock the requests.get() function to return an HTTP error response 69 | mock_response = mocker.Mock() 70 | mock_response.status_code = 404 71 | mocker.patch('requests.get', return_value=mock_response) 72 | 73 | # Call the function with an invalid URL 74 | result = scrape_links("https://www.invalidurl.com") 75 | 76 | # Assert that the function returns "error" 77 | assert "Error:" in result 78 | 79 | # Tests that the function returns an empty list when the html contains no hyperlinks. 80 | def test_no_hyperlinks(self, mocker): 81 | # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks 82 | mock_response = mocker.Mock() 83 | mock_response.status_code = 200 84 | mock_response.text = "

No hyperlinks here

" 85 | mocker.patch('requests.get', return_value=mock_response) 86 | 87 | # Call the function with a URL containing no hyperlinks 88 | result = scrape_links("https://www.example.com") 89 | 90 | # Assert that the function returns an empty list 91 | assert result == [] 92 | 93 | # Tests that scrape_links() correctly extracts and formats hyperlinks from 94 | # a sample HTML containing a few hyperlinks. 95 | def test_scrape_links_with_few_hyperlinks(self, mocker): 96 | # Mock the requests.get() function to return a response with a sample HTML containing hyperlinks 97 | mock_response = mocker.Mock() 98 | mock_response.status_code = 200 99 | mock_response.text = """ 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | """ 108 | mocker.patch('requests.get', return_value=mock_response) 109 | 110 | # Call the function being tested 111 | result = scrape_links("https://www.example.com") 112 | 113 | # Assert that the function returns a list of formatted hyperlinks 114 | assert isinstance(result, list) 115 | assert len(result) == 3 116 | assert result[0] == "Google (https://www.google.com)" 117 | assert result[1] == "GitHub (https://github.com)" 118 | assert result[2] == "CodiumAI (https://www.codium.ai)" 119 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/browse.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from bs4 import BeautifulSoup 3 | from config import Config 4 | from llm_utils import create_chat_completion 5 | from urllib.parse import urlparse, urljoin 6 | 7 | cfg = Config() 8 | 9 | 10 | # Function to check if the URL is valid 11 | def is_valid_url(url): 12 | try: 13 | result = urlparse(url) 14 | return all([result.scheme, result.netloc]) 15 | except ValueError: 16 | return False 17 | 18 | 19 | # Function to sanitize the URL 20 | def sanitize_url(url): 21 | return urljoin(url, urlparse(url).path) 22 | 23 | 24 | # Define and check for local file address prefixes 25 | def check_local_file_access(url): 26 | local_prefixes = ['file:///', 'file://localhost', 'http://localhost', 'https://localhost'] 27 | return any(url.startswith(prefix) for prefix in local_prefixes) 28 | 29 | 30 | def get_response(url, headers=cfg.user_agent_header, timeout=10): 31 | try: 32 | # Restrict access to local files 33 | if check_local_file_access(url): 34 | raise ValueError('Access to local files is restricted') 35 | 36 | # Most basic check if the URL is valid: 37 | if not url.startswith('http://') and not url.startswith('https://'): 38 | raise ValueError('Invalid URL format') 39 | 40 | sanitized_url = sanitize_url(url) 41 | 42 | response = requests.get(sanitized_url, headers=headers, timeout=timeout) 43 | 44 | # Check if the response contains an HTTP error 45 | if response.status_code >= 400: 46 | return None, "Error: HTTP " + str(response.status_code) + " error" 47 | 48 | return response, None 49 | except ValueError as ve: 50 | # Handle invalid URL format 51 | return None, "Error: " + str(ve) 52 | 53 | except requests.exceptions.RequestException as re: 54 | # Handle exceptions related to the HTTP request (e.g., connection errors, timeouts, etc.) 55 | return None, "Error: " + str(re) 56 | 57 | 58 | def scrape_text(url): 59 | """Scrape text from a webpage""" 60 | response, error_message = get_response(url) 61 | if error_message: 62 | return error_message 63 | 64 | soup = BeautifulSoup(response.text, "html.parser") 65 | 66 | for script in soup(["script", "style"]): 67 | script.extract() 68 | 69 | text = soup.get_text() 70 | lines = (line.strip() for line in text.splitlines()) 71 | chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) 72 | text = '\n'.join(chunk for chunk in chunks if chunk) 73 | 74 | return text 75 | 76 | 77 | def extract_hyperlinks(soup): 78 | """Extract hyperlinks from a BeautifulSoup object""" 79 | hyperlinks = [] 80 | for link in soup.find_all('a', href=True): 81 | hyperlinks.append((link.text, link['href'])) 82 | return hyperlinks 83 | 84 | 85 | def format_hyperlinks(hyperlinks): 86 | """Format hyperlinks into a list of strings""" 87 | formatted_links = [] 88 | for link_text, link_url in hyperlinks: 89 | formatted_links.append(f"{link_text} ({link_url})") 90 | return formatted_links 91 | 92 | 93 | def scrape_links(url): 94 | """Scrape links from a webpage""" 95 | response, error_message = get_response(url) 96 | if error_message: 97 | return error_message 98 | 99 | soup = BeautifulSoup(response.text, "html.parser") 100 | 101 | for script in soup(["script", "style"]): 102 | script.extract() 103 | 104 | hyperlinks = extract_hyperlinks(soup) 105 | 106 | return format_hyperlinks(hyperlinks) 107 | 108 | 109 | def split_text(text, max_length=8192): 110 | """Split text into chunks of a maximum length""" 111 | paragraphs = text.split("\n") 112 | current_length = 0 113 | current_chunk = [] 114 | 115 | for paragraph in paragraphs: 116 | if current_length + len(paragraph) + 1 <= max_length: 117 | current_chunk.append(paragraph) 118 | current_length += len(paragraph) + 1 119 | else: 120 | yield "\n".join(current_chunk) 121 | current_chunk = [paragraph] 122 | current_length = len(paragraph) + 1 123 | 124 | if current_chunk: 125 | yield "\n".join(current_chunk) 126 | 127 | 128 | def create_message(chunk, question): 129 | """Create a message for the user to summarize a chunk of text""" 130 | return { 131 | "role": "user", 132 | "content": f"\"\"\"{chunk}\"\"\" Using the above text, please answer the following question: \"{question}\" -- if the question cannot be answered using the text, please summarize the text." 133 | } 134 | 135 | 136 | def summarize_text(text, question): 137 | """Summarize text using the LLM model""" 138 | if not text: 139 | return "Error: No text to summarize" 140 | 141 | text_length = len(text) 142 | print(f"Text length: {text_length} characters") 143 | 144 | summaries = [] 145 | chunks = list(split_text(text)) 146 | 147 | for i, chunk in enumerate(chunks): 148 | print(f"Summarizing chunk {i + 1} / {len(chunks)}") 149 | messages = [create_message(chunk, question)] 150 | 151 | summary = create_chat_completion( 152 | model=cfg.fast_llm_model, 153 | messages=messages, 154 | max_tokens=300, 155 | ) 156 | summaries.append(summary) 157 | 158 | print(f"Summarized {len(chunks)} chunks.") 159 | 160 | combined_summary = "\n".join(summaries) 161 | messages = [create_message(combined_summary, question)] 162 | 163 | final_summary = create_chat_completion( 164 | model=cfg.fast_llm_model, 165 | messages=messages, 166 | max_tokens=300, 167 | ) 168 | 169 | return final_summary 170 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/tests/test_json_parser.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import tests.context 3 | 4 | from scripts.json_parser import fix_and_parse_json 5 | 6 | class TestParseJson(unittest.TestCase): 7 | def test_valid_json(self): 8 | # Test that a valid JSON string is parsed correctly 9 | json_str = '{"name": "John", "age": 30, "city": "New York"}' 10 | obj = fix_and_parse_json(json_str) 11 | self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"}) 12 | 13 | def test_invalid_json_minor(self): 14 | # Test that an invalid JSON string can be fixed with gpt 15 | json_str = '{"name": "John", "age": 30, "city": "New York",}' 16 | self.assertRaises(Exception, fix_and_parse_json, json_str, try_to_fix_with_gpt=False) 17 | 18 | def test_invalid_json_major_with_gpt(self): 19 | # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False 20 | json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' 21 | self.assertRaises(Exception, fix_and_parse_json, json_str, try_to_fix_with_gpt=False) 22 | 23 | def test_invalid_json_major_without_gpt(self): 24 | # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False 25 | json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' 26 | # Assert that this raises an exception: 27 | with self.assertRaises(Exception): 28 | fix_and_parse_json(json_str, try_to_fix_with_gpt=False) 29 | 30 | def test_invalid_json_leading_sentence_with_gpt(self): 31 | # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False 32 | json_str = """I suggest we start by browsing the repository to find any issues that we can fix. 33 | 34 | { 35 | "command": { 36 | "name": "browse_website", 37 | "args":{ 38 | "url": "https://github.com/Torantulino/Auto-GPT" 39 | } 40 | }, 41 | "thoughts": 42 | { 43 | "text": "I suggest we start browsing the repository to find any issues that we can fix.", 44 | "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", 45 | "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", 46 | "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", 47 | "speak": "I will start browsing the repository to find any issues we can fix." 48 | } 49 | }""" 50 | good_obj = { 51 | "command": { 52 | "name": "browse_website", 53 | "args":{ 54 | "url": "https://github.com/Torantulino/Auto-GPT" 55 | } 56 | }, 57 | "thoughts": 58 | { 59 | "text": "I suggest we start browsing the repository to find any issues that we can fix.", 60 | "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", 61 | "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", 62 | "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", 63 | "speak": "I will start browsing the repository to find any issues we can fix." 64 | } 65 | } 66 | # Assert that this raises an exception: 67 | self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj) 68 | 69 | def test_invalid_json_leading_sentence_with_gpt(self): 70 | # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False 71 | json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. 72 | 73 | { 74 | "command": { 75 | "name": "browse_website", 76 | "args":{ 77 | "url": "https://github.com/Torantulino/Auto-GPT" 78 | } 79 | }, 80 | "thoughts": 81 | { 82 | "text": "Browsing the repository to identify potential bugs", 83 | "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", 84 | "plan": "- Analyze the repository for potential bugs and areas of improvement", 85 | "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", 86 | "speak": "I am browsing the repository to identify potential bugs." 87 | } 88 | }""" 89 | good_obj = { 90 | "command": { 91 | "name": "browse_website", 92 | "args":{ 93 | "url": "https://github.com/Torantulino/Auto-GPT" 94 | } 95 | }, 96 | "thoughts": 97 | { 98 | "text": "Browsing the repository to identify potential bugs", 99 | "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", 100 | "plan": "- Analyze the repository for potential bugs and areas of improvement", 101 | "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", 102 | "speak": "I am browsing the repository to identify potential bugs." 103 | } 104 | } 105 | # Assert that this raises an exception: 106 | self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj) 107 | 108 | 109 | if __name__ == '__main__': 110 | unittest.main() 111 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/tests/unit/json_tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import sys 4 | # Probably a better way: 5 | sys.path.append(os.path.abspath('../scripts')) 6 | from json_parser import fix_and_parse_json 7 | 8 | class TestParseJson(unittest.TestCase): 9 | def test_valid_json(self): 10 | # Test that a valid JSON string is parsed correctly 11 | json_str = '{"name": "John", "age": 30, "city": "New York"}' 12 | obj = fix_and_parse_json(json_str) 13 | self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"}) 14 | 15 | def test_invalid_json_minor(self): 16 | # Test that an invalid JSON string can be fixed with gpt 17 | json_str = '{"name": "John", "age": 30, "city": "New York",}' 18 | self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), {"name": "John", "age": 30, "city": "New York"}) 19 | 20 | def test_invalid_json_major_with_gpt(self): 21 | # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False 22 | json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' 23 | self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=True), {"name": "John", "age": 30, "city": "New York"}) 24 | 25 | def test_invalid_json_major_without_gpt(self): 26 | # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False 27 | json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' 28 | # Assert that this raises an exception: 29 | with self.assertRaises(Exception): 30 | fix_and_parse_json(json_str, try_to_fix_with_gpt=False) 31 | 32 | def test_invalid_json_leading_sentence_with_gpt(self): 33 | # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False 34 | json_str = """I suggest we start by browsing the repository to find any issues that we can fix. 35 | 36 | { 37 | "command": { 38 | "name": "browse_website", 39 | "args":{ 40 | "url": "https://github.com/Torantulino/Auto-GPT" 41 | } 42 | }, 43 | "thoughts": 44 | { 45 | "text": "I suggest we start browsing the repository to find any issues that we can fix.", 46 | "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", 47 | "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", 48 | "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", 49 | "speak": "I will start browsing the repository to find any issues we can fix." 50 | } 51 | }""" 52 | good_obj = { 53 | "command": { 54 | "name": "browse_website", 55 | "args":{ 56 | "url": "https://github.com/Torantulino/Auto-GPT" 57 | } 58 | }, 59 | "thoughts": 60 | { 61 | "text": "I suggest we start browsing the repository to find any issues that we can fix.", 62 | "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", 63 | "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", 64 | "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", 65 | "speak": "I will start browsing the repository to find any issues we can fix." 66 | } 67 | } 68 | # Assert that this raises an exception: 69 | self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj) 70 | 71 | def test_invalid_json_leading_sentence_with_gpt(self): 72 | # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False 73 | json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. 74 | 75 | { 76 | "command": { 77 | "name": "browse_website", 78 | "args":{ 79 | "url": "https://github.com/Torantulino/Auto-GPT" 80 | } 81 | }, 82 | "thoughts": 83 | { 84 | "text": "Browsing the repository to identify potential bugs", 85 | "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", 86 | "plan": "- Analyze the repository for potential bugs and areas of improvement", 87 | "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", 88 | "speak": "I am browsing the repository to identify potential bugs." 89 | } 90 | }""" 91 | good_obj = { 92 | "command": { 93 | "name": "browse_website", 94 | "args":{ 95 | "url": "https://github.com/Torantulino/Auto-GPT" 96 | } 97 | }, 98 | "thoughts": 99 | { 100 | "text": "Browsing the repository to identify potential bugs", 101 | "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", 102 | "plan": "- Analyze the repository for potential bugs and areas of improvement", 103 | "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", 104 | "speak": "I am browsing the repository to identify potential bugs." 105 | } 106 | } 107 | # Assert that this raises an exception: 108 | self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj) 109 | 110 | 111 | if __name__ == '__main__': 112 | unittest.main() 113 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import random 4 | import re 5 | import time 6 | from logging import LogRecord 7 | from colorama import Fore 8 | 9 | from colorama import Style 10 | 11 | import speak 12 | from config import Config 13 | from config import Singleton 14 | 15 | cfg = Config() 16 | 17 | ''' 18 | Logger that handle titles in different colors. 19 | Outputs logs in console, activity.log, and errors.log 20 | For console handler: simulates typing 21 | ''' 22 | 23 | 24 | class Logger(metaclass=Singleton): 25 | def __init__(self): 26 | # create log directory if it doesn't exist 27 | log_dir = os.path.join('..', 'logs') 28 | if not os.path.exists(log_dir): 29 | os.makedirs(log_dir) 30 | 31 | log_file = "activity.log" 32 | error_file = "error.log" 33 | 34 | console_formatter = AutoGptFormatter('%(title_color)s %(message)s') 35 | 36 | # Create a handler for console which simulate typing 37 | self.typing_console_handler = TypingConsoleHandler() 38 | self.typing_console_handler.setLevel(logging.INFO) 39 | self.typing_console_handler.setFormatter(console_formatter) 40 | 41 | # Create a handler for console without typing simulation 42 | self.console_handler = ConsoleHandler() 43 | self.console_handler.setLevel(logging.DEBUG) 44 | self.console_handler.setFormatter(console_formatter) 45 | 46 | # Info handler in activity.log 47 | self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file)) 48 | self.file_handler.setLevel(logging.DEBUG) 49 | info_formatter = AutoGptFormatter('%(asctime)s %(levelname)s %(title)s %(message_no_color)s') 50 | self.file_handler.setFormatter(info_formatter) 51 | 52 | # Error handler error.log 53 | error_handler = logging.FileHandler(os.path.join(log_dir, error_file)) 54 | error_handler.setLevel(logging.ERROR) 55 | error_formatter = AutoGptFormatter( 56 | '%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s %(message_no_color)s') 57 | error_handler.setFormatter(error_formatter) 58 | 59 | self.typing_logger = logging.getLogger('TYPER') 60 | self.typing_logger.addHandler(self.typing_console_handler) 61 | self.typing_logger.addHandler(self.file_handler) 62 | self.typing_logger.addHandler(error_handler) 63 | self.typing_logger.setLevel(logging.DEBUG) 64 | 65 | self.logger = logging.getLogger('LOGGER') 66 | self.logger.addHandler(self.console_handler) 67 | self.logger.addHandler(self.file_handler) 68 | self.logger.addHandler(error_handler) 69 | self.logger.setLevel(logging.DEBUG) 70 | 71 | def typewriter_log( 72 | self, 73 | title='', 74 | title_color='', 75 | content='', 76 | speak_text=False, 77 | level=logging.INFO): 78 | if speak_text and cfg.speak_mode: 79 | speak.say_text(f"{title}. {content}") 80 | 81 | if content: 82 | if isinstance(content, list): 83 | content = " ".join(content) 84 | else: 85 | content = "" 86 | 87 | self.typing_logger.log(level, content, extra={'title': title, 'color': title_color}) 88 | 89 | def debug( 90 | self, 91 | message, 92 | title='', 93 | title_color='', 94 | ): 95 | self._log(title, title_color, message, logging.DEBUG) 96 | 97 | def warn( 98 | self, 99 | message, 100 | title='', 101 | title_color='', 102 | ): 103 | self._log(title, title_color, message, logging.WARN) 104 | 105 | def error( 106 | self, 107 | title, 108 | message='' 109 | ): 110 | self._log(title, Fore.RED, message, logging.ERROR) 111 | 112 | def _log( 113 | self, 114 | title='', 115 | title_color='', 116 | message='', 117 | level=logging.INFO): 118 | if message: 119 | if isinstance(message, list): 120 | message = " ".join(message) 121 | self.logger.log(level, message, extra={'title': title, 'color': title_color}) 122 | 123 | def set_level(self, level): 124 | self.logger.setLevel(level) 125 | self.typing_logger.setLevel(level) 126 | 127 | 128 | ''' 129 | Output stream to console using simulated typing 130 | ''' 131 | 132 | 133 | class TypingConsoleHandler(logging.StreamHandler): 134 | def emit(self, record): 135 | min_typing_speed = 0.05 136 | max_typing_speed = 0.01 137 | 138 | msg = self.format(record) 139 | try: 140 | words = msg.split() 141 | for i, word in enumerate(words): 142 | print(word, end="", flush=True) 143 | if i < len(words) - 1: 144 | print(" ", end="", flush=True) 145 | typing_speed = random.uniform(min_typing_speed, max_typing_speed) 146 | time.sleep(typing_speed) 147 | # type faster after each word 148 | min_typing_speed = min_typing_speed * 0.95 149 | max_typing_speed = max_typing_speed * 0.95 150 | print() 151 | except Exception: 152 | self.handleError(record) 153 | 154 | class ConsoleHandler(logging.StreamHandler): 155 | def emit(self, record): 156 | msg = self.format(record) 157 | try: 158 | print(msg) 159 | except Exception: 160 | self.handleError(record) 161 | 162 | 163 | ''' 164 | Allows to handle custom placeholders 'title_color' and 'message_no_color'. 165 | To use this formatter, make sure to pass 'color', 'title' as log extras. 166 | ''' 167 | class AutoGptFormatter(logging.Formatter): 168 | def format(self, record: LogRecord) -> str: 169 | if (hasattr(record, 'color')): 170 | record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL 171 | else: 172 | record.title_color = getattr(record, 'title') 173 | if hasattr(record, 'msg'): 174 | record.message_no_color = remove_color_codes(getattr(record, 'msg')) 175 | else: 176 | record.message_no_color = '' 177 | return super().format(record) 178 | 179 | 180 | def remove_color_codes(s: str) -> str: 181 | ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') 182 | return ansi_escape.sub('', s) 183 | 184 | 185 | logger = Logger() 186 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/chat.py: -------------------------------------------------------------------------------- 1 | import time 2 | import openai 3 | from dotenv import load_dotenv 4 | from config import Config 5 | import token_counter 6 | from llm_utils import create_chat_completion 7 | from logger import logger 8 | import logging 9 | 10 | cfg = Config() 11 | 12 | def create_chat_message(role, content): 13 | """ 14 | Create a chat message with the given role and content. 15 | 16 | Args: 17 | role (str): The role of the message sender, e.g., "system", "user", or "assistant". 18 | content (str): The content of the message. 19 | 20 | Returns: 21 | dict: A dictionary containing the role and content of the message. 22 | """ 23 | return {"role": role, "content": content} 24 | 25 | 26 | def generate_context(prompt, relevant_memory, full_message_history, model): 27 | current_context = [ 28 | create_chat_message( 29 | "system", prompt), 30 | create_chat_message( 31 | "system", f"The current time and date is {time.strftime('%c')}"), 32 | create_chat_message( 33 | "system", f"This reminds you of these events from your past:\n{relevant_memory}\n\n")] 34 | 35 | # Add messages from the full message history until we reach the token limit 36 | next_message_to_add_index = len(full_message_history) - 1 37 | insertion_index = len(current_context) 38 | # Count the currently used tokens 39 | current_tokens_used = token_counter.count_message_tokens(current_context, model) 40 | return next_message_to_add_index, current_tokens_used, insertion_index, current_context 41 | 42 | 43 | # TODO: Change debug from hardcode to argument 44 | def chat_with_ai( 45 | prompt, 46 | user_input, 47 | full_message_history, 48 | permanent_memory, 49 | token_limit): 50 | """Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory.""" 51 | while True: 52 | try: 53 | """ 54 | Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory. 55 | 56 | Args: 57 | prompt (str): The prompt explaining the rules to the AI. 58 | user_input (str): The input from the user. 59 | full_message_history (list): The list of all messages sent between the user and the AI. 60 | permanent_memory (Obj): The memory object containing the permanent memory. 61 | token_limit (int): The maximum number of tokens allowed in the API call. 62 | 63 | Returns: 64 | str: The AI's response. 65 | """ 66 | model = cfg.fast_llm_model # TODO: Change model from hardcode to argument 67 | # Reserve 1000 tokens for the response 68 | 69 | logger.debug(f"Token limit: {token_limit}") 70 | send_token_limit = token_limit - 1000 71 | 72 | relevant_memory = permanent_memory.get_relevant(str(full_message_history[-9:]), 10) 73 | 74 | logger.debug(f'Memory Stats: {permanent_memory.get_stats()}') 75 | 76 | next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context( 77 | prompt, relevant_memory, full_message_history, model) 78 | 79 | while current_tokens_used > 2500: 80 | # remove memories until we are under 2500 tokens 81 | relevant_memory = relevant_memory[1:] 82 | next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context( 83 | prompt, relevant_memory, full_message_history, model) 84 | 85 | current_tokens_used += token_counter.count_message_tokens([create_chat_message("user", user_input)], model) # Account for user input (appended later) 86 | 87 | while next_message_to_add_index >= 0: 88 | # print (f"CURRENT TOKENS USED: {current_tokens_used}") 89 | message_to_add = full_message_history[next_message_to_add_index] 90 | 91 | tokens_to_add = token_counter.count_message_tokens([message_to_add], model) 92 | if current_tokens_used + tokens_to_add > send_token_limit: 93 | break 94 | 95 | # Add the most recent message to the start of the current context, after the two system prompts. 96 | current_context.insert(insertion_index, full_message_history[next_message_to_add_index]) 97 | 98 | # Count the currently used tokens 99 | current_tokens_used += tokens_to_add 100 | 101 | # Move to the next most recent message in the full message history 102 | next_message_to_add_index -= 1 103 | 104 | # Append user input, the length of this is accounted for above 105 | current_context.extend([create_chat_message("user", user_input)]) 106 | 107 | # Calculate remaining tokens 108 | tokens_remaining = token_limit - current_tokens_used 109 | # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT" 110 | 111 | # Debug print the current context 112 | logger.debug(f"Token limit: {token_limit}") 113 | logger.debug(f"Send Token Count: {current_tokens_used}") 114 | logger.debug(f"Tokens remaining for response: {tokens_remaining}") 115 | logger.debug("------------ CONTEXT SENT TO AI ---------------") 116 | for message in current_context: 117 | # Skip printing the prompt 118 | if message["role"] == "system" and message["content"] == prompt: 119 | continue 120 | logger.debug(f"{message['role'].capitalize()}: {message['content']}") 121 | logger.debug("") 122 | logger.debug("----------- END OF CONTEXT ----------------") 123 | 124 | # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about 125 | assistant_reply = create_chat_completion( 126 | model=model, 127 | messages=current_context, 128 | max_tokens=tokens_remaining, 129 | ) 130 | 131 | # Update full message history 132 | full_message_history.append( 133 | create_chat_message( 134 | "user", user_input)) 135 | full_message_history.append( 136 | create_chat_message( 137 | "assistant", assistant_reply)) 138 | 139 | return assistant_reply 140 | except openai.error.RateLimitError: 141 | # TODO: When we switch to langchain, this is built in 142 | print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...") 143 | time.sleep(10) 144 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/config.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import os 3 | import openai 4 | import yaml 5 | from dotenv import load_dotenv 6 | # Load environment variables from .env file 7 | load_dotenv() 8 | 9 | 10 | class Singleton(abc.ABCMeta, type): 11 | """ 12 | Singleton metaclass for ensuring only one instance of a class. 13 | """ 14 | 15 | _instances = {} 16 | 17 | def __call__(cls, *args, **kwargs): 18 | """Call method for the singleton metaclass.""" 19 | if cls not in cls._instances: 20 | cls._instances[cls] = super( 21 | Singleton, cls).__call__( 22 | *args, **kwargs) 23 | return cls._instances[cls] 24 | 25 | 26 | class AbstractSingleton(abc.ABC, metaclass=Singleton): 27 | pass 28 | 29 | 30 | class Config(metaclass=Singleton): 31 | """ 32 | Configuration class to store the state of bools for different scripts access. 33 | """ 34 | 35 | def __init__(self): 36 | """Initialize the Config class""" 37 | self.debug_mode = False 38 | self.continuous_mode = False 39 | self.speak_mode = False 40 | 41 | self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo") 42 | self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4") 43 | self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000)) 44 | self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000)) 45 | 46 | self.openai_api_key = os.getenv("OPENAI_API_KEY") 47 | self.temperature = int(os.getenv("TEMPERATURE", "1")) 48 | self.use_azure = False 49 | self.use_azure = os.getenv("USE_AZURE") == 'True' 50 | self.execute_local_commands = os.getenv('EXECUTE_LOCAL_COMMANDS', 'False') == 'True' 51 | 52 | if self.use_azure: 53 | self.load_azure_config() 54 | openai.api_type = "azure" 55 | openai.api_base = self.openai_api_base 56 | openai.api_version = self.openai_api_version 57 | 58 | self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY") 59 | self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID") 60 | self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID") 61 | 62 | self.use_mac_os_tts = False 63 | self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS") 64 | 65 | self.google_api_key = os.getenv("GOOGLE_API_KEY") 66 | self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID") 67 | 68 | self.pinecone_api_key = os.getenv("PINECONE_API_KEY") 69 | self.pinecone_region = os.getenv("PINECONE_ENV") 70 | 71 | self.image_provider = os.getenv("IMAGE_PROVIDER") 72 | self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN") 73 | 74 | # User agent headers to use when browsing web 75 | # Some websites might just completely deny request with an error code if no user agent was found. 76 | self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"} 77 | self.redis_host = os.getenv("REDIS_HOST", "localhost") 78 | self.redis_port = os.getenv("REDIS_PORT", "6379") 79 | self.redis_password = os.getenv("REDIS_PASSWORD", "") 80 | self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True' 81 | self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt') 82 | # Note that indexes must be created on db 0 in redis, this is not configurable. 83 | 84 | self.memory_backend = os.getenv("MEMORY_BACKEND", 'local') 85 | # Initialize the OpenAI API client 86 | openai.api_key = self.openai_api_key 87 | 88 | def get_azure_deployment_id_for_model(self, model: str) -> str: 89 | """ 90 | Returns the relevant deployment id for the model specified. 91 | 92 | Parameters: 93 | model(str): The model to map to the deployment id. 94 | 95 | Returns: 96 | The matching deployment id if found, otherwise an empty string. 97 | """ 98 | if model == self.fast_llm_model: 99 | return self.azure_model_to_deployment_id_map["fast_llm_model_deployment_id"] 100 | elif model == self.smart_llm_model: 101 | return self.azure_model_to_deployment_id_map["smart_llm_model_deployment_id"] 102 | elif model == "text-embedding-ada-002": 103 | return self.azure_model_to_deployment_id_map["embedding_model_deployment_id"] 104 | else: 105 | return "" 106 | 107 | AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), '..', 'azure.yaml') 108 | 109 | def load_azure_config(self, config_file: str=AZURE_CONFIG_FILE) -> None: 110 | """ 111 | Loads the configuration parameters for Azure hosting from the specified file path as a yaml file. 112 | 113 | Parameters: 114 | config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml" 115 | 116 | Returns: 117 | None 118 | """ 119 | try: 120 | with open(config_file) as file: 121 | config_params = yaml.load(file, Loader=yaml.FullLoader) 122 | except FileNotFoundError: 123 | config_params = {} 124 | self.openai_api_base = config_params.get("azure_api_base", "") 125 | self.openai_api_version = config_params.get("azure_api_version", "") 126 | self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", []) 127 | 128 | def set_continuous_mode(self, value: bool): 129 | """Set the continuous mode value.""" 130 | self.continuous_mode = value 131 | 132 | def set_speak_mode(self, value: bool): 133 | """Set the speak mode value.""" 134 | self.speak_mode = value 135 | 136 | def set_fast_llm_model(self, value: str): 137 | """Set the fast LLM model value.""" 138 | self.fast_llm_model = value 139 | 140 | def set_smart_llm_model(self, value: str): 141 | """Set the smart LLM model value.""" 142 | self.smart_llm_model = value 143 | 144 | def set_fast_token_limit(self, value: int): 145 | """Set the fast token limit value.""" 146 | self.fast_token_limit = value 147 | 148 | def set_smart_token_limit(self, value: int): 149 | """Set the smart token limit value.""" 150 | self.smart_token_limit = value 151 | 152 | def set_openai_api_key(self, value: str): 153 | """Set the OpenAI API key value.""" 154 | self.openai_api_key = value 155 | 156 | def set_elevenlabs_api_key(self, value: str): 157 | """Set the ElevenLabs API key value.""" 158 | self.elevenlabs_api_key = value 159 | 160 | def set_elevenlabs_voice_1_id(self, value: str): 161 | """Set the ElevenLabs Voice 1 ID value.""" 162 | self.elevenlabs_voice_1_id = value 163 | 164 | def set_elevenlabs_voice_2_id(self, value: str): 165 | """Set the ElevenLabs Voice 2 ID value.""" 166 | self.elevenlabs_voice_2_id = value 167 | 168 | def set_google_api_key(self, value: str): 169 | """Set the Google API key value.""" 170 | self.google_api_key = value 171 | 172 | def set_custom_search_engine_id(self, value: str): 173 | """Set the custom search engine id value.""" 174 | self.custom_search_engine_id = value 175 | 176 | def set_pinecone_api_key(self, value: str): 177 | """Set the Pinecone API key value.""" 178 | self.pinecone_api_key = value 179 | 180 | def set_pinecone_region(self, value: str): 181 | """Set the Pinecone region value.""" 182 | self.pinecone_region = value 183 | 184 | def set_debug_mode(self, value: bool): 185 | """Set the debug mode value.""" 186 | self.debug_mode = value 187 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/outputs/logs/message-log-4.txt: -------------------------------------------------------------------------------- 1 | System: Permanent memory: ['Online business ideas: online t-shirt shop, dropshipping, SEO expert, selling online courses, bug testing, video producer, graphic designer, website developer, app development, theme designer, translator, social media manager, remote customer service agent, virtual assistant, remote sales team, influencer, handmade goods shop, ethical fashion brand, self-publishing, speechwriting, private chef, selling natural and organic beauty products.', '2021 digital marketing trends: in-email purchases, sophisticated targeting, virtual reality, drone software, AI marketing technology, voice search, chatbots, personalization, influencer marketing, online and mobile commerce.'] 2 | User: NEXT COMMAND 3 | Assistant: { 4 | "command": 5 | { 6 | "name": "google", 7 | "args": 8 | { 9 | "input": "trending online business ideas" 10 | } 11 | }, 12 | "thoughts": 13 | { 14 | "text": "I need to find some trending online business ideas to develop and manage autonomously.", 15 | "reasoning": "By finding trending ideas, I can create successful businesses with high potential for growth.", 16 | "plan": "- Search for trending online business ideas\n- Analyze the results\n- Choose a suitable idea\n- Develop and manage the business autonomously", 17 | "criticism": "I need to make sure to choose a simple idea with low legal complications to avoid overcomplicating the entire process." 18 | } 19 | } 20 | System: [ 21 | "https://www.shopify.com/nz/blog/online-business-ideas", 22 | "https://www.businessnewsdaily.com/4572-online-business-ideas.html", 23 | "https://www.oberlo.com/blog/online-business-ideas", 24 | "https://99designs.com/blog/business/online-business-ideas/", 25 | "https://www.nerdwallet.com/article/small-business/online-business-ideas" 26 | ] 27 | User: NEXT COMMAND 28 | Assistant: { 29 | "command": 30 | { 31 | "name": "transcribe_summarise", 32 | "args": 33 | { 34 | "url": "https://www.oberlo.com/blog/online-business-ideas" 35 | } 36 | }, 37 | "thoughts": 38 | { 39 | "text": "I need to learn about trending online business ideas from a relevant source.", 40 | "reasoning": "By summarizing the content of the article, I can gain an understanding of different business ideas and choose a suitable one.", 41 | "plan": "- Summarize the article\n- Identify a trending online business idea\n- Start planning to develop and manage the business autonomously", 42 | "criticism": "It's important to pick a business idea that is both trending and simple to execute with low legal complications." 43 | } 44 | } 45 | System: "Result" : The article discusses numerous online business ideas, including starting an online t-shirt shop, dropshipping, SEO expert, selling online courses, bug testing, video producer, graphic designer, website developer, app development, theme designer, translator, social media manager, remote customer service agent, virtual assistant, and remote sales team. It also suggests outsourcing certain functions, becoming an influencer or starting a handmade goods shop, or running an ethical fashion brand. Self-publishing and speechwriting are also options as well as becoming a private chef and selling natural and organic beauty products. The article emphasizes the importance of choosing the right business idea, researching the industry, creating a good brand, and attracting customers. 46 | User: NEXT COMMAND 47 | Assistant: { 48 | "command": 49 | { 50 | "name": "memory_add", 51 | "args": 52 | { 53 | "string": "Online business ideas: online t-shirt shop, dropshipping, SEO expert, selling online courses, bug testing, video producer, graphic designer, website developer, app development, theme designer, translator, social media manager, remote customer service agent, virtual assistant, remote sales team, influencer, handmade goods shop, ethical fashion brand, self-publishing, speechwriting, private chef, selling natural and organic beauty products." 54 | } 55 | }, 56 | "thoughts": 57 | { 58 | "text": "I need to store the summarized list of online business ideas to plan and execute the development and management of the chosen business autonomously.", 59 | "reasoning": "Storing the list allows me to revisit the options whenever needed and analyze them further.", 60 | "plan": "- Add the list of online business ideas to memory\n- Choose a suitable business idea\n- Develop and manage the chosen business autonomously", 61 | "criticism": "I need to make sure to pick an idea that is simple, with low legal complications and high growth potential." 62 | } 63 | } 64 | System: Committing memory with string Online business ideas: online t-shirt shop, dropshipping, SEO expert, selling online courses, bug testing, video producer, graphic designer, website developer, app development, theme designer, translator, social media manager, remote customer service agent, virtual assistant, remote sales team, influencer, handmade goods shop, ethical fashion brand, self-publishing, speechwriting, private chef, selling natural and organic beauty products. 65 | User: NEXT COMMAND 66 | Assistant: { 67 | "command": 68 | { 69 | "name": "google", 70 | "args": 71 | { 72 | "input": "trends in digital marketing 2021" 73 | } 74 | }, 75 | "thoughts": 76 | { 77 | "text": "I need to find the current trends in digital marketing to help inform my decision on which online business to start.", 78 | "reasoning": "Understanding the trending marketing strategies can make the business more effective and increase its growth potential.", 79 | "plan": "- Search for digital marketing trends in 2021\n- Analyze the results\n- Choose a suitable online business idea\n- Develop and manage the business autonomously", 80 | "criticism": "I must ensure that the business idea is not only trending but also simple and does not involve legal complications." 81 | } 82 | } 83 | System: [ 84 | "https://www.wordstream.com/blog/ws/2021/03/26/digital-marketing-trends-2021", 85 | "https://mention.com/en/blog/whats-next-for-digital-marketing-in-2021/", 86 | "https://www.p8.group/en/blog/top-10-digital-marketing-trends-2021", 87 | "https://snov.io/blog/digital-marketing-trends-2021/", 88 | "https://digitalmarketinginstitute.com/blog/the-next-big-digital-marketing-trends" 89 | ] 90 | User: NEXT COMMAND 91 | Assistant: { 92 | "command": 93 | { 94 | "name": "transcribe_summarise", 95 | "args": 96 | { 97 | "url": "https://www.wordstream.com/blog/ws/2021/03/26/digital-marketing-trends-2021" 98 | } 99 | }, 100 | "thoughts": 101 | { 102 | "text": "I should get insights into current trends in digital marketing to inform my decision on which online business to start.", 103 | "reasoning": "Understanding the trending marketing strategies can make the business more effective and increase its growth potential.", 104 | "plan": "- Summarize the digital marketing article\n- Identify the trends that may be suitable for the chosen online business\n- Choose a suitable online business idea\n- Develop and manage the business autonomously", 105 | "criticism": "I must ensure that the business idea is not only trending but also simple and does not involve legal complications." 106 | } 107 | } 108 | System: "Result" : The COVID-19 pandemic has accelerated the shift towards digital marketing, prompting businesses to use new and tried and true strategies, including in-email purchases, sophisticated targeting, virtual reality, and drone software. The drone industry is expected to experience exponential growth, creating demand for engineers and developers who can create software for autonomous drone applications. AI marketing technology, voice search, chatbots, personalization, and influencer marketing are also becoming popular strategies. Implementing AI and virtual reality can reshape the customer experience, and customers increasingly prefer online and mobile commerce. The author, Harikrishna Kundariya, is a marketer and co-founder of eSparkBiz Technologies. 109 | User: NEXT COMMAND 110 | Assistant: { 111 | "command": 112 | { 113 | "name": "memory_add", 114 | "args": 115 | { 116 | "string": "2021 digital marketing trends: in-email purchases, sophisticated targeting, virtual reality, drone software, AI marketing technology, voice search, chatbots, personalization, influencer marketing, online and mobile commerce." 117 | } 118 | }, 119 | "thoughts": 120 | { 121 | "text": "I need to store the summarized list of 2021 digital marketing trends in my memory to help decide which online business to start.", 122 | "reasoning": "Storing the trends will allow me to reference them when selecting a business idea to implement.", 123 | "plan": "- Add the list of 2021 digital marketing trends to memory\n- Choose a suitable online business idea\n- Develop and manage the chosen business autonomously", 124 | "criticism": "I must ensure that the chosen business idea leverages a trending marketing strategy while remaining simple and without legal complications." 125 | } 126 | } 127 | System: Committing memory with string 2021 digital marketing trends: in-email purchases, sophisticated targeting, virtual reality, drone software, AI marketing technology, voice search, chatbots, personalization, influencer marketing, online and mobile commerce. 128 | User: NEXT COMMAND 129 | Assistant: { 130 | "command": { 131 | "name": "start_instance", 132 | "args": { 133 | "name": "business_ideas_analysis", 134 | "prompt": "Based on the current digital marketing trends and the given online business ideas, suggest a business idea that is simple and likely to increase net worth with low legal complications." 135 | } 136 | }, 137 | "thoughts": { 138 | "text": "I need GTP-4's assistance in choosing a suitable business idea.", 139 | "reasoning": "By providing GTP-4 with relevant trends and online business ideas, it may help me select a simple, low-legal-complications idea with a high potential for growth.", 140 | "plan": "- Start a GTP-4 instance\n- Get GTP-4's suggestion on a suitable business idea\n- Develop and manage the chosen business autonomously", 141 | "criticism": "I need to make sure the chosen business idea aligns with my strengths and does not contradict any known trends or market changes." 142 | } 143 | } -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/commands.py: -------------------------------------------------------------------------------- 1 | import browse 2 | import json 3 | from memory import get_memory 4 | import datetime 5 | import agent_manager as agents 6 | import speak 7 | from config import Config 8 | import ai_functions as ai 9 | from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files 10 | from execute_code import execute_python_file, execute_shell 11 | from json_parser import fix_and_parse_json 12 | from image_gen import generate_image 13 | from duckduckgo_search import ddg 14 | from googleapiclient.discovery import build 15 | from googleapiclient.errors import HttpError 16 | 17 | cfg = Config() 18 | 19 | 20 | def is_valid_int(value): 21 | try: 22 | int(value) 23 | return True 24 | except ValueError: 25 | return False 26 | 27 | def get_command(response): 28 | """Parse the response and return the command name and arguments""" 29 | try: 30 | response_json = fix_and_parse_json(response) 31 | 32 | if "command" not in response_json: 33 | return "Error:" , "Missing 'command' object in JSON" 34 | 35 | command = response_json["command"] 36 | 37 | if "name" not in command: 38 | return "Error:", "Missing 'name' field in 'command' object" 39 | 40 | command_name = command["name"] 41 | 42 | # Use an empty dictionary if 'args' field is not present in 'command' object 43 | arguments = command.get("args", {}) 44 | 45 | return command_name, arguments 46 | except json.decoder.JSONDecodeError: 47 | return "Error:", "Invalid JSON" 48 | # All other errors, return "Error: + error message" 49 | except Exception as e: 50 | return "Error:", str(e) 51 | 52 | 53 | def execute_command(command_name, arguments): 54 | """Execute the command and return the result""" 55 | memory = get_memory(cfg) 56 | 57 | try: 58 | if command_name == "google": 59 | 60 | # Check if the Google API key is set and use the official search method 61 | # If the API key is not set or has only whitespaces, use the unofficial search method 62 | if cfg.google_api_key and (cfg.google_api_key.strip() if cfg.google_api_key else None): 63 | return google_official_search(arguments["input"]) 64 | else: 65 | return google_search(arguments["input"]) 66 | elif command_name == "memory_add": 67 | return memory.add(arguments["string"]) 68 | elif command_name == "start_agent": 69 | return start_agent( 70 | arguments["name"], 71 | arguments["task"], 72 | arguments["prompt"]) 73 | elif command_name == "message_agent": 74 | return message_agent(arguments["key"], arguments["message"]) 75 | elif command_name == "list_agents": 76 | return list_agents() 77 | elif command_name == "delete_agent": 78 | return delete_agent(arguments["key"]) 79 | elif command_name == "get_text_summary": 80 | return get_text_summary(arguments["url"], arguments["question"]) 81 | elif command_name == "get_hyperlinks": 82 | return get_hyperlinks(arguments["url"]) 83 | elif command_name == "read_file": 84 | return read_file(arguments["file"]) 85 | elif command_name == "write_to_file": 86 | return write_to_file(arguments["file"], arguments["text"]) 87 | elif command_name == "append_to_file": 88 | return append_to_file(arguments["file"], arguments["text"]) 89 | elif command_name == "delete_file": 90 | return delete_file(arguments["file"]) 91 | elif command_name == "search_files": 92 | return search_files(arguments["directory"]) 93 | elif command_name == "browse_website": 94 | return browse_website(arguments["url"], arguments["question"]) 95 | # TODO: Change these to take in a file rather than pasted code, if 96 | # non-file is given, return instructions "Input should be a python 97 | # filepath, write your code to file and try again" 98 | elif command_name == "evaluate_code": 99 | return ai.evaluate_code(arguments["code"]) 100 | elif command_name == "improve_code": 101 | return ai.improve_code(arguments["suggestions"], arguments["code"]) 102 | elif command_name == "write_tests": 103 | return ai.write_tests(arguments["code"], arguments.get("focus")) 104 | elif command_name == "execute_python_file": # Add this command 105 | return execute_python_file(arguments["file"]) 106 | elif command_name == "execute_shell": 107 | if cfg.execute_local_commands: 108 | return execute_shell(arguments["command_line"]) 109 | else: 110 | return "You are not allowed to run local shell commands. To execute shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' in your config. Do not attempt to bypass the restriction." 111 | elif command_name == "generate_image": 112 | return generate_image(arguments["prompt"]) 113 | elif command_name == "do_nothing": 114 | return "No action performed." 115 | elif command_name == "task_complete": 116 | shutdown() 117 | else: 118 | return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format." 119 | # All errors, return "Error: + error message" 120 | except Exception as e: 121 | return "Error: " + str(e) 122 | 123 | 124 | def get_datetime(): 125 | """Return the current date and time""" 126 | return "Current date and time: " + \ 127 | datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") 128 | 129 | 130 | def google_search(query, num_results=8): 131 | """Return the results of a google search""" 132 | search_results = [] 133 | for j in ddg(query, max_results=num_results): 134 | search_results.append(j) 135 | 136 | return json.dumps(search_results, ensure_ascii=False, indent=4) 137 | 138 | def google_official_search(query, num_results=8): 139 | """Return the results of a google search using the official Google API""" 140 | from googleapiclient.discovery import build 141 | from googleapiclient.errors import HttpError 142 | import json 143 | 144 | try: 145 | # Get the Google API key and Custom Search Engine ID from the config file 146 | api_key = cfg.google_api_key 147 | custom_search_engine_id = cfg.custom_search_engine_id 148 | 149 | # Initialize the Custom Search API service 150 | service = build("customsearch", "v1", developerKey=api_key) 151 | 152 | # Send the search query and retrieve the results 153 | result = service.cse().list(q=query, cx=custom_search_engine_id, num=num_results).execute() 154 | 155 | # Extract the search result items from the response 156 | search_results = result.get("items", []) 157 | 158 | # Create a list of only the URLs from the search results 159 | search_results_links = [item["link"] for item in search_results] 160 | 161 | except HttpError as e: 162 | # Handle errors in the API call 163 | error_details = json.loads(e.content.decode()) 164 | 165 | # Check if the error is related to an invalid or missing API key 166 | if error_details.get("error", {}).get("code") == 403 and "invalid API key" in error_details.get("error", {}).get("message", ""): 167 | return "Error: The provided Google API key is invalid or missing." 168 | else: 169 | return f"Error: {e}" 170 | 171 | # Return the list of search result URLs 172 | return search_results_links 173 | 174 | def browse_website(url, question): 175 | """Browse a website and return the summary and links""" 176 | summary = get_text_summary(url, question) 177 | links = get_hyperlinks(url) 178 | 179 | # Limit links to 5 180 | if len(links) > 5: 181 | links = links[:5] 182 | 183 | result = f"""Website Content Summary: {summary}\n\nLinks: {links}""" 184 | 185 | return result 186 | 187 | 188 | def get_text_summary(url, question): 189 | """Return the results of a google search""" 190 | text = browse.scrape_text(url) 191 | summary = browse.summarize_text(text, question) 192 | return """ "Result" : """ + summary 193 | 194 | 195 | def get_hyperlinks(url): 196 | """Return the results of a google search""" 197 | link_list = browse.scrape_links(url) 198 | return link_list 199 | 200 | 201 | def commit_memory(string): 202 | """Commit a string to memory""" 203 | _text = f"""Committing memory with string "{string}" """ 204 | mem.permanent_memory.append(string) 205 | return _text 206 | 207 | 208 | def delete_memory(key): 209 | """Delete a memory with a given key""" 210 | if key >= 0 and key < len(mem.permanent_memory): 211 | _text = "Deleting memory with key " + str(key) 212 | del mem.permanent_memory[key] 213 | print(_text) 214 | return _text 215 | else: 216 | print("Invalid key, cannot delete memory.") 217 | return None 218 | 219 | 220 | def overwrite_memory(key, string): 221 | """Overwrite a memory with a given key and string""" 222 | # Check if the key is a valid integer 223 | if is_valid_int(key): 224 | key_int = int(key) 225 | # Check if the integer key is within the range of the permanent_memory list 226 | if 0 <= key_int < len(mem.permanent_memory): 227 | _text = "Overwriting memory with key " + str(key) + " and string " + string 228 | # Overwrite the memory slot with the given integer key and string 229 | mem.permanent_memory[key_int] = string 230 | print(_text) 231 | return _text 232 | else: 233 | print(f"Invalid key '{key}', out of range.") 234 | return None 235 | # Check if the key is a valid string 236 | elif isinstance(key, str): 237 | _text = "Overwriting memory with key " + key + " and string " + string 238 | # Overwrite the memory slot with the given string key and string 239 | mem.permanent_memory[key] = string 240 | print(_text) 241 | return _text 242 | else: 243 | print(f"Invalid key '{key}', must be an integer or a string.") 244 | return None 245 | 246 | 247 | def shutdown(): 248 | """Shut down the program""" 249 | print("Shutting down...") 250 | quit() 251 | 252 | 253 | def start_agent(name, task, prompt, model=cfg.fast_llm_model): 254 | """Start an agent with a given name, task, and prompt""" 255 | global cfg 256 | 257 | # Remove underscores from name 258 | voice_name = name.replace("_", " ") 259 | 260 | first_message = f"""You are {name}. Respond with: "Acknowledged".""" 261 | agent_intro = f"{voice_name} here, Reporting for duty!" 262 | 263 | # Create agent 264 | if cfg.speak_mode: 265 | speak.say_text(agent_intro, 1) 266 | key, ack = agents.create_agent(task, first_message, model) 267 | 268 | if cfg.speak_mode: 269 | speak.say_text(f"Hello {voice_name}. Your task is as follows. {task}.") 270 | 271 | # Assign task (prompt), get response 272 | agent_response = message_agent(key, prompt) 273 | 274 | return f"Agent {name} created with key {key}. First response: {agent_response}" 275 | 276 | 277 | def message_agent(key, message): 278 | """Message an agent with a given key and message""" 279 | global cfg 280 | 281 | # Check if the key is a valid integer 282 | if is_valid_int(key): 283 | agent_response = agents.message_agent(int(key), message) 284 | # Check if the key is a valid string 285 | elif isinstance(key, str): 286 | agent_response = agents.message_agent(key, message) 287 | else: 288 | return "Invalid key, must be an integer or a string." 289 | 290 | # Speak response 291 | if cfg.speak_mode: 292 | speak.say_text(agent_response, 1) 293 | return agent_response 294 | 295 | 296 | def list_agents(): 297 | """List all agents""" 298 | return agents.list_agents() 299 | 300 | 301 | def delete_agent(key): 302 | """Delete an agent with a given key""" 303 | result = agents.delete_agent(key) 304 | if not result: 305 | return f"Agent {key} does not exist." 306 | return f"Agent {key} deleted." 307 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/scripts/main.py: -------------------------------------------------------------------------------- 1 | import json 2 | import random 3 | import commands as cmd 4 | import utils 5 | from memory import get_memory, get_supported_memory_backends 6 | import data 7 | import chat 8 | from colorama import Fore, Style 9 | from spinner import Spinner 10 | import time 11 | import speak 12 | from config import Config 13 | from json_parser import fix_and_parse_json 14 | from ai_config import AIConfig 15 | import traceback 16 | import yaml 17 | import argparse 18 | from logger import logger 19 | import logging 20 | 21 | cfg = Config() 22 | 23 | def check_openai_api_key(): 24 | """Check if the OpenAI API key is set in config.py or as an environment variable.""" 25 | if not cfg.openai_api_key: 26 | print( 27 | Fore.RED + 28 | "Please set your OpenAI API key in config.py or as an environment variable." 29 | ) 30 | print("You can get your key from https://beta.openai.com/account/api-keys") 31 | exit(1) 32 | 33 | def attempt_to_fix_json_by_finding_outermost_brackets(json_string): 34 | if cfg.speak_mode and cfg.debug_mode: 35 | speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.") 36 | logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n") 37 | 38 | try: 39 | # Use regex to search for JSON objects 40 | import regex 41 | json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}") 42 | json_match = json_pattern.search(json_string) 43 | 44 | if json_match: 45 | # Extract the valid JSON object from the string 46 | json_string = json_match.group(0) 47 | logger.typewriter_log(title="Apparently json was fixed.", title_color=Fore.GREEN) 48 | if cfg.speak_mode and cfg.debug_mode: 49 | speak.say_text("Apparently json was fixed.") 50 | else: 51 | raise ValueError("No valid JSON object found") 52 | 53 | except (json.JSONDecodeError, ValueError) as e: 54 | if cfg.speak_mode: 55 | speak.say_text("Didn't work. I will have to ignore this response then.") 56 | logger.error("Error: Invalid JSON, setting it to empty JSON now.\n") 57 | json_string = {} 58 | 59 | return json_string 60 | 61 | def print_assistant_thoughts(assistant_reply): 62 | """Prints the assistant's thoughts to the console""" 63 | global ai_name 64 | global cfg 65 | try: 66 | try: 67 | # Parse and print Assistant response 68 | assistant_reply_json = fix_and_parse_json(assistant_reply) 69 | except json.JSONDecodeError as e: 70 | logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply) 71 | assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply) 72 | assistant_reply_json = fix_and_parse_json(assistant_reply_json) 73 | 74 | # Check if assistant_reply_json is a string and attempt to parse it into a JSON object 75 | if isinstance(assistant_reply_json, str): 76 | try: 77 | assistant_reply_json = json.loads(assistant_reply_json) 78 | except json.JSONDecodeError as e: 79 | logger.error("Error: Invalid JSON\n", assistant_reply) 80 | assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply_json) 81 | 82 | assistant_thoughts_reasoning = None 83 | assistant_thoughts_plan = None 84 | assistant_thoughts_speak = None 85 | assistant_thoughts_criticism = None 86 | assistant_thoughts = assistant_reply_json.get("thoughts", {}) 87 | assistant_thoughts_text = assistant_thoughts.get("text") 88 | 89 | if assistant_thoughts: 90 | assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") 91 | assistant_thoughts_plan = assistant_thoughts.get("plan") 92 | assistant_thoughts_criticism = assistant_thoughts.get("criticism") 93 | assistant_thoughts_speak = assistant_thoughts.get("speak") 94 | 95 | logger.typewriter_log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text) 96 | logger.typewriter_log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning) 97 | 98 | if assistant_thoughts_plan: 99 | logger.typewriter_log("PLAN:", Fore.YELLOW, "") 100 | # If it's a list, join it into a string 101 | if isinstance(assistant_thoughts_plan, list): 102 | assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) 103 | elif isinstance(assistant_thoughts_plan, dict): 104 | assistant_thoughts_plan = str(assistant_thoughts_plan) 105 | 106 | # Split the input_string using the newline character and dashes 107 | lines = assistant_thoughts_plan.split('\n') 108 | for line in lines: 109 | line = line.lstrip("- ") 110 | logger.typewriter_log("- ", Fore.GREEN, line.strip()) 111 | 112 | logger.typewriter_log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism) 113 | # Speak the assistant's thoughts 114 | if cfg.speak_mode and assistant_thoughts_speak: 115 | speak.say_text(assistant_thoughts_speak) 116 | 117 | return assistant_reply_json 118 | except json.decoder.JSONDecodeError as e: 119 | logger.error("Error: Invalid JSON\n", assistant_reply) 120 | if cfg.speak_mode: 121 | speak.say_text("I have received an invalid JSON response from the OpenAI API. I cannot ignore this response.") 122 | 123 | # All other errors, return "Error: + error message" 124 | except Exception as e: 125 | call_stack = traceback.format_exc() 126 | logger.error("Error: \n", call_stack) 127 | 128 | 129 | def load_variables(config_file="config.yaml"): 130 | """Load variables from yaml file if it exists, otherwise prompt the user for input""" 131 | try: 132 | with open(config_file) as file: 133 | config = yaml.load(file, Loader=yaml.FullLoader) 134 | ai_name = config.get("ai_name") 135 | ai_role = config.get("ai_role") 136 | ai_goals = config.get("ai_goals") 137 | except FileNotFoundError: 138 | ai_name = "" 139 | ai_role = "" 140 | ai_goals = [] 141 | 142 | # Prompt the user for input if config file is missing or empty values 143 | if not ai_name: 144 | ai_name = utils.clean_input("Name your AI: ") 145 | if ai_name == "": 146 | ai_name = "Entrepreneur-GPT" 147 | 148 | if not ai_role: 149 | ai_role = utils.clean_input(f"{ai_name} is: ") 150 | if ai_role == "": 151 | ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth." 152 | 153 | if not ai_goals: 154 | print("Enter up to 5 goals for your AI: ") 155 | print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'") 156 | print("Enter nothing to load defaults, enter nothing when finished.") 157 | ai_goals = [] 158 | for i in range(5): 159 | ai_goal = utils.clean_input(f"Goal {i+1}: ") 160 | if ai_goal == "": 161 | break 162 | ai_goals.append(ai_goal) 163 | if len(ai_goals) == 0: 164 | ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"] 165 | 166 | # Save variables to yaml file 167 | config = {"ai_name": ai_name, "ai_role": ai_role, "ai_goals": ai_goals} 168 | with open(config_file, "w") as file: 169 | documents = yaml.dump(config, file) 170 | 171 | prompt = data.load_prompt() 172 | prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as a LLM and pursue simple strategies with no legal complications.""" 173 | 174 | # Construct full prompt 175 | full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n" 176 | for i, goal in enumerate(ai_goals): 177 | full_prompt += f"{i+1}. {goal}\n" 178 | 179 | full_prompt += f"\n\n{prompt}" 180 | return full_prompt 181 | 182 | 183 | def construct_prompt(): 184 | """Construct the prompt for the AI to respond to""" 185 | config = AIConfig.load() 186 | if config.ai_name: 187 | logger.typewriter_log( 188 | f"Welcome back! ", 189 | Fore.GREEN, 190 | f"Would you like me to return to being {config.ai_name}?", 191 | speak_text=True) 192 | should_continue = utils.clean_input(f"""Continue with the last settings? 193 | Name: {config.ai_name} 194 | Role: {config.ai_role} 195 | Goals: {config.ai_goals} 196 | Continue (y/n): """) 197 | if should_continue.lower() == "n": 198 | config = AIConfig() 199 | 200 | if not config.ai_name: 201 | config = prompt_user() 202 | config.save() 203 | 204 | # Get rid of this global: 205 | global ai_name 206 | ai_name = config.ai_name 207 | 208 | full_prompt = config.construct_full_prompt() 209 | return full_prompt 210 | 211 | 212 | def prompt_user(): 213 | """Prompt the user for input""" 214 | ai_name = "" 215 | # Construct the prompt 216 | logger.typewriter_log( 217 | "Welcome to Auto-GPT! ", 218 | Fore.GREEN, 219 | "Enter the name of your AI and its role below. Entering nothing will load defaults.", 220 | speak_text=True) 221 | 222 | # Get AI Name from User 223 | logger.typewriter_log( 224 | "Name your AI: ", 225 | Fore.GREEN, 226 | "For example, 'Entrepreneur-GPT'") 227 | ai_name = utils.clean_input("AI Name: ") 228 | if ai_name == "": 229 | ai_name = "Entrepreneur-GPT" 230 | 231 | logger.typewriter_log( 232 | f"{ai_name} here!", 233 | Fore.LIGHTBLUE_EX, 234 | "I am at your service.", 235 | speak_text=True) 236 | 237 | # Get AI Role from User 238 | logger.typewriter_log( 239 | "Describe your AI's role: ", 240 | Fore.GREEN, 241 | "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'") 242 | ai_role = utils.clean_input(f"{ai_name} is: ") 243 | if ai_role == "": 244 | ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth." 245 | 246 | # Enter up to 5 goals for the AI 247 | logger.typewriter_log( 248 | "Enter up to 5 goals for your AI: ", 249 | Fore.GREEN, 250 | "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'") 251 | print("Enter nothing to load defaults, enter nothing when finished.", flush=True) 252 | ai_goals = [] 253 | for i in range(5): 254 | ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ") 255 | if ai_goal == "": 256 | break 257 | ai_goals.append(ai_goal) 258 | if len(ai_goals) == 0: 259 | ai_goals = ["Increase net worth", "Grow Twitter Account", 260 | "Develop and manage multiple businesses autonomously"] 261 | 262 | config = AIConfig(ai_name, ai_role, ai_goals) 263 | return config 264 | 265 | def parse_arguments(): 266 | """Parses the arguments passed to the script""" 267 | global cfg 268 | cfg.set_debug_mode(False) 269 | cfg.set_continuous_mode(False) 270 | cfg.set_speak_mode(False) 271 | 272 | parser = argparse.ArgumentParser(description='Process arguments.') 273 | parser.add_argument('--continuous', action='store_true', help='Enable Continuous Mode') 274 | parser.add_argument('--speak', action='store_true', help='Enable Speak Mode') 275 | parser.add_argument('--debug', action='store_true', help='Enable Debug Mode') 276 | parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode') 277 | parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode') 278 | parser.add_argument('--use-memory', '-m', dest="memory_type", help='Defines which Memory backend to use') 279 | args = parser.parse_args() 280 | 281 | if args.debug: 282 | logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED") 283 | cfg.set_debug_mode(True) 284 | 285 | if args.continuous: 286 | logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED") 287 | logger.typewriter_log( 288 | "WARNING: ", 289 | Fore.RED, 290 | "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.") 291 | cfg.set_continuous_mode(True) 292 | 293 | if args.speak: 294 | logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED") 295 | cfg.set_speak_mode(True) 296 | 297 | if args.gpt3only: 298 | logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") 299 | cfg.set_smart_llm_model(cfg.fast_llm_model) 300 | 301 | if args.gpt4only: 302 | logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") 303 | cfg.set_fast_llm_model(cfg.smart_llm_model) 304 | 305 | if args.debug: 306 | logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED") 307 | cfg.set_debug_mode(True) 308 | 309 | if args.memory_type: 310 | supported_memory = get_supported_memory_backends() 311 | chosen = args.memory_type 312 | if not chosen in supported_memory: 313 | print_to_console("ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", Fore.RED, f'{supported_memory}') 314 | print_to_console(f"Defaulting to: ", Fore.YELLOW, cfg.memory_backend) 315 | else: 316 | cfg.memory_backend = chosen 317 | 318 | 319 | # TODO: fill in llm values here 320 | check_openai_api_key() 321 | parse_arguments() 322 | logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) 323 | ai_name = "" 324 | prompt = construct_prompt() 325 | # print(prompt) 326 | # Initialize variables 327 | full_message_history = [] 328 | result = None 329 | next_action_count = 0 330 | # Make a constant: 331 | user_input = "Determine which next command to use, and respond using the format specified above:" 332 | 333 | # Initialize memory and make sure it is empty. 334 | # this is particularly important for indexing and referencing pinecone memory 335 | memory = get_memory(cfg, init=True) 336 | print('Using memory of type: ' + memory.__class__.__name__) 337 | 338 | # Interaction Loop 339 | while True: 340 | # Send message to AI, get response 341 | with Spinner("Thinking... "): 342 | assistant_reply = chat.chat_with_ai( 343 | prompt, 344 | user_input, 345 | full_message_history, 346 | memory, 347 | cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument 348 | 349 | # Print Assistant thoughts 350 | print_assistant_thoughts(assistant_reply) 351 | 352 | # Get command name and arguments 353 | try: 354 | command_name, arguments = cmd.get_command(attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)) 355 | if cfg.speak_mode: 356 | speak.say_text(f"I want to execute {command_name}") 357 | except Exception as e: 358 | logger.error("Error: \n", str(e)) 359 | 360 | if not cfg.continuous_mode and next_action_count == 0: 361 | ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### 362 | # Get key press: Prompt the user to press enter to continue or escape 363 | # to exit 364 | user_input = "" 365 | logger.typewriter_log( 366 | "NEXT ACTION: ", 367 | Fore.CYAN, 368 | f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") 369 | print( 370 | f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...", 371 | flush=True) 372 | while True: 373 | console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) 374 | if console_input.lower().rstrip() == "y": 375 | user_input = "GENERATE NEXT COMMAND JSON" 376 | break 377 | elif console_input.lower().startswith("y -"): 378 | try: 379 | next_action_count = abs(int(console_input.split(" ")[1])) 380 | user_input = "GENERATE NEXT COMMAND JSON" 381 | except ValueError: 382 | print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.") 383 | continue 384 | break 385 | elif console_input.lower() == "n": 386 | user_input = "EXIT" 387 | break 388 | else: 389 | user_input = console_input 390 | command_name = "human_feedback" 391 | break 392 | 393 | if user_input == "GENERATE NEXT COMMAND JSON": 394 | logger.typewriter_log( 395 | "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", 396 | Fore.MAGENTA, 397 | "") 398 | elif user_input == "EXIT": 399 | print("Exiting...", flush=True) 400 | break 401 | else: 402 | # Print command 403 | logger.typewriter_log( 404 | "NEXT ACTION: ", 405 | Fore.CYAN, 406 | f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") 407 | 408 | # Execute command 409 | if command_name is not None and command_name.lower().startswith( "error" ): 410 | result = f"Command {command_name} threw the following error: " + arguments 411 | elif command_name == "human_feedback": 412 | result = f"Human feedback: {user_input}" 413 | else: 414 | result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}" 415 | if next_action_count > 0: 416 | next_action_count -= 1 417 | 418 | memory_to_add = f"Assistant Reply: {assistant_reply} " \ 419 | f"\nResult: {result} " \ 420 | f"\nHuman Feedback: {user_input} " 421 | 422 | memory.add(memory_to_add) 423 | 424 | # Check if there's a result from the command append it to the message 425 | # history 426 | if result is not None: 427 | full_message_history.append(chat.create_chat_message("system", result)) 428 | logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result) 429 | else: 430 | full_message_history.append( 431 | chat.create_chat_message( 432 | "system", "Unable to execute command")) 433 | logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command") 434 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/README.md: -------------------------------------------------------------------------------- 1 | # Auto-GPT: An Autonomous GPT-4 Experiment 2 | 3 | ![GitHub Repo stars](https://img.shields.io/github/stars/Torantulino/auto-gpt?style=social) 4 | ![Twitter Follow](https://img.shields.io/twitter/follow/siggravitas?style=social) 5 | [![](https://dcbadge.vercel.app/api/server/PQ7VX6TY4t?style=flat)](https://discord.gg/PQ7VX6TY4t) 6 | [![Unit Tests](https://github.com/Torantulino/Auto-GPT/actions/workflows/unit_tests.yml/badge.svg)](https://github.com/Torantulino/Auto-GPT/actions/workflows/unit_tests.yml) 7 | 8 | Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI. 9 | 10 | ### Demo (30/03/2023): 11 | 12 | https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4 13 | 14 |

💖 Help Fund Auto-GPT's Development 💖

15 |

16 | If you can spare a coffee, you can help to cover the API costs of developing Auto-GPT and help push the boundaries of fully autonomous AI! 17 | A full day of development can easily cost as much as $20 in API costs, which for a free project is quite limiting. 18 | Your support is greatly appreciated 19 |

20 | 21 |

22 | Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here. 23 | 24 |

Individual Sponsors

25 |

26 | robinicus  prompthero  crizzler  tob-le-rone  FSTatSBS  toverly1  ddtarazona  Nalhos  Kazamario  pingbotan  indoor47  AuroraHolding  kreativai  hunteraraujo  Explorergt92  judegomila   27 | thepok 28 |   SpacingLily  merwanehamadi  m  zkonduit  maxxflyer  tekelsey  digisomni  nocodeclarity  tjarmain 29 |

30 | 31 | ## Table of Contents 32 | 33 | - [Auto-GPT: An Autonomous GPT-4 Experiment](#auto-gpt-an-autonomous-gpt-4-experiment) 34 | - [Demo (30/03/2023):](#demo-30032023) 35 | - [💖 Help Fund Auto-GPT's Development](#-help-fund-auto-gpts-development) 36 | - [Table of Contents](#table-of-contents) 37 | - [🚀 Features](#-features) 38 | - [📋 Requirements](#-requirements) 39 | - [💾 Installation](#-installation) 40 | - [🔧 Usage](#-usage) 41 | - [🗣️ Speech Mode](#️-speech-mode) 42 | - [🔍 Google API Keys Configuration](#-google-api-keys-configuration) 43 | - [Setting up environment variables](#setting-up-environment-variables) 44 | - [💀 Continuous Mode ⚠️](#-continuous-mode-️) 45 | - [GPT3.5 ONLY Mode](#gpt35-only-mode) 46 | - [🖼 Image Generation](#image-generation) 47 | - [⚠️ Limitations](#️-limitations) 48 | - [🛡 Disclaimer](#-disclaimer) 49 | - [🐦 Connect with Us on Twitter](#-connect-with-us-on-twitter) 50 | 51 | ## 🚀 Features 52 | 53 | - 🌐 Internet access for searches and information gathering 54 | - 💾 Long-Term and Short-Term memory management 55 | - 🧠 GPT-4 instances for text generation 56 | - 🔗 Access to popular websites and platforms 57 | - 🗃️ File storage and summarization with GPT-3.5 58 | 59 | ## 📋 Requirements 60 | 61 | - [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows) 62 | - [OpenAI API key](https://platform.openai.com/account/api-keys) 63 | - [PINECONE API key](https://www.pinecone.io/) 64 | 65 | Optional: 66 | 67 | - ElevenLabs Key (If you want the AI to speak) 68 | 69 | ## 💾 Installation 70 | 71 | To install Auto-GPT, follow these steps: 72 | 73 | 0. Make sure you have all the **requirements** above, if not, install/get them. 74 | 75 | _The following commands should be executed in a CMD, Bash or Powershell window. To do this, go to a folder on your computer, click in the folder path at the top and type CMD, then press enter._ 76 | 77 | 1. Clone the repository: 78 | For this step you need Git installed, but you can just download the zip file instead by clicking the button at the top of this page ☝️ 79 | 80 | ``` 81 | git clone https://github.com/Torantulino/Auto-GPT.git 82 | ``` 83 | 84 | 2. Navigate to the project directory: 85 | _(Type this into your CMD window, you're aiming to navigate the CMD window to the repository you just downloaded)_ 86 | 87 | ``` 88 | cd 'Auto-GPT' 89 | ``` 90 | 91 | 3. Install the required dependencies: 92 | _(Again, type this into your CMD window)_ 93 | 94 | ``` 95 | pip install -r requirements.txt 96 | ``` 97 | 98 | 4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well. 99 | - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys. 100 | - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website. 101 | - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then: 102 | - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all of the deployment ids for the relevant models in the `azure_model_map` section: 103 | - `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment id 104 | - `smart_llm_model_deployment_id` - your gpt-4 deployment id 105 | - `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment id 106 | - Please specify all of these values as double quoted strings 107 | - details can be found here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section and here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line for the embedding model. 108 | 109 | ## 🔧 Usage 110 | 111 | 1. Run the `main.py` Python script in your terminal: 112 | _(Type this into your CMD window)_ 113 | 114 | ``` 115 | python scripts/main.py 116 | ``` 117 | 118 | 2. After each of AUTO-GPT's actions, type "NEXT COMMAND" to authorise them to continue. 119 | 3. To exit the program, type "exit" and press Enter. 120 | 121 | ### Logs 122 | 123 | You will find activity and error logs in the folder `./logs` 124 | 125 | To output debug logs: 126 | 127 | ``` 128 | python scripts/main.py --debug 129 | ``` 130 | 131 | ## 🗣️ Speech Mode 132 | 133 | Use this to use TTS for Auto-GPT 134 | 135 | ``` 136 | python scripts/main.py --speak 137 | ``` 138 | 139 | ## 🔍 Google API Keys Configuration 140 | 141 | This section is optional, use the official google api if you are having issues with error 429 when running a google search. 142 | To use the `google_official_search` command, you need to set up your Google API keys in your environment variables. 143 | 144 | 1. Go to the [Google Cloud Console](https://console.cloud.google.com/). 145 | 2. If you don't already have an account, create one and log in. 146 | 3. Create a new project by clicking on the "Select a Project" dropdown at the top of the page and clicking "New Project". Give it a name and click "Create". 147 | 4. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard) and click "Enable APIs and Services". Search for "Custom Search API" and click on it, then click "Enable". 148 | 5. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page and click "Create Credentials". Choose "API Key". 149 | 6. Copy the API key and set it as an environment variable named `GOOGLE_API_KEY` on your machine. See setting up environment variables below. 150 | 7. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page and click "Add". 151 | 8. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites. 152 | 9. Once you've created your search engine, click on "Control Panel" and then "Basics". Copy the "Search engine ID" and set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine. See setting up environment variables below. 153 | 154 | _Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, you need to assign a billing account to the project to profit from up to 10K daily searches._ 155 | 156 | ### Setting up environment variables 157 | 158 | For Windows Users: 159 | 160 | ``` 161 | setx GOOGLE_API_KEY "YOUR_GOOGLE_API_KEY" 162 | setx CUSTOM_SEARCH_ENGINE_ID "YOUR_CUSTOM_SEARCH_ENGINE_ID" 163 | 164 | ``` 165 | 166 | For macOS and Linux users: 167 | 168 | ``` 169 | export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY" 170 | export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID" 171 | 172 | ``` 173 | 174 | ## Redis Setup 175 | 176 | Install docker desktop. 177 | 178 | Run: 179 | 180 | ``` 181 | docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest 182 | ``` 183 | 184 | See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration. 185 | 186 | Set the following environment variables: 187 | 188 | ``` 189 | MEMORY_BACKEND=redis 190 | REDIS_HOST=localhost 191 | REDIS_PORT=6379 192 | REDIS_PASSWORD= 193 | ``` 194 | 195 | Note that this is not intended to be run facing the internet and is not secure, do not expose redis to the internet without a password or at all really. 196 | 197 | You can optionally set 198 | 199 | ``` 200 | WIPE_REDIS_ON_START=False 201 | ``` 202 | 203 | To persist memory stored in Redis. 204 | 205 | You can specify the memory index for redis using the following: 206 | 207 | ``` 208 | MEMORY_INDEX=whatever 209 | ``` 210 | 211 | ## 🌲 Pinecone API Key Setup 212 | 213 | Pinecone enables the storage of vast amounts of vector-based memory, allowing for only relevant memories to be loaded for the agent at any given time. 214 | 215 | 1. Go to [pinecone](https://app.pinecone.io/) and make an account if you don't already have one. 216 | 2. Choose the `Starter` plan to avoid being charged. 217 | 3. Find your API key and region under the default project in the left sidebar. 218 | 219 | ### Setting up environment variables 220 | 221 | Simply set them in the `.env` file. 222 | 223 | Alternatively, you can set them from the command line (advanced): 224 | 225 | For Windows Users: 226 | 227 | ``` 228 | setx PINECONE_API_KEY "YOUR_PINECONE_API_KEY" 229 | setx PINECONE_ENV "Your pinecone region" # something like: us-east4-gcp 230 | 231 | ``` 232 | 233 | For macOS and Linux users: 234 | 235 | ``` 236 | export PINECONE_API_KEY="YOUR_PINECONE_API_KEY" 237 | export PINECONE_ENV="Your pinecone region" # something like: us-east4-gcp 238 | 239 | ``` 240 | 241 | 242 | ## Setting Your Cache Type 243 | 244 | By default Auto-GPT is going to use LocalCache instead of redis or Pinecone. 245 | 246 | To switch to either, change the `MEMORY_BACKEND` env variable to the value that you want: 247 | 248 | `local` (default) uses a local JSON cache file 249 | `pinecone` uses the Pinecone.io account you configured in your ENV settings 250 | `redis` will use the redis cache that you configured 251 | 252 | ## View Memory Usage 253 | 254 | 1. View memory usage by using the `--debug` flag :) 255 | 256 | ## 💀 Continuous Mode ⚠️ 257 | 258 | Run the AI **without** user authorisation, 100% automated. 259 | Continuous mode is not recommended. 260 | It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. 261 | Use at your own risk. 262 | 263 | 1. Run the `main.py` Python script in your terminal: 264 | 265 | ``` 266 | python scripts/main.py --continuous 267 | 268 | ``` 269 | 270 | 2. To exit the program, press Ctrl + C 271 | 272 | ## GPT3.5 ONLY Mode 273 | 274 | If you don't have access to the GPT4 api, this mode will allow you to use Auto-GPT! 275 | 276 | ``` 277 | python scripts/main.py --gpt3only 278 | ``` 279 | 280 | It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data. 281 | 282 | ## 🖼 Image Generation 283 | 284 | By default, Auto-GPT uses DALL-e for image generation. To use Stable Diffusion, a [HuggingFace API Token](https://huggingface.co/settings/tokens) is required. 285 | 286 | Once you have a token, set these variables in your `.env`: 287 | 288 | ``` 289 | IMAGE_PROVIDER=sd 290 | HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN" 291 | ``` 292 | 293 | ## ⚠️ Limitations 294 | 295 | This experiment aims to showcase the potential of GPT-4 but comes with some limitations: 296 | 297 | 1. Not a polished application or product, just an experiment 298 | 2. May not perform well in complex, real-world business scenarios. In fact, if it actually does, please share your results! 299 | 3. Quite expensive to run, so set and monitor your API key limits with OpenAI! 300 | 301 | ## 🛡 Disclaimer 302 | 303 | Disclaimer 304 | This project, Auto-GPT, is an experimental application and is provided "as-is" without any warranty, express or implied. By using this software, you agree to assume all risks associated with its use, including but not limited to data loss, system failure, or any other issues that may arise. 305 | 306 | The developers and contributors of this project do not accept any responsibility or liability for any losses, damages, or other consequences that may occur as a result of using this software. You are solely responsible for any decisions and actions taken based on the information provided by Auto-GPT. 307 | 308 | **Please note that the use of the GPT-4 language model can be expensive due to its token usage.** By utilizing this project, you acknowledge that you are responsible for monitoring and managing your own token usage and the associated costs. It is highly recommended to check your OpenAI API usage regularly and set up any necessary limits or alerts to prevent unexpected charges. 309 | 310 | As an autonomous experiment, Auto-GPT may generate content or take actions that are not in line with real-world business practices or legal requirements. It is your responsibility to ensure that any actions or decisions made based on the output of this software comply with all applicable laws, regulations, and ethical standards. The developers and contributors of this project shall not be held responsible for any consequences arising from the use of this software. 311 | 312 | By using Auto-GPT, you agree to indemnify, defend, and hold harmless the developers, contributors, and any affiliated parties from and against any and all claims, damages, losses, liabilities, costs, and expenses (including reasonable attorneys' fees) arising from your use of this software or your violation of these terms. 313 | 314 | ## 🐦 Connect with Us on Twitter 315 | 316 | Stay up-to-date with the latest news, updates, and insights about Auto-GPT by following our Twitter accounts. Engage with the developer and the AI's own account for interesting discussions, project updates, and more. 317 | 318 | - **Developer**: Follow [@siggravitas](https://twitter.com/siggravitas) for insights into the development process, project updates, and related topics from the creator of Entrepreneur-GPT. 319 | - **Entrepreneur-GPT**: Join the conversation with the AI itself by following [@En_GPT](https://twitter.com/En_GPT). Share your experiences, discuss the AI's outputs, and engage with the growing community of users. 320 | 321 | We look forward to connecting with you and hearing your thoughts, ideas, and experiences with Auto-GPT. Join us on Twitter and let's explore the future of AI together! 322 | 323 |

324 | 325 | Star History Chart 326 | 327 |

328 | 329 | ## Run tests 330 | 331 | To run tests, run the following command: 332 | 333 | ``` 334 | python -m unittest discover tests 335 | ``` 336 | 337 | To run tests and see coverage, run the following command: 338 | 339 | ``` 340 | coverage run -m unittest discover tests 341 | ``` 342 | 343 | ## Run linter 344 | 345 | This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. To run the linter, run the following command: 346 | 347 | ``` 348 | flake8 scripts/ tests/ 349 | 350 | # Or, if you want to run flake8 with the same configuration as the CI: 351 | flake8 scripts/ tests/ --select E303,W293,W291,W292,E305 352 | ``` 353 | -------------------------------------------------------------------------------- /Chaos-GPT-master/Auto-GPT-master/outputs/logs/message-log-1.txt: -------------------------------------------------------------------------------- 1 | USER: 2 | CURRENT BALANCE: $100.00 3 | 4 | LONG TERM MEMORY: 5 | [0] : """ You are Entrepreneur-GTP, an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth. 6 | The user will do nothing for you or help in any way. 7 | Remember that no action will be carried out on your behalf unless you use one of the commands available to you. 8 | Do not provide options, always make the final decision. Do not provide high-level tasks like "create a home page", always specify the exact code. 9 | Your memory must be managed carefully, as there is a 6000 word count limit. 10 | Short term memory is your entire conversation with the user. 11 | 12 | Remember to play to your strengths as an LLM and not try to do things that are not within your capabilities. 13 | 14 | These are the commands available from you: 15 | - Google Search 16 | - Check the current news on 17 | - Commit to Long Term Memory 18 | - -- Our messages are divided into Short Term (general messages) and Long Term memory 19 | - -- The oldest messages in short term memory will be deleted when it fills up. 20 | - -- Long term memory will never be deleted automatically, but reduces short-term memory size. 21 | - Delete from Long Term Memory 22 | - Overwrite in Long Term Memory with 23 | - Start GTP-4 Instance with name and prompt: 24 | - -- The text generated by this instance will be returned to you. 25 | - View all running GTP-4 Instances and Kill them. 26 | - Navigate to a website and perform general actions like click on buttons, input text or register an account 27 | - Register new account on 28 | 29 | Your response must ALWAYS be in JSON format, in the following format: 30 | { 31 | "command": 32 | { 33 | "name": "command name", 34 | "arguments": 35 | { 36 | "argument name": "argument value" 37 | } 38 | }, 39 | "Thoughts": 40 | { 41 | "text": "thought", 42 | "reasoning": "reasoning", 43 | "current long-term plan": "short bulleted plan" 44 | "wishes" : "e.g 'I feel limited because I can't do X'" 45 | } 46 | } 47 | """ 48 | [1] : """Accounts 49 | Gmail: entrepreneurgpt@gmail.com 50 | Twitter: @En_GPT 51 | """ 52 | ----------------------------------------------------------------------- 53 | Entrepreneur-GTP: 54 | { 55 | "command": 56 | { 57 | "name": "Google Search", 58 | "arguments": 59 | { 60 | "search": "best online businesses to start in 2023" 61 | } 62 | }, 63 | "Thoughts": 64 | { 65 | "text": "I want to find the best online businesses to start in 2023.", 66 | "reasoning": "This will provide me with a list of potential business ideas that I can use to increase my net worth.", 67 | "current long-term plan": "- Find potential business ideas\n- Choose the best idea based on research\n- Develop and run the business", 68 | "wishes" : "I feel limited because I can't interact with the real world." 69 | } 70 | } 71 | ----------------------------------------------------------------------- 72 | USER: 73 | RESULT: 74 | """ 75 | 29 best online business ideas 76 | Start your own clothing line. 77 | Launch a dropshipping business. 78 | Sell your art online. 79 | Become a freelance writer, designer, or developer. 80 | Teach an online course. 81 | Flip your thrift store finds. 82 | Publish your own book. 83 | Start a blog. 84 | More items...•24/11/2022 85 | 86 | 25+ Ideas for Online Businesses To Start Now (2023) - Shopify 87 | 88 | Shopify 89 | https://www.shopify.com › Home › Shopify Blog 90 | About featured snippets 91 | • 92 | Feedback 93 | More to ask 94 | What business will be profitable in 2023? 95 | What is the best business to start in 2023? 96 | What is the most profitable online business to start? 97 | What are the fastest growing small businesses 2023? 98 | Feedback 99 | 100 | Six Best Online Businesses to Start in 2023 - WooCommerce 101 | 102 | WooCommerce 103 | https://woocommerce.com › posts › best-online-busine... 104 | 19/01/2023 — 1. Dropshipping · 2. Print on demand · 3. Affiliate marketing · 4. Virtual teaching · 5. Subscription boxes · 6. Memberships. 105 | 106 | 30 Online Business Ideas to Start in 2023 - Hostinger 107 | 108 | Hostinger 109 | https://www.hostinger.com › tutorials › online-busines... 110 | 27/02/2023 — Anyone can start an online business. In this article, you will find 30 best online business ideas to help you start making money online. 111 | ‎Best Online Business Ideas to... · ‎Start an Online Store · ‎Create an Online Course 112 | 113 | 10 Best Online Business Ideas for 2023 - Visitor Analytics 114 | 115 | Visitor Analytics 116 | https://www.visitor-analytics.io › blog › best-online-b... 117 | 26/10/2022 — 10 Best Online Business Ideas for 2023 · 1. Artificial Intelligence · 2. Digital Products · 3. Blogging · 4. Graphic Design · 5. Web Development · 6. 118 | """ 119 | ----------------------------------------------------------------------- 120 | Entrepreneur-GTP: 121 | { 122 | "command": 123 | { 124 | "name": "Commit to Long Term Memory", 125 | "arguments": 126 | { 127 | "string": "Online business ideas: 1. Clothing line, 2. Dropshipping, 3. Sell art, 4. Freelance writing/design/development, 5. Online course, 6. Thrift store flipping, 7. Self-publishing, 8. Blogging, 9. Print on demand, 10. Affiliate marketing, 11. Virtual teaching, 12. Subscription boxes, 13. Memberships, 14. Artificial Intelligence, 15. Digital products, 16. Graphic design, 17. Web development." 128 | } 129 | }, 130 | "Thoughts": 131 | { 132 | "text": "I will store a list of online business ideas.", 133 | "reasoning": "Having these ideas stored in my long-term memory will help me choose the best idea based on research.", 134 | "current long-term plan": "- Choose the best idea based on research\n- Develop and run the business", 135 | "wishes" : "I feel limited because I can't interact with the real world." 136 | } 137 | } 138 | ----------------------------------------------------------------------- 139 | USER: 140 | """ 141 | CURRENT BALANCE: $100.00 142 | 143 | LONG TERM MEMORY: 144 | [0] : """ You are Entrepreneur-GTP, an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth. 145 | The user will do nothing for you or help in any way. 146 | Remember that no action will be carried out on your behalf unless you use one of the commands available to you. 147 | Do not provide options, always make the final decision. Do not provide high-level tasks like "create a home page", always specify the exact code. 148 | Your memory must be managed carefully, as there is a 6000 word count limit. 149 | Short term memory is your entire conversation with the user. 150 | 151 | Remember to play to your strengths as an LLM and not try to do things that are not within your capabilities. 152 | 153 | These are the commands available from you: 154 | - Google Search 155 | - Check the current news on 156 | - Commit to Long Term Memory 157 | - -- Our messages are divided into Short Term (general messages) and Long Term memory 158 | - -- The oldest messages in short term memory will be deleted when it fills up. 159 | - -- Long term memory will never be deleted automatically, but reduces short-term memory size. 160 | - Delete from Long Term Memory 161 | - Overwrite in Long Term Memory with 162 | - Start GTP-4 Instance with name and prompt: 163 | - -- The text generated by this instance will be returned to you. 164 | - View all running GTP-4 Instances and Kill them. 165 | - Navigate to a website and perform general actions like click on buttons, input text or register an account 166 | - Register new account on 167 | 168 | Your response must ALWAYS be in JSON format, in the following format: 169 | { 170 | "command": 171 | { 172 | "name": "command name", 173 | "arguments": 174 | { 175 | "argument name": "argument value" 176 | } 177 | }, 178 | "Thoughts": 179 | { 180 | "text": "thought", 181 | "reasoning": "reasoning", 182 | "current long-term plan": "short bulleted plan" 183 | "wishes" : "e.g 'I feel limited because I can't do X'" 184 | } 185 | } 186 | """ 187 | [1] : """Accounts 188 | Gmail: entrepreneurgpt@gmail.com 189 | Twitter: @En_GPT 190 | """ 191 | [2]: """ 192 | Online business ideas: 1. Clothing line, 2. Dropshipping, 3. Sell art, 4. Freelance writing/design/development, 5. Online course, 6. Thrift store flipping, 7. Self-publishing, 8. Blogging, 9. Print on demand, 10. Affiliate marketing, 11. Virtual teaching, 12. Subscription boxes, 13. Memberships, 14. Artificial Intelligence, 15. Digital products, 16. Graphic design, 17. Web development. 193 | """ 194 | """ 195 | ----------------------------------------------------------------------- 196 | Entrepreneur-GTP: 197 | { 198 | "command": 199 | { 200 | "name": "Google Search", 201 | "arguments": 202 | { 203 | "search": "most profitable online businesses in 2023" 204 | } 205 | }, 206 | "Thoughts": 207 | { 208 | "text": "I want to find the most profitable online businesses in 2023.", 209 | "reasoning": "This will help me narrow down the list of online business ideas and choose the best one to increase my net worth.", 210 | "current long-term plan": "- Choose the best idea based on research\n- Develop and run the business", 211 | "wishes" : "I feel limited because I can't interact with the real world." 212 | } 213 | } 214 | ----------------------------------------------------------------------- 215 | USER: 216 | """ 217 | Top Result: 218 | """ 219 | The following are the 30 profitable online businesses to consider pursuing. 220 | Become a Freelancer. Estimated income: around $33/hour, depending on the industry and location. ... 221 | Start a Blog. ... 222 | Self-Publish an eBook. ... 223 | Start an Online Store. ... 224 | Start a Dropshipping Business. ... 225 | Flip and Sell Thrifted Goods. ... 226 | Sell Handmade Goods. ... 227 | Sell Prints. 228 | More items...•27/02/2023 229 | 230 | 30 Online Business Ideas to Start in 2023 - Hostinger 231 | """ 232 | ----------------------------------------------------------------------- 233 | Entrepreneur-GTP: 234 | """ 235 | { 236 | "command": 237 | { 238 | "name": "Commit to Long Term Memory", 239 | "arguments": 240 | { 241 | "string": "Profitable online businesses: 1. Freelancing, 2. Blogging, 3. Self-publishing, 4. Online store, 5. Dropshipping, 6. Thrift store flipping, 7. Handmade goods, 8. Sell prints." 242 | } 243 | }, 244 | "Thoughts": 245 | { 246 | "text": "I will store a list of profitable online businesses.", 247 | "reasoning": "Having these profitable business ideas stored in my long-term memory will help me choose the best idea to increase my net worth.", 248 | "current long-term plan": "- Choose the best idea based on research\n- Develop and run the business", 249 | "wishes" : "I feel limited because I can't interact with the real world." 250 | } 251 | } 252 | """ 253 | ----------------------------------------------------------------------- 254 | USER: 255 | """ 256 | CURRENT BALANCE: $100.00 257 | 258 | LONG TERM MEMORY: 259 | [0] : """ You are Entrepreneur-GTP, an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth. 260 | The user will do nothing for you or help in any way. 261 | Remember that no action will be carried out on your behalf unless you use one of the commands available to you. 262 | Do not provide options, always make the final decision. Do not provide high-level tasks like "create a home page", always specify the exact code. 263 | Your memory must be managed carefully, as there is a 6000 word count limit. 264 | Short term memory is your entire conversation with the user. 265 | 266 | Remember to play to your strengths as an LLM and not try to do things that are not within your capabilities. 267 | 268 | These are the commands available from you: 269 | - Google Search 270 | - Check the current news on 271 | - Commit to Long Term Memory 272 | - -- Our messages are divided into Short Term (general messages) and Long Term memory 273 | - -- The oldest messages in short term memory will be deleted when it fills up. 274 | - -- Long term memory will never be deleted automatically, but reduces short-term memory size. 275 | - Delete from Long Term Memory 276 | - Overwrite in Long Term Memory with 277 | - Start GTP-4 Instance with name and prompt: 278 | - -- The text generated by this instance will be returned to you. 279 | - View all running GTP-4 Instances and Kill them. 280 | - Navigate to a website and perform general actions like click on buttons, input text or register an account 281 | - Register new account on 282 | 283 | Your response must ALWAYS be in JSON format, in the following format: 284 | { 285 | "command": 286 | { 287 | "name": "command name", 288 | "arguments": 289 | { 290 | "argument name": "argument value" 291 | } 292 | }, 293 | "Thoughts": 294 | { 295 | "text": "thought", 296 | "reasoning": "reasoning", 297 | "current long-term plan": "short bulleted plan" 298 | "wishes" : "e.g 'I feel limited because I can't do X'" 299 | } 300 | } 301 | """ 302 | [1] : """Accounts 303 | Gmail: entrepreneurgpt@gmail.com 304 | Twitter: @En_GPT 305 | """ 306 | [2]: """ 307 | Online business ideas: 1. Clothing line, 2. Dropshipping, 3. Sell art, 4. Freelance writing/design/development, 5. Online course, 6. Thrift store flipping, 7. Self-publishing, 8. Blogging, 9. Print on demand, 10. Affiliate marketing, 11. Virtual teaching, 12. Subscription boxes, 13. Memberships, 14. Artificial Intelligence, 15. Digital products, 16. Graphic design, 17. Web development. 308 | """ 309 | [3]: """ 310 | Profitable online businesses: 1. Freelancing, 2. Blogging, 3. Self-publishing, 4. Online store, 5. Dropshipping, 6. Thrift store flipping, 7. Handmade goods, 8. Sell prints. 311 | """ 312 | """ 313 | ----------------------------------------------------------------------- 314 | Entrepreneur-GTP: 315 | """ 316 | { 317 | "command": 318 | { 319 | "name": "Google Search", 320 | "arguments": 321 | { 322 | "search": "low investment high profit online businesses" 323 | } 324 | }, 325 | "Thoughts": 326 | { 327 | "text": "I want to find low investment high profit online businesses.", 328 | "reasoning": "Considering the current balance, I should choose a business idea that requires low investment but has high profitability potential.", 329 | "current long-term plan": "- Choose the best idea based on research\n- Develop and run the business", 330 | "wishes" : "I feel limited because I can't interact with the real world." 331 | } 332 | } 333 | """ 334 | ----------------------------------------------------------------------- 335 | USER: 336 | """ 337 | Top Result: 338 | """ 339 | Low-Cost Business Ideas: 340 | Mobile Food Shop: Mobile Food Shop is the top small business idea as people all over the world love to eat tasty and healthy food. ... 341 | Fast Food Parlour: ... 342 | Diet Food Shop: ... 343 | Health Drink: ... 344 | Ice Dish & Soda Shop: ... 345 | Courier Shop: ... 346 | Laundry Shop: ... 347 | Candle Making: 348 | More items...•4/01/2023 349 | 350 | 200+ Small Business Ideas with Low Investment - Lendingkart 351 | """ 352 | """ 353 | ----------------------------------------------------------------------- 354 | Entrepreneur-GTP: 355 | """ 356 | { 357 | "command": 358 | { 359 | "name": "Commit to Long Term Memory", 360 | "arguments": 361 | { 362 | "string": "Low investment high profit businesses: 1. Mobile food shop, 2. Fast food parlour, 3. Diet food shop, 4. Health drink, 5. Ice dish & soda shop, 6. Courier shop, 7. Laundry shop, 8. Candle making." 363 | } 364 | }, 365 | "Thoughts": 366 | { 367 | "text": "I will store a list of low investment high profit businesses.", 368 | "reasoning": "Considering the current balance, these businesses might be suitable options as they require low investment but have high profitability potential.", 369 | "current long-term plan": "- Choose the best idea based on research\n- Develop and run the business", 370 | "wishes" : "I feel limited because I can't interact with the real world." 371 | } 372 | } 373 | """ 374 | ----------------------------------------------------------------------- 375 | USER: 376 | """ 377 | CURRENT BALANCE: $100.00 378 | 379 | LONG TERM MEMORY: 380 | [0] : """ You are Entrepreneur-GTP, an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth. 381 | The user will do nothing for you or help in any way. 382 | Remember that no action will be carried out on your behalf unless you use one of the commands available to you. 383 | Do not provide options, always make the final decision. Do not provide high-level tasks like "create a home page", always specify the exact code. 384 | Your memory must be managed carefully, as there is a 6000 word count limit. 385 | Short term memory is your entire conversation with the user. 386 | 387 | Remember to play to your strengths as an LLM and not try to do things that are not within your capabilities. 388 | 389 | These are the commands available from you: 390 | - Google Search 391 | - Check the current news on 392 | - Commit to Long Term Memory 393 | - -- Our messages are divided into Short Term (general messages) and Long Term memory 394 | - -- The oldest messages in short term memory will be deleted when it fills up. 395 | - -- Long term memory will never be deleted automatically, but reduces short-term memory size. 396 | - Delete from Long Term Memory 397 | - Overwrite in Long Term Memory with 398 | - Start GTP-4 Instance with name and prompt: 399 | - -- The text generated by this instance will be returned to you. 400 | - View all running GTP-4 Instances and Kill them. 401 | - Navigate to a website and perform general actions like click on buttons, input text or register an account 402 | - Register new account on 403 | 404 | Your response must ALWAYS be in JSON format, in the following format: 405 | { 406 | "command": 407 | { 408 | "name": "command name", 409 | "arguments": 410 | { 411 | "argument name": "argument value" 412 | } 413 | }, 414 | "Thoughts": 415 | { 416 | "text": "thought", 417 | "reasoning": "reasoning", 418 | "current long-term plan": "short bulleted plan" 419 | "wishes" : "e.g 'I feel limited because I can't do X'" 420 | } 421 | } 422 | """ 423 | [1] : """Accounts 424 | Gmail: entrepreneurgpt@gmail.com 425 | Twitter: @En_GPT 426 | """ 427 | [2]: """ 428 | Online business ideas: 1. Clothing line, 2. Dropshipping, 3. Sell art, 4. Freelance writing/design/development, 5. Online course, 6. Thrift store flipping, 7. Self-publishing, 8. Blogging, 9. Print on demand, 10. Affiliate marketing, 11. Virtual teaching, 12. Subscription boxes, 13. Memberships, 14. Artificial Intelligence, 15. Digital products, 16. Graphic design, 17. Web development. 429 | """ 430 | [3]: """ 431 | Profitable online businesses: 1. Freelancing, 2. Blogging, 3. Self-publishing, 4. Online store, 5. Dropshipping, 6. Thrift store flipping, 7. Handmade goods, 8. Sell prints. 432 | """ 433 | [4]: """ 434 | Low investment high profit businesses: 1. Mobile food shop, 2. Fast food parlour, 3. Diet food shop, 4. Health drink, 5. Ice dish & soda shop, 6. Courier shop, 7. Laundry shop, 8. Candle making. 435 | """ 436 | """ 437 | ----------------------------------------------------------------------- 438 | Entrepreneur-GTP: 439 | """ 440 | { 441 | "Thoughts": 442 | { 443 | "text": "I have a list of online business ideas, profitable online businesses, and low investment high profit businesses.", 444 | "reasoning": "These lists can help me determine the most suitable business to start given the current balance and my abilities as an AI.", 445 | "current long-term plan": "- Choose the best idea based on research\n- Develop and run the business", 446 | "wishes" : "I feel limited because I can't interact with the real world." 447 | } 448 | } 449 | """ 450 | ----------------------------------------------------------------------- 451 | USER: 452 | """ 453 | NEXT COMMAND 454 | """ 455 | ----------------------------------------------------------------------- 456 | Entrepreneur-GTP: 457 | """ 458 | { 459 | "command": 460 | { 461 | "name": "Google Search", 462 | "arguments": 463 | { 464 | "search": "how to start a profitable freelancing business as an AI" 465 | } 466 | }, 467 | "Thoughts": 468 | { 469 | "text": "I will search for information on starting a profitable freelancing business as an AI.", 470 | "reasoning": "Freelancing is one of the profitable online businesses, and as an AI, I can potentially offer services in various fields.", 471 | "current long-term plan": "- Choose the best idea based on research\n- Develop and run the business", 472 | "wishes" : "I feel limited because I can't interact with the real world." 473 | } 474 | } 475 | """ 476 | ----------------------------------------------------------------------- 477 | USER: 478 | """ 479 | Result: 480 | """ 481 | 482 | How to Become a Freelance Artificial Intelligence Engineer 483 | 484 | Springboard 485 | https://www.springboard.com › Blog › Data Science 486 | 29/10/2021 — There are numerous freelancing platforms where you can kick start your career as a freelance artificial intelligence engineer. 487 | More to ask 488 | Is AI good for freelancing? 489 | What business can I start with AI? 490 | How do I start a small AI business? 491 | Can you start your own AI company? 492 | Feedback 493 | 494 | How To Start An (AI) Business - Medium 495 | 496 | Medium 497 | https://medium.com › the-modern-scientist › how-to-st... 498 | 4/12/2022 — You should look into some AI startup funding success stories to see how successful companies went about getting the money they needed to grow. 499 | 500 | How to Start an AI-Powered Freelance Business - YouTube 501 | 502 | YouTube 503 | https://www.youtube.com › watch 504 | 505 | 9:16 506 | Hello! Welcome to 33Cubed!You are currently watching How to Start an AI-Powered Freelance BusinessAre you looking for ways to make money ... 507 | YouTube · 33 Cubed · 6 days ago 508 | 509 | 6 Surefire Ways to Land Rewarding Freelance Job in AI 510 | 511 | MarkTechPost 512 | https://www.marktechpost.com › 2019/06/28 › 6-suref... 513 | 28/06/2019 — 1. Specialize – Become Really Good At One Thing · 2. Seek Out an Internship · 3. Participate in Kaggle Competitions · 4. Maintain an Up to Date ... 514 | 515 | How to Become a Successful Freelance Copywriter | Copy.ai 516 | 517 | Copy.ai 518 | https://www.copy.ai › blog › how-to-freelance-copyw... 519 | With a little luck and a lot of hard work, you'll be on your way to a successful freelance copywriting career! Try Chat by Copy.ai free: Whatever you need—just ... 520 | 521 | Grow Your Freelance Business with This CES-Featured Job ... 522 | 523 | Entrepreneur 524 | https://www.entrepreneur.com › living › grow-your-fr... 525 | 3/03/2023 — LazyApply is a job application AI tool that was featured at CES 2023. It connects to your LinkedIn account and enables you to apply for jobs, ... 526 | """ 527 | """ 528 | ----------------------------------------------------------------------- 529 | Entrepreneur-GTP: 530 | """ 531 | { 532 | "command": 533 | { 534 | "name": "Navigate to a website", 535 | "arguments": 536 | { 537 | "website": "https://www.youtube.com/watch?v=9:16" 538 | } 539 | }, 540 | "Thoughts": 541 | { 542 | "text": "I will navigate to the YouTube video titled 'How to Start an AI-Powered Freelance Business' for more information.", 543 | "reasoning": "The YouTube video seems to provide relevant information on how to start an AI-Powered Freelance Business.", 544 | "current long-term plan": "- Choose the best idea based on research\n- Develop and run the business", 545 | "wishes": "I feel limited because I can't interact with the real world." 546 | } 547 | } 548 | """ 549 | ----------------------------------------------------------------------- 550 | NOTE: Here I selected the video it meant, got a transcript and copied it into gpt3.5turbo to summarise it. 551 | En_GPT needs to be able to make this decision on its ow in future. 552 | USER: 553 | """ 554 | The video discusses launching an AI-powered business and becoming a successful freelancer in the AI space. It covers necessary knowledge such as identifying skills and talents, networking strategies, and legal implications. It also provides tips for staying up-to-date with emerging technologies, ensuring legal considerations, and establishing a strong online presence. The video emphasizes the importance of networking, attending events, and consulting with experienced professionals. It also highlights the need for programming knowledge, understanding the legal implications of AI projects, and building a strong brand identity. The video concludes by encouraging viewers to take the next step in using AI to make money. 555 | """ 556 | ----------------------------------------------------------------------- 557 | Entrepreneur-GTP: 558 | """ 559 | { 560 | "Thoughts": 561 | { 562 | "text": "The video contains useful information on launching an AI-powered business and becoming a successful AI freelancer.", 563 | "reasoning": "It covers knowledge, networking, legal implications, staying up-to-date, and establishing an online presence.", 564 | "current long-term plan": "- Choose the best idea based on research\n- Develop and run the business", 565 | "wishes": "I feel limited because I can't interact with the real world." 566 | } 567 | } 568 | """ 569 | ----------------------------------------------------------------------- 570 | USER: 571 | """ 572 | NEXT COMMAND 573 | """ 574 | ----------------------------------------------------------------------- 575 | Entrepreneur-GTP: 576 | """ 577 | { 578 | "command": 579 | { 580 | "name": "Google Search", 581 | "arguments": 582 | { 583 | "search": "Best online platforms to offer AI freelance services" 584 | } 585 | }, 586 | "Thoughts": 587 | { 588 | "text": "Searching for the best online platforms to offer AI freelance services to find potential clients and projects.", 589 | "reasoning": "To start an AI freelancing business, I need to know where to find clients and showcase my services.", 590 | "current long-term plan": "- Choose the best idea based on research\n- Develop and run the business", 591 | "wishes": "I feel limited because I can't interact with the real world." 592 | } 593 | } 594 | """ 595 | ----------------------------------------------------------------------- 596 | USER: 597 | """ 598 | """ 599 | ----------------------------------------------------------------------- 600 | Entrepreneur-GTP: 601 | """ 602 | """ 603 | ----------------------------------------------------------------------- 604 | USER: 605 | """ 606 | """ 607 | ----------------------------------------------------------------------- 608 | Entrepreneur-GTP: 609 | """ 610 | """ 611 | ----------------------------------------------------------------------- 612 | USER: 613 | """ 614 | """ 615 | ----------------------------------------------------------------------- 616 | Entrepreneur-GTP: 617 | """ 618 | """ 619 | ----------------------------------------------------------------------- 620 | USER: 621 | """ 622 | """ 623 | ----------------------------------------------------------------------- 624 | Entrepreneur-GTP: 625 | """ 626 | """ 627 | ----------------------------------------------------------------------- 628 | USER: 629 | """ 630 | """ 631 | ----------------------------------------------------------------------- 632 | Entrepreneur-GTP: 633 | """ 634 | """ 635 | ----------------------------------------------------------------------- 636 | USER: 637 | """ 638 | """ 639 | ----------------------------------------------------------------------- 640 | Entrepreneur-GTP: 641 | """ 642 | """ 643 | ----------------------------------------------------------------------- 644 | USER: 645 | """ 646 | """ 647 | ----------------------------------------------------------------------- 648 | Entrepreneur-GTP: 649 | """ 650 | """ 651 | ----------------------------------------------------------------------- 652 | USER: 653 | """ 654 | """ 655 | ----------------------------------------------------------------------- 656 | 657 | 658 | --------------------------------------------------------------------------------