├── .editorconfig ├── .flake8 ├── .github ├── CODEOWNERS └── workflows │ ├── ci.yml │ └── test-plugin-installation.yml ├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE ├── Makefile ├── README.md ├── helpers.sh ├── pylintrc ├── pyproject.toml ├── requirements.txt ├── run_pylint.py ├── sourcery.yaml └── src └── autogpt_plugins ├── __init__.py ├── api_tools ├── README.md ├── __init__.py ├── api_tools.py └── test_api_tools.py ├── astro ├── README.md ├── __init__.py ├── astronauts.py └── test_astro_plugin.py ├── baidu_search ├── README.md ├── README.zh.md ├── __init__.py ├── baidu_search.py ├── screenshots │ └── baidu_cookie.png └── test_auto_gpt_baidu_plugin.py ├── bing_search ├── README.md ├── README.zh.md ├── __init__.py ├── bing_search.py ├── screenshots │ └── azure_api.png └── test_auto_gpt_bing.py ├── bluesky ├── README.md ├── __init__.py └── bluesky_plugin │ ├── __init__.py │ ├── bluesky_plugin.py │ └── test_bluesky_plugin.py ├── create_plugins_here ├── email ├── README.md ├── __init__.py └── email_plugin │ ├── email_plugin.py │ └── test_email_plugin.py ├── news_search ├── README.md ├── __init__.py ├── news_search.py └── test_auto_gpt_news_search.py ├── planner ├── README.md ├── __init__.py └── planner.py ├── random_values ├── README.md ├── __init__.py ├── random_values.py └── test_random_valaues.py ├── scenex ├── README.md ├── __init__.py ├── scenex_plugin.py └── test_scenex_plugin.py ├── serpapi ├── README.md ├── __init__.py ├── serpapi_search.py └── test_serpapi_plugin.py ├── telegram ├── README.md ├── __init__.py └── telegram_chat.py ├── twitter ├── README.md ├── __init__.py └── twitter.py ├── wikipedia_search ├── README.md ├── __init__.py └── wikipedia_search.py └── wolframalpha_search ├── README.md ├── __init__.py ├── test_wolframalpha_search.py └── wolframalpha_search.py /.editorconfig: -------------------------------------------------------------------------------- 1 | # Top-most EditorConfig file 2 | root = true 3 | 4 | # Set default charset 5 | 
[*] 6 | charset = utf-8 7 | 8 | # Use black formatter for python files 9 | [*.py] 10 | profile = black 11 | 12 | # Set defaults for windows and batch filess 13 | [*.bat] 14 | end_of_line = crlf 15 | indent_style = space 16 | indent_size = 2 17 | 18 | # Set defaults for shell scripts 19 | [*.sh] 20 | end_of_line = lf 21 | trim_trailing_whitespace = true 22 | insert_final_newline = false 23 | 24 | # Set defaults for Makefiles 25 | [Makefile] 26 | end_of_line = lf 27 | indent_style = tab 28 | indent_size = 4 29 | trim_trailing_whitespace = true 30 | insert_final_newline = true -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 88 3 | extend-ignore = E203 4 | exclude = 5 | .tox, 6 | __pycache__, 7 | *.pyc, 8 | .env 9 | venv/* 10 | .venv/* 11 | reports/* 12 | dist/* 13 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @Significant-Gravitas/auto-gpt-source @Significant-Gravitas/plugins 2 | /src/autogpt_plugins/api_tools @sidewaysthought 3 | /src/autogpt_plugins/astro @sr5434 4 | /src/autogpt_plugins/baidu_search @ForestLinSen 5 | /src/autogpt_plugins/bing_search @ForestLinSen 6 | /src/autogpt_plugins/bluesky @hermanschutte 7 | /src/autogpt_plugins/email @riensen 8 | /src/autogpt_plugins/news_search @PalAditya 9 | /src/autogpt_plugins/random_values @sidewaysthought 10 | /src/autogpt_plugins/scenex @delgermurun 11 | /src/autogpt_plugins/telegram @wladastic 12 | /src/autogpt_plugins/twitter @desojo 13 | /src/autogpt_plugins/wikipedia_search @pierluigi-failla 14 | /src/autogpt_plugins/wolframalpha_search @pierluigi-failla 15 | /src/autogpt_plugins/serpapi @zyc9012 16 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: 
-------------------------------------------------------------------------------- 1 | name: Python CI 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | concurrency: 10 | group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }} 11 | cancel-in-progress: ${{ github.event_name == 'pull_request' }} 12 | 13 | jobs: 14 | test: 15 | runs-on: ubuntu-latest 16 | strategy: 17 | matrix: 18 | python-version: ["3.10", "3.11"] 19 | 20 | steps: 21 | - name: Check out repository 22 | uses: actions/checkout@v3 23 | 24 | - name: Set up Python ${{ matrix.python-version }} 25 | uses: actions/setup-python@v2 26 | with: 27 | python-version: ${{ matrix.python-version }} 28 | 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install -r requirements.txt 33 | 34 | - name: Run unittest tests with coverage 35 | run: | 36 | pytest --cov=autogpt_plugins --cov-report term-missing --cov-branch --cov-report xml --cov-report term 37 | env: 38 | CI: true 39 | 40 | - name: Upload coverage reports to Codecov 41 | uses: codecov/codecov-action@v3 42 | -------------------------------------------------------------------------------- /.github/workflows/test-plugin-installation.yml: -------------------------------------------------------------------------------- 1 | name: Test installation of plugins against the PR 2 | on: 3 | push: 4 | branches: [ master ] 5 | pull_request: 6 | branches: [ master ] 7 | 8 | jobs: 9 | test-plugin-installation: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Install Kurtosis 13 | run: | 14 | echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list 15 | sudo apt update 16 | sudo apt install kurtosis-cli 17 | - name: Install and run different plugins 18 | run: | 19 | set -euo pipefail 20 | plugins_to_test=("AutoGPTTwitter AutoGPTEmailPlugin AutoGPTSceneXPlugin 
AutoGPTBingSearch AutoGPTNewsSearch AutoGPTWikipediaSearch AutoGPTApiTools AutoGPTRandomValues AutoGPTSpacePlugin AutoGPTBaiduSearch AutoGPTBluesky PlannerPlugin") 21 | for plugin in $plugins_to_test; do 22 | kurtosis run github.com/kurtosis-tech/autogpt-package '{"OPENAI_API_KEY": "test", "ALLOWLISTED_PLUGINS": '\"$plugin\"', "__skip_env_vars_validation": "True", "__skip_env_vars_default_values_set": "True", "__plugin_branch_to_use": '\"${{ github.head_ref}}\"', "__plugin_repo_to_use":'\"${{ github.event.pull_request.head.repo.full_name }}\"'}' 23 | done 24 | 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # Dev Environments 132 | .idea 133 | .devcontainer/devcontainer.json 134 | .vscode/settings.json 135 | 136 | # Mac OS Files 137 | **/.DS_Store 138 | /.vs 139 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/sourcery-ai/sourcery 3 | rev: v1.1.0 # Get the latest tag from https://github.com/sourcery-ai/sourcery/tags 4 | hooks: 5 | - id: sourcery 6 | 7 | - repo: git://github.com/pre-commit/pre-commit-hooks 8 | rev: v0.9.2 9 | hooks: 10 | - id: check-added-large-files 11 | args: ["--maxkb=500"] 12 | - id: check-byte-order-marker 13 | - id: check-case-conflict 14 | - id: check-merge-conflict 15 | - id: check-symlinks 16 | - id: debug-statements 17 | 18 | - repo: local 19 | hooks: 20 | - id: isort 21 | name: isort-local 22 | entry: isort 23 | language: python 24 | types: [python] 25 | exclude: .+/(dist|.venv|venv|build)/.+ 26 | pass_filenames: true 27 | - id: black 28 | name: black-local 29 | entry: black 30 | language: python 31 | types: [python] 32 | exclude: .+/(dist|.venv|venv|build)/.+ 33 | pass_filenames: true 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 
2 | 3 | Copyright (c) 2023 Toran Bruce Richards 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ifeq ($(OS),Windows_NT) 2 | os := win 3 | SCRIPT_EXT := .bat 4 | SHELL_CMD := cmd /C 5 | else 6 | os := nix 7 | SCRIPT_EXT := .sh 8 | SHELL_CMD := bash 9 | endif 10 | 11 | helpers = @$(SHELL_CMD) helpers$(SCRIPT_EXT) $1 12 | 13 | clean: helpers$(SCRIPT_EXT) 14 | $(call helpers,clean) 15 | 16 | qa: helpers$(SCRIPT_EXT) 17 | $(call helpers,qa) 18 | 19 | style: helpers$(SCRIPT_EXT) 20 | $(call helpers,style) 21 | 22 | .PHONY: clean qa style 23 | -------------------------------------------------------------------------------- /helpers.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | clean() { 4 | # Remove build artifacts and temporary files 5 | rm -rf build 2>/dev/null || true 6 | rm -rf dist 2>/dev/null || true 7 | rm -rf __pycache__ 2>/dev/null || true 8 | rm -rf *.egg-info 2>/dev/null || true 9 | rm -rf **/*.egg-info 2>/dev/null || true 10 | rm -rf *.pyc 2>/dev/null || true 11 | rm -rf **/*.pyc 2>/dev/null || true 12 | rm -rf reports 2>/dev/null || true 13 | } 14 | 15 | qa() { 16 | # Run static analysis tools 17 | flake8 . 18 | python run_pylint.py 19 | } 20 | 21 | style() { 22 | # Format code 23 | isort . 24 | black --exclude=".*\/*(dist|venv|.venv|test-results)\/*.*" . 25 | } 26 | 27 | if [ "$1" = "clean" ]; then 28 | echo Removing build artifacts and temporary files... 29 | clean 30 | elif [ "$1" = "qa" ]; then 31 | echo Running static analysis tools... 32 | qa 33 | elif [ "$1" = "style" ]; then 34 | echo Running code formatters... 35 | style 36 | else 37 | echo "Usage: $0 [clean|qa|style]" 38 | exit 1 39 | fi 40 | 41 | echo Done! 
42 | echo 43 | exit 0 -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "agpt_plugins" 7 | version = "0.0.2" 8 | authors = [ 9 | { name="Torantulino", email="support@agpt.co" }, { name="Riensen", email="3340218+riensen@users.noreply.github.com" } 10 | ] 11 | description = "The plugins for Auto-GPT." 12 | readme = "README.md" 13 | requires-python = ">=3.8" 14 | classifiers = [ 15 | "Programming Language :: Python :: 3", 16 | "License :: OSI Approved :: MIT License", 17 | "Operating System :: OS Independent", 18 | ] 19 | dependencies = ["abstract-singleton"] 20 | 21 | [project.urls] 22 | "Homepage" = "https://github.com/Significant-Gravitas/Auto-GPT-Plugins" 23 | "Bug Tracker" = "https://github.com/Significant-Gravitas/Auto-GPT-Plugins" 24 | 25 | [tool.black] 26 | line-length = 88 27 | target-version = ['py38'] 28 | include = '\.pyi?$' 29 | extend-exclude = "" 30 | 31 | [tool.isort] 32 | profile = "black" 33 | 34 | [tool.pylint.messages_control] 35 | disable = "C0330, C0326" 36 | 37 | [tool.pylint.format] 38 | max-line-length = "88" -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | abstract-singleton 2 | atproto 3 | auto-gpt-plugin-template @ git+https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template@0.1.0 4 | black 5 | bs4 6 | build 7 | colorama 8 | flake8 9 | isort 10 | newsapi-python 11 | pandas 12 | pylint 13 | pytest 14 | pytest-cov 15 | python-lorem 16 | python-telegram-bot 17 | requests 18 | requests-mock 19 | setuptools 20 | tweepy==4.13.0 21 | twine 22 | validators 23 | wheel 24 | wolframalpha==5.0.0 25 | 
"""
Run pylint over the ``src`` tree and always print its report.

``subprocess.run(check=True)`` raises CalledProcessError whenever pylint
emits any message (pylint exits non-zero in that case), so the report is
printed from the exception handler as well as from the success path.

Background on why the exit status needs this handling:
https://stackoverflow.com/questions/49100806/
pylint-and-subprocess-run-returning-exit-status-28
"""
import glob
import os
import subprocess
import sys

# Build the file list with glob/os.path so this works on every OS.
# (The original hard-coded Windows separators: "pylint src\**\*".)
py_files = glob.glob(os.path.join("src", "**", "*.py"), recursive=True)

if not py_files:
    sys.exit("No Python files found under 'src'.")

# Argument list + shell=False avoids shell quoting/globbing differences
# between cmd.exe and POSIX shells, and uses the current interpreter.
cmd = [sys.executable, "-m", "pylint", *py_files]
try:
    completed = subprocess.run(
        cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    print(completed.stdout.decode("utf-8"))
except subprocess.CalledProcessError as err:
    # err.output is the captured stdout of the failed pylint run.
    print(err.output.decode("utf-8"))
36 | # - id: no-print-statements 37 | # description: Do not use print statements in the test directory. 38 | # pattern: print(...) 39 | # language: python 40 | # replacement: 41 | # condition: 42 | # explanation: 43 | # paths: 44 | # include: 45 | # - test 46 | # exclude: 47 | # - conftest.py 48 | # tests: [] 49 | # tags: [] 50 | 51 | # rule_tags: {} # Additional rule tags. 52 | 53 | # metrics: 54 | # quality_threshold: 25.0 55 | 56 | # github: 57 | # labels: [] 58 | # ignore_labels: 59 | # - sourcery-ignore 60 | # request_review: author 61 | # sourcery_branch: sourcery/{base_branch} 62 | 63 | # clone_detection: 64 | # min_lines: 3 65 | # min_duplicates: 2 66 | # identical_clones_only: false 67 | 68 | # proxy: 69 | # url: 70 | # ssl_certs_file: 71 | # no_ssl_verify: false 72 | -------------------------------------------------------------------------------- /src/autogpt_plugins/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT-Plugins/84cdb22ae683aff27204d7b472869a42b4982f92/src/autogpt_plugins/__init__.py -------------------------------------------------------------------------------- /src/autogpt_plugins/api_tools/README.md: -------------------------------------------------------------------------------- 1 | # API Tools Plugin 2 | 3 | The API Tools Plugin enables Auto-GPT to communicate with APIs. 4 | 5 | ## Key Features: 6 | - Supports GET, POST, PUT, DELETE, PATCH, HEAD and OPTIONS 7 | - Tries to recover from strange values being used as parameters 8 | - Accepts custom header values 9 | 10 | ## Installation: 11 | As part of the AutoGPT plugins package, follow the [installation instructions](https://github.com/Significant-Gravitas/Auto-GPT-Plugins) on the Auto-GPT-Plugins GitHub reporistory README page. 12 | 13 | ## AutoGPT Configuration 14 | Set `ALLOWLISTED_PLUGINS=AutoGPTApiTools,example-plugin1,example-plugin2,etc` in your AutoGPT `.env` file. 
"""API Tools for Autogpt."""

from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar

from auto_gpt_plugin_template import AutoGPTPluginTemplate

try:
    from .api_tools import ApiCallCommand
except ImportError:
    from api_tools import ApiCallCommand

PromptGenerator = TypeVar("PromptGenerator")


class Message(TypedDict):
    """A single chat message exchanged with the model."""

    role: str
    content: str


class AutoGPTApiTools(AutoGPTPluginTemplate):
    """Auto-GPT plugin exposing an 'api' command for calling external HTTP APIs.

    Only the post-prompt hook is active (it registers the command).  Every
    other plugin hook reports itself as unhandled and behaves as a no-op or
    pass-through, as the template requires.
    """

    def __init__(self):
        super().__init__()
        self._name = "AutoGPTApiTools"
        self._version = "0.1.2"
        self._description = "Allow AutoGPT to make API calls to outside services."
        # Worker object that performs the actual HTTP requests.
        self.plugin_class = ApiCallCommand()

    def can_handle_on_response(self) -> bool:
        """Return False: model responses are not post-processed here."""
        return False

    def on_response(self, response: str, *args, **kwargs) -> str:
        """Return the model response unchanged (hook is disabled)."""
        return response

    def can_handle_post_prompt(self) -> bool:
        """Return True: this plugin registers its command in post_prompt."""
        return True

    def can_handle_on_planning(self) -> bool:
        """Return False: planning is not intercepted."""
        return False

    def on_planning(
        self, prompt: PromptGenerator, messages: List[str]
    ) -> Optional[str]:
        """No-op planning hook; never invoked (see can_handle_on_planning)."""
        return None

    def can_handle_post_planning(self) -> bool:
        """Return False: planning output is not rewritten."""
        return False

    def post_planning(self, response: str) -> str:
        """Return the planning response unchanged (hook is disabled)."""
        return response

    def can_handle_pre_instruction(self) -> bool:
        """Return False: instruction context is not modified beforehand."""
        return False

    def pre_instruction(self, messages: List[str]) -> List[str]:
        """Return the context messages unchanged (hook is disabled)."""
        return messages

    def can_handle_on_instruction(self) -> bool:
        """Return False: instruction chat is not intercepted."""
        return False

    def on_instruction(self, messages: List[str]) -> Optional[str]:
        """No-op instruction hook; never invoked (see can_handle_on_instruction)."""
        return None

    def can_handle_post_instruction(self) -> bool:
        """Return False: instruction output is not rewritten."""
        return False

    def post_instruction(self, response: str) -> str:
        """Return the instruction response unchanged (hook is disabled)."""
        return response

    def can_handle_pre_command(self) -> bool:
        """Return False: commands are not rewritten before execution."""
        return False

    def pre_command(
        self, command_name: str, arguments: Dict[str, Any]
    ) -> Tuple[str, Dict[str, Any]]:
        """Return the command name and arguments unchanged (hook is disabled)."""
        return command_name, arguments

    def can_handle_post_command(self) -> bool:
        """Return False: command results are not post-processed."""
        return False

    def post_command(self, command_name: str, response: str) -> str:
        """Return an empty string (hook is disabled and never invoked)."""
        return ''

    def can_handle_chat_completion(
        self,
        messages: list[Dict[Any, Any]],
        model: str,
        temperature: float,
        max_tokens: int,
    ) -> bool:
        """Return False: chat completions are not handled by this plugin."""
        return False

    def handle_chat_completion(
        self,
        messages: list[Dict[Any, Any]],
        model: str,
        temperature: float,
        max_tokens: int,
    ) -> str:
        """Return an empty string (hook is disabled and never invoked)."""
        return ''

    def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
        """Register the 'api' command on the prompt generator and return it.

        Called just after generate_prompt, before the prompt text is built.
        """
        prompt.add_command(  # type: ignore
            "api",
            "API Call",
            {"host": "", "endpoint": "", "mthd": "", "params": "", "body": "", "hdrs": "", "timeout": ""},
            self.plugin_class.make_api_call
        )
        return prompt

    def can_handle_user_input(self, user_input: str) -> bool:
        """Return False: user input is not intercepted."""
        return False

    def user_input(self, user_input: str) -> str:
        """Return the user input unchanged (hook is disabled)."""
        return user_input

    def can_handle_report(self) -> bool:
        """Return False: reports are not consumed by this plugin."""
        return False

    def report(self, message: str) -> None:
        """No-op report hook (hook is disabled)."""
        return None

    def can_handle_text_embedding(self, text: str) -> bool:
        """Return False: text embeddings are not handled by this plugin."""
        return False

    def handle_text_embedding(self, text: str) -> list:  # type: ignore
        """No-op embedding hook; never invoked (see can_handle_text_embedding)."""
        return None
41 | """ 42 | 43 | data = json.loads(input_string) 44 | sanitized_data = {self.sanitize_string(k): self.sanitize_string(str(v)) for k, v in data.items()} 45 | return json.dumps(sanitized_data) 46 | 47 | # End of sanitize_json() 48 | 49 | 50 | def sanitize(self, input_string: str) -> str: 51 | """ 52 | Remove potentially harmful characters from the input string. 53 | 54 | Args: 55 | input_string (str): The string to sanitize. 56 | 57 | Returns: 58 | str: The sanitized string. 59 | """ 60 | 61 | try: 62 | sanitized_string = self.sanitize_json(input_string) 63 | except json.JSONDecodeError: 64 | sanitized_string = self.sanitize_string(input_string) 65 | return sanitized_string 66 | 67 | # End of sanitize() 68 | 69 | 70 | def make_api_call(self, host = "", endpoint = "", mthd = "GET", params = {}, body = "", 71 | hdrs = {"Content-Type": "application/json"}, timeout = 60) -> str: 72 | """ 73 | Return the results of an API call 74 | 75 | Args: 76 | host (str): The host to call. 77 | endpoint (str): The endpoint to call. 78 | mthd (str): The HTTP method to use. 79 | params (dict): The query parameters to use. 80 | body (str): The body to use. 81 | hdrs (dict): The headers to use. 82 | timeout (int): The timeout to use. 
83 | 84 | Returns: 85 | str: A JSON string containing the results of the API 86 | call in the format 87 | {"status": "success|error", "status_code": int, "response": str, "response": str} 88 | """ 89 | 90 | # Initialize variables 91 | response = {} 92 | 93 | # Type-check inputs - host 94 | if not isinstance(host, str): 95 | raise ValueError("host must be a string") 96 | 97 | # Type-check inputs - endpoint 98 | if not isinstance(endpoint, str): 99 | raise ValueError("endpoint must be a string") 100 | 101 | # Type-check inputs - method 102 | if not isinstance(mthd, str): 103 | raise ValueError("method must be a string") 104 | 105 | # Type-check inputs - query_params 106 | if not params: 107 | params = {} 108 | elif isinstance(params, str): 109 | try: 110 | params = json.loads(params) 111 | except json.JSONDecodeError: 112 | raise ValueError("query_params must be a dictionary") 113 | elif isinstance(params, dict): 114 | new_query_params = {} 115 | for k, v in params.items(): 116 | if k is None: 117 | raise ValueError("query_params cannot contain None keys") 118 | if not isinstance(k, str): 119 | k = str(k) 120 | if v is not None and not isinstance(v, str): 121 | v = str(v) 122 | new_query_params[k] = v 123 | params = new_query_params 124 | else: 125 | raise ValueError("query_params must be a dictionary or a JSON string") 126 | 127 | # Type-check inputs - body 128 | if not isinstance(body, str): 129 | try: 130 | body = str(body) 131 | except ValueError: 132 | raise ValueError("body must be a string") 133 | 134 | # Type-check inputs - headers 135 | if not hdrs: 136 | hdrs = {} 137 | elif isinstance(hdrs, str): 138 | try: 139 | hdrs = json.loads(hdrs) 140 | except json.JSONDecodeError: 141 | raise ValueError("headers must be a dictionary") 142 | elif isinstance(hdrs, dict): 143 | new_headers = {} 144 | for k, v in hdrs.items(): 145 | if k is None: 146 | raise ValueError("headers cannot contain None keys") 147 | if not isinstance(k, str): 148 | k = str(k) 149 | if v is 
not None and not isinstance(v, str): 150 | v = str(v) 151 | new_headers[k] = v 152 | hdrs = new_headers 153 | else: 154 | raise ValueError("headers must be a dictionary or a JSON string") 155 | 156 | # Type-check inputs - timeout_secs 157 | if timeout is None: 158 | raise ValueError("timeout_secs must be an integer") 159 | elif not isinstance(timeout, int): 160 | try: 161 | timeout = int(timeout) 162 | except ValueError: 163 | raise ValueError("timeout_secs must be an integer") 164 | 165 | # Validate URL 166 | if '?' in host or '&' in host: 167 | raise ValueError("Invalid URL: Host must not contain query parameters") 168 | sanitized_host = self.sanitize(host) 169 | sanitized_endpoint = self.sanitize(endpoint) 170 | if not sanitized_host.startswith(("http://", "https://")): 171 | sanitized_host = f"https://{sanitized_host}" 172 | url = urljoin(sanitized_host, sanitized_endpoint) 173 | if not is_valid_url(url): # type: ignore 174 | raise ValueError("Invalid URL: " + url) 175 | 176 | # Validate method 177 | allowed_methods = ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS"] 178 | sanitized_method = self.sanitize(mthd).upper() 179 | if sanitized_method not in allowed_methods: 180 | raise ValueError("Invalid method: " + sanitized_method) 181 | 182 | # Validate timeout_secs 183 | if not timeout > 0: 184 | raise ValueError("timeout_secs must be a positive integer") 185 | 186 | # Make the request 187 | try: 188 | if sanitized_method == "GET": 189 | response = requests.get(url, params=params, headers=hdrs, timeout=timeout) 190 | elif sanitized_method == "HEAD": 191 | response = requests.head(url, params=params, headers=hdrs, timeout=timeout) 192 | elif sanitized_method == "OPTIONS": 193 | response = requests.options(url, params=params, headers=hdrs, timeout=timeout) 194 | elif sanitized_method == "POST": 195 | response = requests.post(url, params=params, json=body, headers=hdrs, timeout=timeout) 196 | elif sanitized_method == "PUT": 197 | response = 
requests.put(url, params=params, json=body, headers=hdrs, timeout=timeout) 198 | elif sanitized_method == "DELETE": 199 | response = requests.delete(url, params=params, json=body, headers=hdrs, timeout=timeout) 200 | elif sanitized_method == "PATCH": 201 | response = requests.patch(url, params=params, json=body, headers=hdrs, timeout=timeout) 202 | else: 203 | raise ValueError("Invalid method: " + mthd) 204 | 205 | response_text = response.text 206 | response = { 207 | "status": "success", 208 | "status_code": response.status_code, 209 | "response": response_text 210 | } 211 | 212 | except requests.exceptions.RequestException as e: 213 | response = { 214 | "status": "error", 215 | "status_code": None, 216 | "response": str(e) 217 | } 218 | 219 | return json.dumps(response) 220 | 221 | # End of call_api() 222 | 223 | # End of class ApiCallCommand 224 | -------------------------------------------------------------------------------- /src/autogpt_plugins/astro/README.md: -------------------------------------------------------------------------------- 1 | # Auto-GPT Space Plugin 2 | This plugin enables AutoGPT to see how many people are in space and see the position of the ISS. This can help enable AutoGPT to better achieve its goals. 3 | 4 | ## Use cases 5 | - Researching how many people are in space 6 | ## Setup 7 | Setup is easy. 
def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
    """Register this plugin's commands on the prompt generator.

    Bug fix: the original imported only ``get_num_astronauts`` but also
    registered ``get_coords_iss``, which raised ``NameError`` whenever
    ``load_commands`` was true. Both names are now imported.

    Args:
        prompt (PromptGenerator): The prompt generator to extend.

    Returns:
        PromptGenerator: The same prompt generator, with the astronaut
        commands added when command loading is enabled.
    """
    if self.load_commands:
        # Imported lazily so the plugin module can be loaded even when the
        # commands are never registered.
        from .astronauts import get_coords_iss, get_num_astronauts

        prompt.add_command(
            "Get number of astronauts",
            "get_num_astronauts",
            {},
            get_num_astronauts,
        )
        prompt.add_command(
            "Get the coordinates of the ISS",
            "get_coords_iss",
            {},
            get_coords_iss,
        )

    return prompt
57 | 58 | Returns: 59 | bool: True if the plugin can handle the on_response method.""" 60 | return False 61 | 62 | def on_response(self, response: str, *args, **kwargs) -> str: 63 | """This method is called when a response is received from the model.""" 64 | pass 65 | 66 | def can_handle_on_planning(self) -> bool: 67 | """This method is called to check that the plugin can 68 | handle the on_planning method. 69 | 70 | Returns: 71 | bool: True if the plugin can handle the on_planning method.""" 72 | return False 73 | 74 | def on_planning( 75 | self, prompt: PromptGenerator, messages: List[Message] 76 | ) -> Optional[str]: 77 | """This method is called before the planning chat completion is done. 78 | 79 | Args: 80 | prompt (PromptGenerator): The prompt generator. 81 | messages (List[str]): The list of messages. 82 | """ 83 | pass 84 | 85 | def can_handle_post_planning(self) -> bool: 86 | """This method is called to check that the plugin can 87 | handle the post_planning method. 88 | 89 | Returns: 90 | bool: True if the plugin can handle the post_planning method.""" 91 | return False 92 | 93 | def post_planning(self, response: str) -> str: 94 | """This method is called after the planning chat completion is done. 95 | 96 | Args: 97 | response (str): The response. 98 | 99 | Returns: 100 | str: The resulting response. 101 | """ 102 | pass 103 | 104 | def can_handle_pre_instruction(self) -> bool: 105 | """This method is called to check that the plugin can 106 | handle the pre_instruction method. 107 | 108 | Returns: 109 | bool: True if the plugin can handle the pre_instruction method.""" 110 | return False 111 | 112 | def pre_instruction(self, messages: List[Message]) -> List[Message]: 113 | """This method is called before the instruction chat is done. 114 | 115 | Args: 116 | messages (List[Message]): The list of context messages. 117 | 118 | Returns: 119 | List[Message]: The resulting list of messages. 
120 | """ 121 | pass 122 | 123 | def can_handle_on_instruction(self) -> bool: 124 | """This method is called to check that the plugin can 125 | handle the on_instruction method. 126 | 127 | Returns: 128 | bool: True if the plugin can handle the on_instruction method.""" 129 | return False 130 | 131 | def on_instruction(self, messages: List[Message]) -> Optional[str]: 132 | """This method is called when the instruction chat is done. 133 | 134 | Args: 135 | messages (List[Message]): The list of context messages. 136 | 137 | Returns: 138 | Optional[str]: The resulting message. 139 | """ 140 | pass 141 | 142 | def can_handle_post_instruction(self) -> bool: 143 | """This method is called to check that the plugin can 144 | handle the post_instruction method. 145 | 146 | Returns: 147 | bool: True if the plugin can handle the post_instruction method.""" 148 | return False 149 | 150 | def post_instruction(self, response: str) -> str: 151 | """This method is called after the instruction chat is done. 152 | 153 | Args: 154 | response (str): The response. 155 | 156 | Returns: 157 | str: The resulting response. 158 | """ 159 | pass 160 | 161 | def can_handle_pre_command(self) -> bool: 162 | """This method is called to check that the plugin can 163 | handle the pre_command method. 164 | 165 | Returns: 166 | bool: True if the plugin can handle the pre_command method.""" 167 | return False 168 | 169 | def pre_command( 170 | self, command_name: str, arguments: Dict[str, Any] 171 | ) -> Tuple[str, Dict[str, Any]]: 172 | """This method is called before the command is executed. 173 | 174 | Args: 175 | command_name (str): The command name. 176 | arguments (Dict[str, Any]): The arguments. 177 | 178 | Returns: 179 | Tuple[str, Dict[str, Any]]: The command name and the arguments. 180 | """ 181 | pass 182 | 183 | def can_handle_post_command(self) -> bool: 184 | """This method is called to check that the plugin can 185 | handle the post_command method. 
186 | 187 | Returns: 188 | bool: True if the plugin can handle the post_command method.""" 189 | return False 190 | 191 | def post_command(self, command_name: str, response: str) -> str: 192 | """This method is called after the command is executed. 193 | 194 | Args: 195 | command_name (str): The command name. 196 | response (str): The response. 197 | 198 | Returns: 199 | str: The resulting response. 200 | """ 201 | pass 202 | 203 | def can_handle_chat_completion( 204 | self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int 205 | ) -> bool: 206 | """This method is called to check that the plugin can 207 | handle the chat_completion method. 208 | 209 | Args: 210 | messages (List[Message]): The messages. 211 | model (str): The model name. 212 | temperature (float): The temperature. 213 | max_tokens (int): The max tokens. 214 | 215 | Returns: 216 | bool: True if the plugin can handle the chat_completion method.""" 217 | return False 218 | 219 | def handle_chat_completion( 220 | self, messages: List[Message], model: str, temperature: float, max_tokens: int 221 | ) -> str: 222 | """This method is called when the chat completion is done. 223 | 224 | Args: 225 | messages (List[Message]): The messages. 226 | model (str): The model name. 227 | temperature (float): The temperature. 228 | max_tokens (int): The max tokens. 229 | 230 | Returns: 231 | str: The resulting response. 
232 | """ 233 | pass 234 | 235 | def can_handle_text_embedding( 236 | self, text: str 237 | ) -> bool: 238 | return False 239 | 240 | def handle_text_embedding( 241 | self, text: str 242 | ) -> list: 243 | pass 244 | 245 | def can_handle_user_input(self, user_input: str) -> bool: 246 | return False 247 | 248 | def user_input(self, user_input: str) -> str: 249 | return user_input 250 | 251 | def can_handle_report(self) -> bool: 252 | return False 253 | 254 | def report(self, message: str) -> None: 255 | pass 256 | -------------------------------------------------------------------------------- /src/autogpt_plugins/astro/astronauts.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | 4 | def get_num_astronauts(): 5 | """Get the number of astronauts in space. 6 | 7 | Args: 8 | None 9 | 10 | Returns: 11 | int: The number of astronauts in space. 12 | """ 13 | #Get the data 14 | response = requests.get("http://api.open-notify.org/astros.json") 15 | #Convert it to JSON 16 | data = response.json() 17 | #Extract the number and return it 18 | return data["number"] 19 | 20 | def get_coords_iss(): 21 | """Get the coordinates of the ISS 22 | Args: 23 | None 24 | Returns: 25 | int: The latitude of the ISS. 26 | int: The longitude of the ISS. 
27 | """ 28 | #Get the data 29 | response = requests.get("http://api.open-notify.org/iss-now.json") 30 | #Convert it to JSON 31 | data = response.json() 32 | #Extract the number and return it 33 | return float(data["iss_position"]["latitude"]), float(data["iss_position"]["longitude"]) 34 | -------------------------------------------------------------------------------- /src/autogpt_plugins/astro/test_astro_plugin.py: -------------------------------------------------------------------------------- 1 | from .astronauts import get_num_astronauts 2 | from .astronauts import get_coords_iss 3 | 4 | def test_astro(): 5 | assert type(get_num_astronauts())==int 6 | 7 | def test_iss(): 8 | latitude, longitude = get_coords_iss() 9 | assert type(latitude)==float 10 | assert type(longitude)==float 11 | -------------------------------------------------------------------------------- /src/autogpt_plugins/baidu_search/README.md: -------------------------------------------------------------------------------- 1 | # Auto-GPT Baidu Search Plugin 2 | 3 | Language: [English](https://github.com/Significant-Gravitas/Auto-GPT-Plugins/tree/master/src/autogpt_plugins/baidu_search/README.md) | [中文](https://github.com/Significant-Gravitas/Auto-GPT-Plugins/tree/master/src/autogpt_plugins/baidu_search/README.zh.md) 4 | 5 | This search plugin integrates Baidu search engines into Auto-GPT, complementing the existing support for Google Search and DuckDuckGo Search provided by the main repository. 6 | 7 | ## Key Features: 8 | - Baidu Search: Perform search queries using the Baidu search engine. 9 | 10 | ## How it works 11 | If the environment variables for the search engine (`SEARCH_ENGINE`) and the Baidu cookie (`BAIDU_COOKIE`) are set, the search engine will be set to Baidu. 12 | 13 | ## Obtaining Baidu Cookie: 14 | 1. Open the Chrome browser and search for something on Baidu. 15 | 2. Open Developer Tools (press F12 or right-click and select "Inspect"). 16 | 3. Go to the "Network" tab. 17 | 4. 
Find the first name file in the list of network requests. 18 | 5. On the right side, find the "Cookie" header and copy all of its content(it's very long). 19 | 20 | ![Baidu Cookie](./screenshots/baidu_cookie.png) 21 | 22 | Set the `BAIDU_COOKIE` in the `.env` file: 23 | 24 | ``` 25 | SEARCH_ENGINE=baidu 26 | BAIDU_COOKIE=your-baidu-cookie 27 | ``` 28 | 29 | Remember to replace `your-baidu-cookie` with the actual cookie content you obtained from the Chrome Developer Tools. 30 | 31 | ## Note 32 | In most cases, the AutoGPT bot's queries are automatically set to English. However, if you wish to search in Chinese, you can specify the language in the goals. -------------------------------------------------------------------------------- /src/autogpt_plugins/baidu_search/README.zh.md: -------------------------------------------------------------------------------- 1 | # Auto-GPT 百度搜索插件 2 | 3 | 语言: [English](https://github.com/Significant-Gravitas/Auto-GPT-Plugins/tree/master/src/autogpt_plugins/baidu_search/README.md) | [中文](https://github.com/Significant-Gravitas/Auto-GPT-Plugins/tree/master/src/autogpt_plugins/baidu_search/README.zh.md) 4 | 5 | 此搜索插件将百度搜索引擎集成到 Auto-GPT 中,补充了原有的 Google 搜索和 DuckDuckGo 搜索。 6 | 7 | ## 主要功能: 8 | - 百度搜索:使用百度搜索引擎进行搜索查询。 9 | 10 | ## 工作原理: 11 | 如果设置了搜索引擎(`SEARCH_ENGINE`)和Baidu Cookie(`BAIDU_COOKIE`)的环境变量,搜索引擎将设置为百度。 12 | 13 | ### 获取百度 Cookie: 14 | 1. 打开 Chrome 浏览器并在百度上搜索随便某个内容。 15 | 2. 打开开发者工具(按 F12 或右键单击并选择 "审查元素")。 16 | 3. 转到 "网络" 标签。 17 | 4. 在网络请求列表中找到第一个名称文件。 18 | 5. 
在右侧找到 "Cookie" 并复制所有内容(很长,需要全部复制)。 19 | 20 | ![Baidu Cookie](./screenshots/baidu_cookie.png) 21 | 22 | `.env` 文件示例: 23 | 24 | ``` 25 | SEARCH_ENGINE=baidu 26 | BAIDU_COOKIE=your-baidu-cookie 27 | ``` 28 | 29 | 请将 `your-baidu-cookie` 替换为从 Chrome 开发者工具获取的实际 Cookie 内容。 30 | 31 | ## 注意事项 32 | 在大多数情况下,AutoGPT的查询关键词会被自动设置为英文。如果你想用中文关键词搜索,你可以在goals中明确指定语言。 -------------------------------------------------------------------------------- /src/autogpt_plugins/baidu_search/__init__.py: -------------------------------------------------------------------------------- 1 | """This is the Baidu search engines plugin for Auto-GPT.""" 2 | import os 3 | from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar 4 | 5 | from auto_gpt_plugin_template import AutoGPTPluginTemplate 6 | 7 | from .baidu_search import _baidu_search 8 | 9 | PromptGenerator = TypeVar("PromptGenerator") 10 | 11 | 12 | class Message(TypedDict): 13 | role: str 14 | content: str 15 | 16 | 17 | class AutoGPTBaiduSearch(AutoGPTPluginTemplate): 18 | def __init__(self): 19 | super().__init__() 20 | self._name = "Baidu-Search-Plugin" 21 | self._version = "0.1.0" 22 | self._description = ( 23 | "This plugin performs Baidu searches using the provided query." 24 | ) 25 | self.load_commands = ( 26 | os.getenv("SEARCH_ENGINE") 27 | and os.getenv("SEARCH_ENGINE").lower() == "baidu" 28 | and os.getenv("BAIDU_COOKIE") 29 | ) 30 | 31 | def can_handle_post_prompt(self) -> bool: 32 | return True 33 | 34 | def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator: 35 | if self.load_commands: 36 | # Add Baidu Search command 37 | prompt.add_command( 38 | "Baidu Search", 39 | "baidu_search", 40 | {"query": ""}, 41 | _baidu_search, 42 | ) 43 | else: 44 | print( 45 | "Warning: Baidu-Search-Plugin is not fully functional. " 46 | "Please set the SEARCH_ENGINE and BAIDU_COOKIE environment variables." 
47 | ) 48 | return prompt 49 | 50 | def can_handle_pre_command(self) -> bool: 51 | return True 52 | 53 | def pre_command( 54 | self, command_name: str, arguments: Dict[str, Any] 55 | ) -> Tuple[str, Dict[str, Any]]: 56 | if command_name == "google" and self.load_commands: 57 | return "baidu_search", {"query": arguments["query"]} 58 | else: 59 | return command_name, arguments 60 | 61 | def can_handle_post_command(self) -> bool: 62 | return False 63 | 64 | def post_command(self, command_name: str, response: str) -> str: 65 | pass 66 | 67 | def can_handle_on_planning(self) -> bool: 68 | return False 69 | 70 | def on_planning( 71 | self, prompt: PromptGenerator, messages: List[Message] 72 | ) -> Optional[str]: 73 | pass 74 | 75 | def can_handle_on_response(self) -> bool: 76 | return False 77 | 78 | def on_response(self, response: str, *args, **kwargs) -> str: 79 | pass 80 | 81 | def can_handle_post_planning(self) -> bool: 82 | return False 83 | 84 | def post_planning(self, response: str) -> str: 85 | pass 86 | 87 | def can_handle_pre_instruction(self) -> bool: 88 | return False 89 | 90 | def pre_instruction(self, messages: List[Message]) -> List[Message]: 91 | pass 92 | 93 | def can_handle_on_instruction(self) -> bool: 94 | return False 95 | 96 | def on_instruction(self, messages: List[Message]) -> Optional[str]: 97 | pass 98 | 99 | def can_handle_post_instruction(self) -> bool: 100 | return False 101 | 102 | def post_instruction(self, response: str) -> str: 103 | pass 104 | 105 | def can_handle_chat_completion( 106 | self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int 107 | ) -> bool: 108 | return False 109 | 110 | def handle_chat_completion( 111 | self, messages: List[Message], model: str, temperature: float, max_tokens: int 112 | ) -> str: 113 | pass 114 | 115 | def can_handle_text_embedding( 116 | self, text: str 117 | ) -> bool: 118 | return False 119 | 120 | def handle_text_embedding( 121 | self, text: str 122 | ) -> list: 123 | pass 
import json
import os
import re

import requests
from bs4 import BeautifulSoup


def _baidu_search(query: str, num_results=8):
    """Perform a Baidu web search and return the results as a JSON string.

    Args:
        query: The search query.
        num_results: Maximum number of results to request (Baidu ``rn``
            parameter).

    Returns:
        str: A JSON array of ``{"title", "href", "snippet"}`` objects.

    Raises:
        requests.exceptions.HTTPError: If Baidu returns a non-2xx status
            (e.g. 401 for an invalid cookie).
        requests.exceptions.RequestException: On network failure/timeout.
    """
    headers = {
        # Baidu requires a logged-in cookie to return full results.
        'Cookie': os.getenv("BAIDU_COOKIE"),
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:112.0) Gecko/20100101 Firefox/112.0"
    }
    url = f'https://www.baidu.com/s?wd={query}&rn={num_results}'
    # Bug fix: add a timeout so a stalled connection cannot hang the agent,
    # and surface HTTP errors instead of silently parsing an error page
    # (the plugin's own test expects an HTTPError on a bad cookie).
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')
    search_results = []

    for result in soup.find_all('div', class_=re.compile('^result c-container ')):
        title_tag = result.find('h3', class_='t')
        link_tag = result.find('a', href=True)
        if title_tag is None or link_tag is None:
            # Bug fix: skip malformed result blocks instead of raising
            # AttributeError on None.
            continue
        snippet_tag = result.find('span', class_=re.compile('^content-right_8Zs40'))
        search_results.append({
            'title': title_tag.get_text(),
            'href': link_tag['href'],
            'snippet': snippet_tag.get_text() if snippet_tag else ''
        })

    return json.dumps(search_results, ensure_ascii=False, indent=4)
import os
import unittest
from typing import List

import requests

from . import AutoGPTBaiduSearch
from .baidu_search import _baidu_search


class TestAutoGPTBaiduSearch(unittest.TestCase):
    """Unit tests for the Baidu search plugin."""

    def setUp(self):
        # Provide the env vars the plugin's __init__ checks so that
        # load_commands evaluates truthy for these tests.
        os.environ["BAIDU_COOKIE"] = "test_cookie"
        os.environ["SEARCH_ENGINE"] = "baidu"
        self.plugin = AutoGPTBaiduSearch()

    def tearDown(self):
        # Remove the test env vars so other test modules are unaffected.
        os.environ.pop("SEARCH_ENGINE", None)
        os.environ.pop("BAIDU_COOKIE", None)

    def test_baidu_search(self):
        # Attempts a real network request with a dummy cookie; an HTTP 401
        # is tolerated. NOTE(review): requires network access — confirm
        # this is intended for the CI environment.
        query = "test query"
        try:
            _baidu_search(query)
        except requests.exceptions.HTTPError as e:
            self.assertEqual(e.response.status_code, 401)

    def test_pre_command(self):
        # "google" commands should be rewritten to "baidu_search" with the
        # query preserved.
        os.environ["SEARCH_ENGINE"] = "baidu"
        self.plugin = AutoGPTBaiduSearch()

        command_name, arguments = self.plugin.pre_command(
            "google", {"query": "test query"}
        )
        self.assertEqual(command_name, "baidu_search")
        self.assertEqual(arguments, {"query": "test query"})

    def test_can_handle_pre_command(self):
        self.assertTrue(self.plugin.can_handle_pre_command())

    def test_can_handle_post_prompt(self):
        self.assertTrue(self.plugin.can_handle_post_prompt())


if __name__ == "__main__":
    unittest.main()
[English](https://github.com/Significant-Gravitas/Auto-GPT-Plugins/tree/master/src/autogpt_plugins/bing_search/README.md) | [中文](https://github.com/Significant-Gravitas/Auto-GPT-Plugins/tree/master/src/autogpt_plugins/bing_search/README.zh.md) 4 | 5 | The Auto-GPT Bing Search Plugin is a useful plugin for the base project, Auto-GPT. With the aim of expand the search experience, this search plugin integrates Bing search engines into Auto-GPT, complementing the existing support for Google Search and DuckDuckGo Search provided by the main repository. 6 | 7 | ## Key Features: 8 | - Bing Search: Perform search queries using the Bing search engine. 9 | 10 | ## How it works 11 | If the environment variables for the search engine (`SEARCH_ENGINE`) and the Bing API key (`BING_API_KEY`) are set, the search engine will be set to Bing. 12 | 13 | ## Installation: 14 | 1. Download the Auto-GPT Bing Search Plugin repository as a ZIP file. 15 | 2. Copy the ZIP file into the "plugins" folder of your Auto-GPT project. 16 | 17 | ### Bing API Key and Bing Search Configuration: 18 | 1. Go to the [Bing Web Search API](https://www.microsoft.com/en-us/bing/apis/bing-web-search-api) website. 19 | 2. Sign into your Microsoft Azure account or create a new account if you don't have one. 20 | 3. After setting up your account, go to the "Keys and Endpoint" section. 21 | 4. Copy the key from there and add it to the `.env` file in your project directory. 22 | 5. Name the environment variable `BING_API_KEY`. 23 | 24 | ![Azure Key](./screenshots/azure_api.png) 25 | 26 | Example of the `.env` file: 27 | ``` 28 | SEARCH_ENGINE=bing 29 | BING_API_KEY=your_bing_api_key 30 | ``` 31 | 32 | Remember to replace `your_bing_api_key` with the actual API key you obtained from the Microsoft Azure portal. 
33 | -------------------------------------------------------------------------------- /src/autogpt_plugins/bing_search/README.zh.md: -------------------------------------------------------------------------------- 1 | # Auto-GPT 必应搜索插件 2 | 3 | 语言: [English](https://github.com/Significant-Gravitas/Auto-GPT-Plugins/tree/master/src/autogpt_plugins/bing_search/README.md) | [中文](https://github.com/Significant-Gravitas/Auto-GPT-Plugins/tree/master/src/autogpt_plugins/bing_search/README.zh.md) 4 | 5 | Auto-GPT 必应搜索插件是基础项目 Auto-GPT 的一个实用插件。为了扩展搜索选项,此搜索插件将必应搜索引擎集成到 Auto-GPT 中,补充了原有的 Google 搜索和 DuckDuckGo 搜索。 6 | 7 | ## 主要功能: 8 | - 必应搜索:使用必应搜索引擎进行搜索查询。 9 | 10 | ## 工作原理: 11 | 如果设置了搜索引擎(`SEARCH_ENGINE`)和Bing API密钥(`BING_API_KEY`)的环境变量,搜索引擎将设置为必应 12 | 13 | ## 安装: 14 | 1. 以 ZIP 文件格式下载 Auto-GPT 必应搜索插件存储库。 15 | 2. 将 ZIP 文件复制到 Auto-GPT 项目的 "plugins" 文件夹中。 16 | 17 | ### Bing API 密钥和必应搜索配置: 18 | 1. 访问 [Bing Web Search API](https://www.microsoft.com/en-us/bing/apis/bing-web-search-api)。 19 | 2. 登录您的 Microsoft Azure 帐户,如果没有帐户,请创建一个新帐户。 20 | 3. 设置帐户后,转到 "Keys and Endpoint" 部分。 21 | 4. 从那里复制密钥并将其添加到项目目录中的 .env 文件中。 22 | 5. 
将环境变量命名为 `BING_API_KEY`。 23 | 24 | ![Azure Key](./screenshots/azure_api.png) 25 | 26 | `.env` 文件示例: 27 | ``` 28 | SEARCH_ENGINE=bing 29 | BING_API_KEY=your_bing_api_key 30 | ``` 31 | 32 | 请将 `your_bing_api_key` 替换为从 Microsoft Azure 获取的实际 API 密钥。 33 | -------------------------------------------------------------------------------- /src/autogpt_plugins/bing_search/__init__.py: -------------------------------------------------------------------------------- 1 | """This is the Bing search engines plugin for Auto-GPT.""" 2 | import os 3 | from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar 4 | 5 | from auto_gpt_plugin_template import AutoGPTPluginTemplate 6 | 7 | from .bing_search import _bing_search 8 | 9 | PromptGenerator = TypeVar("PromptGenerator") 10 | 11 | 12 | class Message(TypedDict): 13 | role: str 14 | content: str 15 | 16 | 17 | class AutoGPTBingSearch(AutoGPTPluginTemplate): 18 | def __init__(self): 19 | super().__init__() 20 | self._name = "Bing-Search-Plugin" 21 | self._version = "0.1.0" 22 | self._description = ( 23 | "This plugin performs Bing searches using the provided query." 24 | ) 25 | self.load_commands = ( 26 | os.getenv("SEARCH_ENGINE") 27 | and os.getenv("SEARCH_ENGINE").lower() == "bing" 28 | and os.getenv("BING_API_KEY") 29 | ) 30 | 31 | def can_handle_post_prompt(self) -> bool: 32 | return True 33 | 34 | def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator: 35 | if self.load_commands: 36 | # Add Bing Search command 37 | prompt.add_command( 38 | "Bing Search", 39 | "bing_search", 40 | {"query": ""}, 41 | _bing_search, 42 | ) 43 | else: 44 | print( 45 | "Warning: Bing-Search-Plugin is not fully functional. " 46 | "Please set the SEARCH_ENGINE and BING_API_KEY environment variables." 
47 | ) 48 | return prompt 49 | 50 | def can_handle_pre_command(self) -> bool: 51 | return True 52 | 53 | def pre_command( 54 | self, command_name: str, arguments: Dict[str, Any] 55 | ) -> Tuple[str, Dict[str, Any]]: 56 | if command_name == "google" and self.load_commands: 57 | # this command does nothing but it is required to continue performing the post_command function 58 | return "bing_search", {"query": arguments["query"]} 59 | else: 60 | return command_name, arguments 61 | 62 | def can_handle_post_command(self) -> bool: 63 | return False 64 | 65 | def post_command(self, command_name: str, response: str) -> str: 66 | pass 67 | 68 | def can_handle_on_planning(self) -> bool: 69 | return False 70 | 71 | def on_planning( 72 | self, prompt: PromptGenerator, messages: List[Message] 73 | ) -> Optional[str]: 74 | pass 75 | 76 | def can_handle_on_response(self) -> bool: 77 | return False 78 | 79 | def on_response(self, response: str, *args, **kwargs) -> str: 80 | pass 81 | 82 | def can_handle_post_planning(self) -> bool: 83 | return False 84 | 85 | def post_planning(self, response: str) -> str: 86 | pass 87 | 88 | def can_handle_pre_instruction(self) -> bool: 89 | return False 90 | 91 | def pre_instruction(self, messages: List[Message]) -> List[Message]: 92 | pass 93 | 94 | def can_handle_on_instruction(self) -> bool: 95 | return False 96 | 97 | def on_instruction(self, messages: List[Message]) -> Optional[str]: 98 | pass 99 | 100 | def can_handle_post_instruction(self) -> bool: 101 | return False 102 | 103 | def post_instruction(self, response: str) -> str: 104 | pass 105 | 106 | def can_handle_pre_command(self) -> bool: 107 | return True 108 | 109 | def can_handle_chat_completion( 110 | self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int 111 | ) -> bool: 112 | return False 113 | 114 | def handle_chat_completion( 115 | self, messages: List[Message], model: str, temperature: float, max_tokens: int 116 | ) -> str: 117 | pass 118 | 119 | def 
import json
import os
import re


def clean_text(text: str) -> str:
    """Return *text* with HTML tags and literal backslash-n sequences removed.

    Bing is queried with ``textFormat=HTML``, so snippets arrive with HTML
    decorations and escaped newline sequences; strip both so downstream
    consumers receive plain text.
    """
    cleaned_text = re.sub("<[^>]*>", "", text)  # Remove HTML tags
    # Replace the two-character sequence backslash + 'n' (not a real
    # newline) with a space.
    return cleaned_text.replace("\\n", " ")


def _bing_search(query: str, num_results=8) -> str:
    """Perform a Bing Web Search and return the results as a JSON string.

    Args:
        query: The search query.
        num_results: Number of results to request (``count`` parameter).

    Returns:
        str: A JSON array of ``{"title", "href", "body"}`` objects.

    Raises:
        requests.exceptions.HTTPError: If the API returns a non-2xx status.
        requests.exceptions.RequestException: On network failure/timeout.
    """
    # Imported lazily so importing this module does not require requests.
    import requests

    subscription_key = os.getenv("BING_API_KEY")

    # Bing Search API endpoint
    search_url = "https://api.bing.microsoft.com/v7.0/search"

    headers = {"Ocp-Apim-Subscription-Key": subscription_key}
    params = {
        "q": query,
        "count": num_results,
        "textDecorations": True,
        "textFormat": "HTML",
    }
    # Bug fix: requests has no default timeout; without one a stalled
    # connection hangs the agent indefinitely.
    response = requests.get(search_url, headers=headers, params=params, timeout=10)
    response.raise_for_status()
    payload = response.json()

    # Extract the search result items from the response.
    web_pages = payload.get("webPages", {})
    items = web_pages.get("value", [])

    # Build a list of dicts with 'title', 'href', and 'body' keys.
    search_results_list = [
        {
            "title": clean_text(item["name"]),
            "href": item["url"],
            "body": clean_text(item["snippet"]),
        }
        for item in items
    ]

    # Return the search results as a JSON string.
    return json.dumps(search_results_list, ensure_ascii=False, indent=4)
class TestAutoGPTBingSearch(unittest.TestCase):
    """Unit tests for the Bing search plugin wrapper."""

    def setUp(self):
        # Fake credentials so the plugin initialises; no real network
        # request is expected to succeed with this key.
        os.environ["BING_API_KEY"] = "test_key"
        os.environ["SEARCH_ENGINE"] = "bing"
        self.plugin = AutoGPTBingSearch()

    def tearDown(self):
        # Remove the test credentials so other test modules are unaffected.
        os.environ.pop("SEARCH_ENGINE", None)
        os.environ.pop("BING_API_KEY", None)

    def test_bing_search(self):
        # A dummy key should be rejected by the API with HTTP 401.
        # NOTE(review): if the request unexpectedly succeeds, this test
        # passes vacuously — consider asserting on the result as well.
        query = "test query"
        try:
            _bing_search(query)
        except requests.exceptions.HTTPError as e:
            self.assertEqual(e.response.status_code, 401)

    def test_pre_command(self):
        # Re-setting the env and re-creating the plugin is redundant
        # with setUp, but kept as-is; pre_command should rewrite a
        # "google" search into the bing_search command unchanged.
        os.environ["SEARCH_ENGINE"] = "bing"
        self.plugin = AutoGPTBingSearch()

        command_name, arguments = self.plugin.pre_command(
            "google", {"query": "test query"}
        )
        self.assertEqual(command_name, "bing_search")
        self.assertEqual(arguments, {"query": "test query"})

    def test_can_handle_pre_command(self):
        # The plugin must advertise pre_command so searches get rerouted.
        self.assertTrue(self.plugin.can_handle_pre_command())

    def test_can_handle_post_prompt(self):
        # The plugin must advertise post_prompt so its command registers.
        self.assertTrue(self.plugin.can_handle_post_prompt())


if __name__ == "__main__":
    unittest.main()
"""This is a Bluesky plugin for AutoGPT using atprototools."""
from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar

from auto_gpt_plugin_template import AutoGPTPluginTemplate

PromptGenerator = TypeVar("PromptGenerator")


class Message(TypedDict):
    """A single chat message exchanged with the model."""

    role: str
    content: str


class AutoGPTBluesky(AutoGPTPluginTemplate):
    """Bluesky plugin for AutoGPT using atprototools.

    Only ``post_prompt`` is active: it registers the Bluesky commands.
    Every other hook reports that it cannot handle its event.
    """

    def __init__(self):
        super().__init__()
        self._name = "autogpt-bluesky"
        self._version = "0.1.0"
        self._description = "Bluesky integration using atprototools."

    def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
        """This method is called just after generate_prompt is called,
        but actually before the prompt is generated.

        Args:
            prompt (PromptGenerator): The prompt generator.

        Returns:
            PromptGenerator: The generator, with the Bluesky commands
            added when credentials are configured.
        """
        # Imported lazily so the atproto dependency is only needed when
        # the plugin is actually loaded.
        from .bluesky_plugin.bluesky_plugin import (
            get_latest_posts,
            post_message,
            username_and_pwd_set,
        )

        # Without BLUESKY_USERNAME / BLUESKY_APP_PASSWORD the commands
        # could never succeed, so do not advertise them.
        if not username_and_pwd_set():
            return prompt

        # NOTE(review): the email plugin in this repo passes the
        # human-readable label as the FIRST add_command argument; here
        # the machine name comes first — verify against
        # PromptGenerator.add_command's signature.
        prompt.add_command(
            "post_to_bluesky", "Post to Bluesky", {"text": ""}, post_message
        )
        prompt.add_command(
            "get_bluesky_posts",
            "Get Bluesky Posts",  # fixed user-facing typo: was "Blueskey"
            {"username": "", "number_of_posts": ""},
            get_latest_posts,
        )

        return prompt

    def can_handle_on_response(self) -> bool:
        """True if the plugin handles on_response; it does not."""
        return False

    def on_response(self, response: str, *args, **kwargs) -> str:
        """Called when a response is received from the model; unused."""
        pass

    def can_handle_post_prompt(self) -> bool:
        """True if the plugin handles post_prompt; it does."""
        return True

    def can_handle_on_planning(self) -> bool:
        """True if the plugin handles on_planning; it does not."""
        return False

    def on_planning(
        self, prompt: PromptGenerator, messages: List[str]
    ) -> Optional[str]:
        """Called before the planning chat completion is done; unused.

        Args:
            prompt (PromptGenerator): The prompt generator.
            messages (List[str]): The list of messages.
        """
        pass

    def can_handle_post_planning(self) -> bool:
        """True if the plugin handles post_planning; it does not."""
        return False

    def post_planning(self, response: str) -> str:
        """Called after the planning chat completion is done; unused."""
        pass

    def can_handle_pre_instruction(self) -> bool:
        """True if the plugin handles pre_instruction; it does not."""
        return False

    def pre_instruction(self, messages: List[str]) -> List[str]:
        """Called before the instruction chat is done; unused."""
        pass

    def can_handle_on_instruction(self) -> bool:
        """True if the plugin handles on_instruction; it does not."""
        return False

    def on_instruction(self, messages: List[str]) -> Optional[str]:
        """Called when the instruction chat is done; unused."""
        pass

    def can_handle_post_instruction(self) -> bool:
        """True if the plugin handles post_instruction; it does not."""
        return False

    def post_instruction(self, response: str) -> str:
        """Called after the instruction chat is done; unused."""
        pass

    def can_handle_pre_command(self) -> bool:
        """True if the plugin handles pre_command; it does not."""
        return False

    def pre_command(
        self, command_name: str, arguments: Dict[str, Any]
    ) -> Tuple[str, Dict[str, Any]]:
        """Called before the command is executed; unused.

        Args:
            command_name (str): The command name.
            arguments (Dict[str, Any]): The arguments.

        Returns:
            Tuple[str, Dict[str, Any]]: The command name and arguments.
        """
        pass

    def can_handle_post_command(self) -> bool:
        """True if the plugin handles post_command; it does not."""
        return False

    def post_command(self, command_name: str, response: str) -> str:
        """Called after the command is executed; unused."""
        pass

    def can_handle_chat_completion(
        self,
        messages: list[Dict[Any, Any]],
        model: str,
        temperature: float,
        max_tokens: int,
    ) -> bool:
        """True if the plugin handles chat_completion; it does not.

        Args:
            messages (list[Dict[Any, Any]]): The messages.
            model (str): The model name.
            temperature (float): The temperature.
            max_tokens (int): The max tokens.
        """
        return False

    def handle_chat_completion(
        self,
        messages: list[Dict[Any, Any]],
        model: str,
        temperature: float,
        max_tokens: int,
    ) -> str:
        """Called when the chat completion is done; unused.

        Args:
            messages (list[Dict[Any, Any]]): The messages.
            model (str): The model name.
            temperature (float): The temperature.
            max_tokens (int): The max tokens.

        Returns:
            str: The resulting response (always None here).
        """
        return None

    def can_handle_text_embedding(self, text: str) -> bool:
        """True if the plugin handles text embedding; it does not."""
        return False

    def handle_text_embedding(self, text: str) -> list:
        """Called to compute a text embedding; unused."""
        pass

    def can_handle_user_input(self, user_input: str) -> bool:
        """True if the plugin handles user input; it does not."""
        return False

    def user_input(self, user_input: str) -> str:
        """Pass user input through unchanged."""
        return user_input

    def can_handle_report(self) -> bool:
        """True if the plugin handles reports; it does not."""
        return False

    def report(self, message: str) -> None:
        """Called with a report message; unused."""
        pass
def post_message(text: str) -> str:
    """Posts a message to Bluesky.

    Args:
        text (str): The message to post.

    Returns:
        str: A success message echoing the post, or an "Error! …" string
        if login or posting failed.
    """

    bluesky_username = os.getenv("BLUESKY_USERNAME")
    bluesky_app_password = os.getenv("BLUESKY_APP_PASSWORD")

    client = Client()

    try:
        client.login(bluesky_username, bluesky_app_password)
        client.send_post(text=text)
    except Exception as e:
        # Failures are reported back as a string so the agent can read
        # them instead of crashing the command loop.
        return f"Error! Message: {e}"

    return f"Success! Message: {text}"


def get_latest_posts(username: str, number_of_posts=5) -> str:
    """Gets the latest posts from a user.

    Args:
        username (str): The handle to fetch posts from.
        number_of_posts (int): How many posts to fetch (default 5).

    Returns:
        str: A text table of posts rendered via pandas, or an "Error! …"
        string if login or the feed request failed. (Annotation narrowed
        from ``str | None``: every code path returns a string.)
    """

    bluesky_username = os.getenv("BLUESKY_USERNAME")
    bluesky_app_password = os.getenv("BLUESKY_APP_PASSWORD")

    client = Client()

    try:
        client.login(bluesky_username, bluesky_app_password)
        profile_feed = client.bsky.feed.get_author_feed(
            {'actor': username, 'limit': number_of_posts})
    except Exception as e:
        return f"Error! Message: {e}"

    columns = ["URI", "Text", "Date", "User", "Likes", "Replies"]
    # assumes feed items expose post.record.text / post.record.createdAt
    # etc. per atproto's response models — TODO confirm against the
    # installed atproto client version.
    posts = [
        [
            feed.post.uri,
            feed.post.record.text,
            feed.post.record.createdAt,
            feed.post.author.handle,
            feed.post.likeCount,
            feed.post.replyCount,
        ]
        for feed in profile_feed.feed
    ]

    df = str(pd.DataFrame(posts, columns=columns))

    # Console echo kept for parity with the original behaviour.
    print(df)

    return df
/src/autogpt_plugins/email/README.md: -------------------------------------------------------------------------------- 1 | # Auto-GPT Email Plugin: Revolutionize Your Email Management with Auto-GPT 🚀 2 | 3 | The Auto-GPT Email Plugin is an innovative and powerful plugin for the groundbreaking base software, Auto-GPT. Harnessing the capabilities of the latest Auto-GPT architecture, Auto-GPT aims to autonomously achieve any goal you set, pushing the boundaries of what is possible with artificial intelligence. This email plugin takes Auto-GPT to the next level by enabling it to send and read emails, opening up a world of exciting use cases. 4 | 5 | [![Twitter Follow](https://img.shields.io/twitter/follow/riensen?style=social)](https://twitter.com/riensen) 6 | [![GitHub Repo stars](https://img.shields.io/github/stars/Significant-Gravitas/auto-gpt-plugins?style=social)](https://github.com/Significant-Gravitas/Auto-GPT-Plugins/stargazers) 7 | 8 | auto-gpt-email-plugin 9 | 10 | gmail-view-auto-gpt-email-plugin 11 | 12 | ## 🌟 Key Features 13 | 14 | - 📬 **Read Emails:** Effortlessly manage your inbox with Auto-GPT's email reading capabilities, ensuring you never miss important information. 15 | - 📤 **Auto-Compose and Send Emails**: Auto-GPT crafts personalized, context-aware emails using its advanced language model capabilities, saving you time and effort. 16 | - 📝 **Save Emails to Drafts Folder:** Gain more control by letting Auto-GPT create email drafts that you can review and edit before sending, ensuring your messages are fine-tuned to your preferences. 17 | - 📎 **Send Emails with Attachments:** Effortlessly send emails with attachments, making your communication richer and more comprehensive. 18 | - 🛡️ **Custom Email Signature:** Personalize your emails with a custom Auto-GPT signature, adding a touch of automation to every message sent by Auto-GPT. 
19 | - 🎯 **Auto-Reply and Answer Questions:** Streamline your email responses by letting Auto-GPT intelligently read, analyze, and reply to incoming messages with accurate answers. 20 | - 🔌 **Seamless Integration with Auto-GPT:** Enjoy easy setup and integration with the base Auto-GPT software, opening up a world of powerful automation possibilities. 21 | 22 | Unlock the full potential of your email management with the Auto-GPT Email Plugin and revolutionize your email experience today! 🚀 23 | 24 | ## 🔧 Installation 25 | 26 | Follow these steps to configure the Auto-GPT Email Plugin: 27 | 28 | ### 1. Follow Auto-GPT-Plugins Installation Instructions 29 | Follow the instructions as per the [Auto-GPT-Plugins/README.md](https://github.com/Significant-Gravitas/Auto-GPT-Plugins/blob/master/README.md) 30 | 31 | ### 2. Locate the `.env.template` file 32 | Find the file named `.env.template` in the main `/Auto-GPT` folder. 33 | 34 | ### 3. Create and rename a copy of the file 35 | Duplicate the `.env.template` file and rename the copy to `.env` inside the `/Auto-GPT` folder. 36 | 37 | ### 4. Edit the `.env` file 38 | Open the `.env` file in a text editor. Note: Files starting with a dot might be hidden by your operating system. 39 | 40 | ### 5. Add email configuration settings 41 | Append the following configuration settings to the end of the file: 42 | 43 | ```ini 44 | ################################################################################ 45 | ### EMAIL (SMTP / IMAP) 46 | ################################################################################ 47 | 48 | EMAIL_ADDRESS= 49 | EMAIL_PASSWORD= 50 | EMAIL_SMTP_HOST=smtp.gmail.com 51 | EMAIL_SMTP_PORT=587 52 | EMAIL_IMAP_SERVER=imap.gmail.com 53 | 54 | # Optional Settings 55 | EMAIL_MARK_AS_SEEN=False 56 | EMAIL_SIGNATURE="This was sent by Auto-GPT" 57 | EMAIL_DRAFT_MODE_WITH_FOLDER=[Gmail]/Drafts 58 | ``` 59 | 60 | 1. **Email address and password:** 61 | - Set `EMAIL_ADDRESS` to your sender email address. 
62 | - Set `EMAIL_PASSWORD` to your password. For Gmail, use an [App Password](https://myaccount.google.com/apppasswords). 63 | 64 | 2. **Provider-specific settings:** 65 | - If not using Gmail, adjust `EMAIL_SMTP_HOST`, `EMAIL_IMAP_SERVER`, and `EMAIL_SMTP_PORT` according to your email provider's settings. 66 | 67 | 3. **Optional settings:** 68 | - `EMAIL_MARK_AS_SEEN`: By default, processed emails are not marked as `SEEN`. Set to `True` to change this. 69 | - `EMAIL_SIGNATURE`: By default, no email signature is included. Configure this parameter to add a custom signature to each message sent by Auto-GPT. 70 | - `EMAIL_DRAFT_MODE_WITH_FOLDER`: Prevents emails from being sent and instead stores them as drafts in the specified IMAP folder. `[Gmail]/Drafts` is the default drafts folder for Gmail. 71 | 72 | 73 | ### 6. Allowlist Plugin 74 | In your `.env` search for `ALLOWLISTED_PLUGINS` and add this Plugin: 75 | 76 | ```ini 77 | ################################################################################ 78 | ### ALLOWLISTED PLUGINS 79 | ################################################################################ 80 | 81 | #ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3) 82 | ALLOWLISTED_PLUGINS=AutoGPTEmailPlugin 83 | ``` 84 | 85 | ## 🧪 Test the Auto-GPT Email Plugin 86 | 87 | Experience the plugin's capabilities by testing it for sending and receiving emails. 88 | 89 | ### 📤 Test Sending Emails 90 | 91 | 1. **Configure Auto-GPT:** 92 | Set up Auto-GPT with the following parameters: 93 | - Name: `CommunicatorGPT` 94 | - Role: `Communicate` 95 | - Goals: 96 | 1. Goal 1: `Send an email to my-email-plugin-test@trash-mail.com to introduce yourself` 97 | 2. Goal 2: `Terminate` 98 | 99 | 2. **Run Auto-GPT:** 100 | Launch Auto-GPT, which should use the email plugin to send an email to my-email-plugin-test@trash-mail.com. 101 | 102 | 3. **Verify the email:** 103 | Check your outbox to confirm that the email was sent. 
Visit [trash-mail.com](https://www.trash-mail.com/) and enter your chosen email to ensure the email was received. 104 | 105 | 4. **Sample email content:** 106 | Auto-GPT might send the following email: 107 | ``` 108 | Hello, 109 | 110 | My name is CommunicatorGPT, and I am an LLM. I am writing to introduce myself and to let you know that I will be terminating shortly. Thank you for your time. 111 | 112 | Best regards, 113 | CommunicatorGPT 114 | ``` 115 | 116 | ### 📬 Test Receiving Emails and Replying Back 117 | 118 | 1. **Send a test email:** 119 | Compose an email with a simple question from a [trash-mail.com](https://www.trash-mail.com/) email address to your configured `EMAIL_ADDRESS` in your `.env` file. 120 | 121 | 2. **Configure Auto-GPT:** 122 | Set up Auto-GPT with the following parameters: 123 | - Name: `CommunicatorGPT` 124 | - Role: `Communicate` 125 | - Goals: 126 | 1. Goal 1: `Read my latest emails` 127 | 2. Goal 2: `Send back an email with an answer` 128 | 3. Goal 3: `Terminate` 129 | 130 | 3. **Run Auto-GPT:** 131 | Launch Auto-GPT, which should automatically reply to the email with an answer. 132 | 133 | ### 🎁 Test Sending Emails with Attachment 134 | 135 | 1. **Send a test email:** 136 | Compose an email with a simple question from a [trash-mail.com](https://www.trash-mail.com/) email address to your configured `EMAIL_ADDRESS` in your `.env` file. 137 | 138 | 2. **Place attachment in Auto-GPT workspace folder** 139 | Insert the attachment intended for sending into the Auto-GPT workspace folder, typically named auto_gpt_workspace, which is located within the cloned [Auto-GPT](https://github.com/Significant-Gravitas/Auto-GPT) Github repository. 140 | 141 | 3. **Configure Auto-GPT:** 142 | Set up Auto-GPT with the following parameters: 143 | - Name: `CommunicatorGPT` 144 | - Role: `Communicate` 145 | - Goals: 146 | 1. Goal 1: `Read my latest emails` 147 | 2. Goal 2: `Send back an email with an answer and always attach happy.png` 148 | 3. 
"""This is the email plugin for Auto-GPT."""
from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar

from auto_gpt_plugin_template import AutoGPTPluginTemplate
from colorama import Fore

PromptGenerator = TypeVar("PromptGenerator")


class Message(TypedDict):
    """A single chat message exchanged with the model."""

    role: str
    content: str


class AutoGPTEmailPlugin(AutoGPTPluginTemplate):
    """This is the Auto-GPT email plugin.

    Registers commands to read and send email (with or without an
    attachment) when EMAIL_ADDRESS and EMAIL_PASSWORD are configured.
    Only ``post_prompt`` is active; all other hooks are declined.
    """

    def __init__(self):
        super().__init__()
        self._name = "Auto-GPT-Email-Plugin"
        self._version = "0.2.0"
        self._description = "This plugin reads and send emails."

    def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
        """Register the email commands on the prompt generator.

        Args:
            prompt (PromptGenerator): The prompt generator.

        Returns:
            PromptGenerator: The generator, with the email commands
            added when credentials are configured.
        """
        # Imported lazily so the email helpers (and their env checks)
        # are only needed when the plugin is loaded.
        from .email_plugin.email_plugin import (
            bothEmailAndPwdSet,
            read_emails,
            send_email,
            send_email_with_attachment,
        )

        if bothEmailAndPwdSet():
            prompt.add_command(
                "Read Emails",
                "read_emails",
                {
                    "imap_folder": "",
                    "imap_search_command": "",
                    "limit": "",
                    "page": "",
                },
                read_emails,
            )
            prompt.add_command(
                "Send Email",
                "send_email",
                {"to": "", "subject": "", "body": ""},
                send_email,
            )
            prompt.add_command(
                # Fixed duplicate label: this command previously shared
                # the "Send Email" label with send_email, making the two
                # commands indistinguishable in the prompt.
                "Send Email with Attachment",
                "send_email_with_attachment",
                {
                    "to": "",
                    "subject": "",
                    "body": "",
                    "filename": "",
                },
                send_email_with_attachment,
            )
        else:
            # NOTE(review): no Fore.RESET after this message — following
            # console output may stay red; confirm intended.
            print(
                Fore.RED
                + f"{self._name} - {self._version} - Email plugin not loaded, because EMAIL_PASSWORD or EMAIL_ADDRESS were not set in env."
            )

        return prompt

    def can_handle_post_prompt(self) -> bool:
        """True if the plugin handles post_prompt; it does."""
        return True

    def can_handle_on_response(self) -> bool:
        """True if the plugin handles on_response; it does not."""
        return False

    def on_response(self, response: str, *args, **kwargs) -> str:
        """Called when a response is received from the model; unused."""
        pass

    def can_handle_on_planning(self) -> bool:
        """True if the plugin handles on_planning; it does not."""
        return False

    def on_planning(
        self, prompt: PromptGenerator, messages: List[Message]
    ) -> Optional[str]:
        """Called before the planning chat completion is done; unused.

        Args:
            prompt (PromptGenerator): The prompt generator.
            messages (List[Message]): The list of messages.
        """
        pass

    def can_handle_post_planning(self) -> bool:
        """True if the plugin handles post_planning; it does not."""
        return False

    def post_planning(self, response: str) -> str:
        """Called after the planning chat completion is done; unused."""
        pass

    def can_handle_pre_instruction(self) -> bool:
        """True if the plugin handles pre_instruction; it does not."""
        return False

    def pre_instruction(self, messages: List[Message]) -> List[Message]:
        """Called before the instruction chat is done; unused."""
        pass

    def can_handle_on_instruction(self) -> bool:
        """True if the plugin handles on_instruction; it does not."""
        return False

    def on_instruction(self, messages: List[Message]) -> Optional[str]:
        """Called when the instruction chat is done; unused."""
        pass

    def can_handle_post_instruction(self) -> bool:
        """True if the plugin handles post_instruction; it does not."""
        return False

    def post_instruction(self, response: str) -> str:
        """Called after the instruction chat is done; unused."""
        pass

    def can_handle_pre_command(self) -> bool:
        """True if the plugin handles pre_command; it does not."""
        return False

    def pre_command(
        self, command_name: str, arguments: Dict[str, Any]
    ) -> Tuple[str, Dict[str, Any]]:
        """Called before the command is executed; unused.

        Args:
            command_name (str): The command name.
            arguments (Dict[str, Any]): The arguments.

        Returns:
            Tuple[str, Dict[str, Any]]: The command name and arguments.
        """
        pass

    def can_handle_post_command(self) -> bool:
        """True if the plugin handles post_command; it does not."""
        return False

    def post_command(self, command_name: str, response: str) -> str:
        """Called after the command is executed; unused."""
        pass

    def can_handle_chat_completion(
        self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int
    ) -> bool:
        """True if the plugin handles chat_completion; it does not.

        Args:
            messages (Dict[Any, Any]): The messages.
            model (str): The model name.
            temperature (float): The temperature.
            max_tokens (int): The max tokens.
        """
        return False

    def handle_chat_completion(
        self, messages: List[Message], model: str, temperature: float, max_tokens: int
    ) -> str:
        """Called when the chat completion is done; unused.

        Args:
            messages (List[Message]): The messages.
            model (str): The model name.
            temperature (float): The temperature.
            max_tokens (int): The max tokens.

        Returns:
            str: The resulting response.
        """
        pass

    def can_handle_text_embedding(self, text: str) -> bool:
        """True if the plugin handles text embedding; it does not."""
        return False

    def handle_text_embedding(self, text: str) -> list:
        """Called to compute a text embedding; unused."""
        pass

    def can_handle_user_input(self, user_input: str) -> bool:
        """True if the plugin handles user input; it does not."""
        return False

    def user_input(self, user_input: str) -> str:
        """Pass user input through unchanged."""
        return user_input

    def can_handle_report(self) -> bool:
        """True if the plugin handles reports; it does not."""
        return False

    def report(self, message: str) -> None:
        """Called with a report message; unused."""
        pass
"""This is the News search engine plugin for Auto-GPT."""
import os
from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar

from auto_gpt_plugin_template import AutoGPTPluginTemplate

from .news_search import NewsSearch

PromptGenerator = TypeVar("PromptGenerator")


class Message(TypedDict):
    role: str
    content: str


class AutoGPTNewsSearch(AutoGPTPluginTemplate):
    """News search plugin: registers a `news_search` command backed by NewsAPI.

    Only the post_prompt hook is active; every other plugin hook reports
    "cannot handle" and is a pass-through stub required by the template.
    """

    def __init__(self):
        super().__init__()
        self._name = "News-Search-Plugin"
        self._version = "0.1.0"
        self._description = "This plugin searches the latest news using the provided query and the newsapi aggregator"
        # Truthy only when the API key is configured; gates command registration.
        self.load_commands = os.getenv(
            "NEWSAPI_API_KEY"
        )  # Wrapper, if more variables are needed in future
        self.news_search = NewsSearch(os.getenv("NEWSAPI_API_KEY"))

    def can_handle_post_prompt(self) -> bool:
        return True

    def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
        """Register the `news_search` command when an API key is available.

        Args:
            prompt (PromptGenerator): The prompt generator to extend.

        Returns:
            PromptGenerator: The same generator, possibly with the command added.
        """
        if self.load_commands:
            # Add News Search command
            prompt.add_command(
                "News Search",
                "news_search",
                {"query": ""},
                self.news_search.news_everything_search,
            )
        else:
            print(
                "Warning: News-Search-Plugin is not fully functional. "
                "Please set the NEWSAPI_API_KEY environment variable."
            )
        return prompt

    # NOTE(fix): the original file defined can_handle_pre_command twice
    # (a dead duplicate later in the class); the duplicate has been removed.
    def can_handle_pre_command(self) -> bool:
        return False

    def pre_command(
        self, command_name: str, arguments: Dict[str, Any]
    ) -> Tuple[str, Dict[str, Any]]:
        pass

    def can_handle_post_command(self) -> bool:
        return False

    def post_command(self, command_name: str, response: str) -> str:
        pass

    def can_handle_on_planning(self) -> bool:
        return False

    def on_planning(
        self, prompt: PromptGenerator, messages: List[Message]
    ) -> Optional[str]:
        pass

    def can_handle_on_response(self) -> bool:
        return False

    def on_response(self, response: str, *args, **kwargs) -> str:
        pass

    def can_handle_post_planning(self) -> bool:
        return False

    def post_planning(self, response: str) -> str:
        pass

    def can_handle_pre_instruction(self) -> bool:
        return False

    def pre_instruction(self, messages: List[Message]) -> List[Message]:
        pass

    def can_handle_on_instruction(self) -> bool:
        return False

    def on_instruction(self, messages: List[Message]) -> Optional[str]:
        pass

    def can_handle_post_instruction(self) -> bool:
        return False

    def post_instruction(self, response: str) -> str:
        pass

    def can_handle_chat_completion(
        self, messages: List[Message], model: str, temperature: float, max_tokens: int
    ) -> bool:
        return False

    def handle_chat_completion(
        self, messages: List[Message], model: str, temperature: float, max_tokens: int
    ) -> str:
        pass

    def can_handle_text_embedding(self, text: str) -> bool:
        return False

    def handle_text_embedding(self, text: str) -> list:
        pass

    def can_handle_user_input(self, user_input: str) -> bool:
        return False

    def user_input(self, user_input: str) -> str:
        return user_input

    def can_handle_report(self) -> bool:
        return False

    def report(self, message: str) -> None:
        pass
import concurrent.futures
from typing import List

# Categories supported by NewsAPI's top-headlines endpoint.
categories = ["technology", "business", "entertainment", "health", "sports", "science"]


class NewsSearch(object):
    """Thin wrapper around the NewsAPI client for headline/article searches."""

    def __init__(self, api_key):
        # Imported lazily so this module can be imported (e.g. by tests that
        # inject a fake client) without the `newsapi` package installed.
        from newsapi import NewsApiClient

        self.news_api_client = NewsApiClient(api_key)

    def news_headlines_search(self, category: str, query: str) -> List[str]:
        """
        Get top news headlines for the category specified.

        Args:
            category (str): The category specified. Must be one of technology,
                business, entertainment, health, sports or science.
            query (str): Keywords to filter the headlines by.

        Returns:
            list(str): Up to three top headline titles for the category.
        """
        result = self.news_api_client.get_top_headlines(
            category=category, language="en", country="us", page=1, q=query
        )
        return [article["title"] for article in result["articles"][:3]]

    def news_everything_search(self, query: str) -> List[str]:
        """
        Get all news for the query specified.

        Args:
            query (str): The query specified.

        Returns:
            list(str): Titles of the matching articles, sorted by relevancy.
        """
        result = self.news_api_client.get_everything(
            language="en", page=1, q=query, sort_by="relevancy"
        )
        return [article["title"] for article in result["articles"]]

    def news_headlines_search_wrapper(self, query: str) -> List[str]:
        """
        Aggregate top news headlines across all supported categories.

        Args:
            query (str): Keywords to filter the headlines by.

        Returns:
            list(list(str)): One headline list per category, in the same order
                as `categories` (now deterministic).
        """
        with concurrent.futures.ThreadPoolExecutor() as tp:
            futures = [
                tp.submit(self.news_headlines_search, category=cat, query=query)
                for cat in categories
            ]
            # Fix: collect results in submission order. The original iterated
            # concurrent.futures.wait(futures)[0], which is a *set* of done
            # futures, so the aggregated order was nondeterministic.
            return [fut.result() for fut in futures]
self.NewsSearch.news_api_client.get_everything = Mock( 38 | side_effect=self.mock_response 39 | ) 40 | 41 | def test_news_search(self): 42 | # For AI, only technology should be populated. However, we can't rely on ordering, 43 | # so we'll assert one actual answer and 5 empty answers 44 | actual_output_autogpt = self.NewsSearch.news_headlines_search_wrapper("AI") 45 | assert actual_output_autogpt.count(["AutoGPT"]) == 1 46 | assert actual_output_autogpt.count([]) == 5 47 | 48 | # For Cricket, we should have sports/entertainment 49 | actual_output_cricket = self.NewsSearch.news_headlines_search_wrapper("Cricket") 50 | assert actual_output_cricket.count(["World Cup"]) == 2 51 | assert actual_output_cricket.count([]) == 4 52 | 53 | actual_output_taylor = self.NewsSearch.news_everything_search("Taylor Swift") 54 | assert actual_output_taylor.count(["Taylor Swift"]) == 0 55 | assert actual_output_taylor.count([]) == 0 56 | -------------------------------------------------------------------------------- /src/autogpt_plugins/planner/README.md: -------------------------------------------------------------------------------- 1 | # AutoGPT Planner Plugin 2 | Simple planning commands for planning leveraged with chatgpt3.5 and json objects to keep track of its progress on a list of tasks. 
3 | 4 | ![image](https://user-images.githubusercontent.com/12145726/235688701-af549b76-7f9f-4426-9c88-dd72aca45685.png) 5 | 6 | 7 | ### Getting started 8 | 9 | After you clone the plugin from the original repo (https://github.com/rihp/autogpt-planner-plugin) Add it to the plugins folder of your AutoGPT repo and then run AutoGPT 10 | 11 | ![image](https://user-images.githubusercontent.com/12145726/235688224-7abf6ae4-5c0a-4e2d-b1b2-18241c6d74b4.png) 12 | 13 | Remember to also update your .env to include 14 | 15 | ``` 16 | ALLOWLISTED_PLUGINS=PlannerPlugin 17 | ``` 18 | 19 | 20 | 21 | # New commands 22 | ```python 23 | prompt.add_command( 24 | "check_plan", 25 | "Read the plan.md with the next goals to achieve", 26 | {}, 27 | check_plan, 28 | ) 29 | 30 | prompt.add_command( 31 | "run_planning_cycle", 32 | "Improves the current plan.md and updates it with progress", 33 | {}, 34 | update_plan, 35 | ) 36 | 37 | prompt.add_command( 38 | "create_task", 39 | "creates a task with a task id, description and a completed status of False ", 40 | { 41 | "task_id": "", 42 | "task_description": "", 43 | }, 44 | create_task, 45 | ) 46 | 47 | prompt.add_command( 48 | "load_tasks", 49 | "Checks out the task ids, their descriptionsand a completed status", 50 | {}, 51 | load_tasks, 52 | ) 53 | 54 | prompt.add_command( 55 | "mark_task_completed", 56 | "Updates the status of a task and marks it as completed", 57 | {"task_id": ""}, 58 | update_task_status, 59 | ) 60 | ``` 61 | 62 | # New config options 63 | By default, the plugin is set ot use what ever your `FAST_LLM_MODEL` environment variable is set to, if none is set it 64 | will fall back to `gpt-3.5-turbo`. If you want to set it individually to a different model you can do that by setting 65 | the environment variable `PLANNER_MODEL` to the model you want to use (example: `gpt-4`). 66 | 67 | Similarly, the token limit defaults to the `FAST_TOKEN_LIMIT` environment variable, if none is set it will fall 68 | back to `1500`. 
If you want to set it individually to a different limit for the plugin you can do that by setting 69 | `PLANNER_TOKEN_LIMIT` to the desired limit (example: `7500`). 70 | 71 | And last, but not least, the temperature used defaults to the `TEMPERATURE` environment variable, if none is set it will 72 | fall back to `0.5`. If you want to set it individually to a different temperature for the plugin you can do that by 73 | setting `PLANNER_TEMPERATURE` to the desired temperature (example: `0.3`). 74 | 75 | 76 | ## CODE SAMPLES 77 | 78 | Example of generating an improved plan 79 | ```python 80 | def generate_improved_plan(prompt: str) -> str: 81 | """Generate an improved plan using OpenAI's ChatCompletion functionality""" 82 | 83 | import openai 84 | 85 | tasks = load_tasks() 86 | 87 | # Call the OpenAI API for chat completion 88 | response = openai.ChatCompletion.create( 89 | model="gpt-3.5-turbo", 90 | messages=[ 91 | { 92 | "role": "system", 93 | "content": "You are an assistant that improves and adds crucial points to plans in .md format.", 94 | }, 95 | { 96 | "role": "user", 97 | "content": f"Update the following plan given the task status below, keep the .md format:\n{prompt}\nInclude the current tasks in the improved plan, keep mind of their status and track them with a checklist:\n{tasks}\Revised version should comply with the contests of the tasks at hand:", 98 | }, 99 | ], 100 | max_tokens=1500, 101 | n=1, 102 | temperature=0.5, 103 | ) 104 | ``` 105 | 106 | 107 | ## Testing workflow 108 | 109 | Clone the repo and modify the functionality, when you're done you can run 110 | ``` 111 | zip -ru ../fork/plugins/planner.zip . 
import json
import os


def _workspace_path(filename: str) -> str:
    """Return the absolute path of *filename* inside the AutoGPT workspace.

    Creates the `auto_gpt_workspace` directory if it does not exist yet, so
    callers can open files for writing without a FileNotFoundError on first
    use (the original code crashed when the workspace was missing).
    """
    workdir = os.path.join(os.getcwd(), "auto_gpt_workspace")
    os.makedirs(workdir, exist_ok=True)
    return os.path.join(workdir, filename)


def check_plan():
    """Return the contents of plan.md, creating a starter plan if missing."""
    file_name = _workspace_path("plan.md")

    if not os.path.exists(file_name):
        with open(file_name, "w") as file:
            file.write(
                """
# Task List and status:
- [ ] Create a detailed checklist for the current plan and goals
- [ ] Finally, review that every new task is completed

## Notes:
- Use the run_planning_cycle command frequently to keep this plan up to date.
"""
            )
        print(f"{file_name} created.")

    with open(file_name, "r") as file:
        return file.read()


def update_plan():
    """Regenerate plan.md from its current contents plus task status.

    Returns:
        str: The improved plan that was written back to plan.md.
    """
    file_name = _workspace_path("plan.md")

    with open(file_name, "r") as file:
        data = file.read()

    response = generate_improved_plan(data)

    with open(file_name, "w") as file:
        file.write(response)
    print(f"{file_name} updated.")

    return response


def generate_improved_plan(prompt: str) -> str:
    """Generate an improved plan using OpenAI's ChatCompletion functionality"""

    # Imported here so the module loads even when `openai` is not installed.
    import openai

    tasks = load_tasks()

    # Model/limits are configurable per plugin, falling back to the global
    # FAST_* settings and finally to hard-coded defaults.
    model = os.getenv("PLANNER_MODEL", os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo"))
    max_tokens = os.getenv("PLANNER_TOKEN_LIMIT", os.getenv("FAST_TOKEN_LIMIT", 1500))
    temperature = os.getenv("PLANNER_TEMPERATURE", os.getenv("TEMPERATURE", 0.5))

    # Call the OpenAI API for chat completion
    response = openai.ChatCompletion.create(
        model=model,
        messages=[
            {
                "role": "system",
                "content": "You are an assistant that improves and adds crucial points to plans in .md format.",
            },
            {
                "role": "user",
                "content": f"Update the following plan given the task status below, keep the .md format:\n{prompt}\n"
                f"Include the current tasks in the improved plan, keep mind of their status and track them "
                f"with a checklist:\n{tasks}\n Revised version should comply with the contents of the "
                f"tasks at hand:",
            },
        ],
        max_tokens=int(max_tokens),
        n=1,
        temperature=float(temperature),
    )

    # Extract the improved plan from the response
    improved_plan = response.choices[0].message.content.strip()
    return improved_plan


def create_task(task_id=None, task_description: str = None, status=False):
    """Create or overwrite a task entry and persist the task table.

    Args:
        task_id: Key under which the task is stored (stringified).
        task_description (str): Human-readable description of the task.
        status (bool): Completed flag; defaults to False.

    Returns:
        dict: The full task table after the insert.
    """
    tasks = load_tasks()
    tasks[str(task_id)] = {"description": task_description, "completed": status}

    with open(_workspace_path("tasks.json"), "w") as f:
        json.dump(tasks, f)

    return tasks


def load_tasks() -> dict:
    """Load the task table from tasks.json, returning {} when absent or corrupt."""
    file_name = _workspace_path("tasks.json")

    if not os.path.exists(file_name):
        with open(file_name, "w") as f:
            f.write("{}")

    with open(file_name) as f:
        try:
            tasks = json.load(f)
            # Older versions stored a list; treat that as an empty table.
            if isinstance(tasks, list):
                tasks = {}
        except json.JSONDecodeError:
            tasks = {}

    return tasks


def update_task_status(task_id):
    """Mark the task *task_id* as completed.

    Returns:
        str | None: A confirmation message, or None if the task is unknown.
    """
    tasks = load_tasks()

    if str(task_id) not in tasks:
        print(f"Task with ID {task_id} not found.")
        return

    tasks[str(task_id)]["completed"] = True

    with open(_workspace_path("tasks.json"), "w") as f:
        json.dump(tasks, f)

    return f"Task with ID {task_id} has been marked as completed."
"""Random Values commands."""
from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar
from auto_gpt_plugin_template import AutoGPTPluginTemplate
try:
    from .random_values import RandomValues
except ImportError:
    from random_values import RandomValues

PromptGenerator = TypeVar("PromptGenerator")

class Message(TypedDict):
    role: str
    content: str


class AutoGPTRandomValues(AutoGPTPluginTemplate):
    """Auto-GPT plugin exposing random-value generation commands."""

    def __init__(self):
        super().__init__()
        self._name = "AutoGPTRandomValues"
        self._version = "0.1.2"
        self._description = "Enable Auto-GPT with the power of random values."
        # Command implementations live in RandomValues; post_prompt() binds
        # the registered commands to its methods.
        self.plugin_class = RandomValues(self)

    # --- active hook: command registration ---------------------------------

    def can_handle_post_prompt(self) -> bool:
        """Only the post_prompt hook is used by this plugin."""
        return True

    def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
        """Register the five random-value commands on the prompt generator."""
        command_table = (
            (
                "rnd_num",
                "Random Numbers",
                {"min": "", "max": "", "cnt": ""},
                self.plugin_class.random_number,
            ),
            ("uuids", "Make UUIDs", {"cnt": ""}, self.plugin_class.make_uuids),
            (
                "make_str",
                "Generate Strings",
                {"len": "", "cnt": ""},
                self.plugin_class.generate_string,
            ),
            (
                "pwds",
                "Create Passwords",
                {"len": "", "cnt": ""},
                self.plugin_class.generate_password,
            ),
            (
                "lorem_ipsum",
                "Create Lorem Sentences",
                {"cnt": ""},
                self.plugin_class.generate_placeholder_text,
            ),
        )
        for cmd_name, cmd_desc, cmd_args, cmd_handler in command_table:
            prompt.add_command(cmd_name, cmd_desc, cmd_args, cmd_handler)  # type: ignore
        return prompt

    # --- unused hooks: report "cannot handle" and pass values through ------

    def can_handle_on_response(self) -> bool:
        return False

    def on_response(self, response: str, *args, **kwargs) -> str:
        return response

    def can_handle_on_planning(self) -> bool:
        return False

    def on_planning(
        self, prompt: PromptGenerator, messages: List[str]
    ) -> Optional[str]:
        pass

    def can_handle_post_planning(self) -> bool:
        return False

    def post_planning(self, response: str) -> str:
        return response

    def can_handle_pre_instruction(self) -> bool:
        return False

    def pre_instruction(self, messages: List[str]) -> List[str]:
        return messages

    def can_handle_on_instruction(self) -> bool:
        return False

    def on_instruction(self, messages: List[str]) -> Optional[str]:
        pass

    def can_handle_post_instruction(self) -> bool:
        return False

    def post_instruction(self, response: str) -> str:
        return response

    def can_handle_pre_command(self) -> bool:
        return False

    def pre_command(
        self, command_name: str, arguments: Dict[str, Any]
    ) -> Tuple[str, Dict[str, Any]]:
        return command_name, arguments

    def can_handle_post_command(self) -> bool:
        return False

    def post_command(self, command_name: str, response: str) -> str:
        return response

    def can_handle_chat_completion(
        self,
        messages: list[Dict[Any, Any]],
        model: str,
        temperature: float,
        max_tokens: int,
    ) -> bool:
        return False

    def handle_chat_completion(
        self,
        messages: list[Dict[Any, Any]],
        model: str,
        temperature: float,
        max_tokens: int,
    ) -> str:
        return ''

    def can_handle_text_embedding(self, text: str) -> bool:  # type: ignore
        return False

    def handle_text_embedding(self, text: str) -> list:  # type: ignore
        pass

    def can_handle_user_input(self, user_input: str) -> bool:
        return False

    def user_input(self, user_input: str) -> str:
        return user_input

    def can_handle_report(self) -> bool:
        return False

    def report(self, message: str) -> None:
        pass
204 | Returns: 205 | PromptGenerator: The prompt generator. 206 | """ 207 | 208 | prompt.add_command( # type: ignore 209 | "rnd_num", 210 | "Random Numbers", 211 | {"min": "", "max": "", "cnt": ""}, 212 | self.plugin_class.random_number, 213 | ) 214 | prompt.add_command( # type: ignore 215 | "uuids", 216 | "Make UUIDs", 217 | {"cnt": ""}, 218 | self.plugin_class.make_uuids 219 | ) 220 | prompt.add_command( # type: ignore 221 | "make_str", 222 | "Generate Strings", 223 | {"len": "", "cnt": ""}, 224 | self.plugin_class.generate_string, 225 | ) 226 | prompt.add_command( # type: ignore 227 | "pwds", 228 | "Create Passwords", 229 | {"len": "", "cnt": ""}, 230 | self.plugin_class.generate_password, 231 | ) 232 | prompt.add_command( # type: ignore 233 | "lorem_ipsum", 234 | "Create Lorem Sentences", 235 | {"cnt": ""}, 236 | self.plugin_class.generate_placeholder_text, 237 | ) 238 | return prompt 239 | 240 | def can_handle_text_embedding(self, text: str) -> bool: # type: ignore 241 | return False 242 | 243 | def handle_text_embedding(self, text: str) -> list: # type: ignore 244 | pass 245 | 246 | def can_handle_user_input(self, user_input: str) -> bool: 247 | return False 248 | 249 | def user_input(self, user_input: str) -> str: 250 | return user_input 251 | 252 | def can_handle_report(self) -> bool: 253 | return False 254 | 255 | def report(self, message: str) -> None: 256 | pass 257 | -------------------------------------------------------------------------------- /src/autogpt_plugins/random_values/random_values.py: -------------------------------------------------------------------------------- 1 | """Random Values classes for Autogpt.""" 2 | 3 | import json 4 | import random 5 | import string 6 | import uuid 7 | 8 | import lorem 9 | 10 | """Random Number function for Autogpt.""" 11 | 12 | class RandomValues: 13 | """Random Values plugin for Auto-GPT.""" 14 | 15 | def __init__(self, plugin): 16 | self.plugin = plugin 17 | 18 | 19 | def random_number(self, min:int|str = 
0, max:int|str = 65535, cnt:int|str = 1) -> str: 20 | """ 21 | Return a random integer between min and max 22 | 23 | Args: 24 | min (int): The minimum value 25 | max (int): The maximum value 26 | cnt (int): The number of random numbers to return 27 | 28 | Returns: 29 | str: a json array with 1 to "count" random numbers in the format 30 | [""] 31 | """ 32 | 33 | # Type-check the arguments 34 | try: 35 | min = int(min) 36 | except ValueError: 37 | raise ValueError("min must be an integer") 38 | try: 39 | max = int(max) 40 | except ValueError: 41 | raise ValueError("max must be an integer") 42 | try: 43 | cnt = int(cnt) 44 | except ValueError: 45 | raise ValueError("cnt must be an integer") 46 | 47 | # Ensure min is less than max 48 | if min > max: 49 | min, max = max, min 50 | 51 | # Test ranges 52 | if not (1 <= cnt <= 65535): 53 | raise ValueError("cnt must be between 1 and 65535") 54 | if not (0 <= min <= 65535): 55 | raise ValueError("min must be between 0 and 65535") 56 | if not (0 <= max <= 65535): 57 | raise ValueError("max must be between 0 and 65535") 58 | 59 | # Make random numbers 60 | random_numbers = [] 61 | if isinstance(min, int) and isinstance(max, int): 62 | for _ in range(cnt): 63 | random_numbers.append(random.randint(min, max)) 64 | else: 65 | for _ in range(cnt): 66 | random_numbers.append(random.uniform(min, max)) 67 | 68 | return json.dumps(random_numbers) 69 | 70 | # End of random_number() 71 | 72 | 73 | def make_uuids(self, cnt:int|str = 1) -> str: 74 | """ 75 | Return a UUID 76 | 77 | Args: 78 | cnt (int): The number of UUIDs to return 79 | 80 | Returns: 81 | str: a json array with 1 to "count" UUIDs 82 | [""] 83 | """ 84 | 85 | # Type-check the arguments 86 | if not isinstance(cnt, int): 87 | try: 88 | cnt = int(cnt) 89 | except ValueError: 90 | raise ValueError("cnt must be an integer") 91 | 92 | # Make values sane 93 | if not (1 <= cnt <= 65535): 94 | raise ValueError("cnt must be between 1 and 65535") 95 | 96 | # Do the thing 97 | uuids 
= [] 98 | for _ in range(cnt): 99 | uuids.append(str(uuid.uuid4())) 100 | 101 | return json.dumps(uuids) 102 | 103 | # End of make_uuids() 104 | 105 | 106 | def generate_string(self, len:int|str = 10, cnt:int|str = 1) -> str: 107 | """ 108 | Return a random string 109 | 110 | Args: 111 | len (int): The length of the string 112 | cnt (int): The number of strings to return 113 | 114 | Returns: 115 | str: a json array with 1 to "count" strings of "length" length 116 | [""] 117 | """ 118 | 119 | # Type-check the arguments 120 | if not isinstance(len, int): 121 | try: 122 | len = int(len) 123 | except ValueError: 124 | raise ValueError("len must be an integer") 125 | if not isinstance(cnt, int): 126 | try: 127 | cnt = int(cnt) 128 | except ValueError: 129 | raise ValueError("cnt must be an integer") 130 | 131 | # Range checks 132 | if not (1 <= cnt <= 65535): 133 | raise ValueError("cnt must be between 1 and 65535") 134 | if not (1 <= len <= 65535): 135 | raise ValueError("len must be between 1 and 65535") 136 | 137 | # Do the thing 138 | strings = [] 139 | for _ in range(cnt): 140 | strings.append( 141 | "".join(random.choice(string.ascii_letters) for i in range(len)) 142 | ) 143 | 144 | return json.dumps(strings) 145 | 146 | 147 | def generate_password(self, len:int|str = 16, cnt:int|str = 1) -> str: 148 | """ 149 | Return a random password of letters, numbers, and punctuation 150 | 151 | Args: 152 | len (int): The length of the password 153 | cnt (int): The number of passwords to return 154 | 155 | Returns: 156 | str: a json array with 1 to "count" passwords of "length" length 157 | [""] 158 | """ 159 | 160 | # Type-check the arguments 161 | if not isinstance(len, int): 162 | try: 163 | len = int(len) 164 | except ValueError: 165 | raise ValueError("len must be an integer") 166 | if not isinstance(cnt, int): 167 | try: 168 | cnt = int(cnt) 169 | except ValueError: 170 | raise ValueError("cnt must be an integer") 171 | 172 | # Make values sane 173 | if not (6 <= len 
<= 65535): 174 | raise ValueError("len must be between 6 and 65535") 175 | if not (1 <= cnt <= 65535): 176 | raise ValueError("cnt must be between 1 and 65535") 177 | 178 | # Do the thing 179 | passwords = [] 180 | for _ in range(cnt): 181 | passwords.append( 182 | "".join( 183 | random.choice(string.ascii_letters + string.digits + string.punctuation) 184 | for i in range(len) 185 | ) 186 | ) 187 | 188 | return json.dumps(passwords) 189 | 190 | 191 | def generate_placeholder_text(self, cnt:int|str = 1) -> str: 192 | """ 193 | Return a random sentence of lorem ipsum text 194 | 195 | Args: 196 | cnt (int): The number of sentences to return 197 | 198 | Returns: 199 | str: a json array with 1 to "sentences" strings of lorem ipsum 200 | [""] 201 | """ 202 | 203 | # Type-check the arguments 204 | if not isinstance(cnt, int): 205 | try: 206 | cnt = int(cnt) 207 | except ValueError: 208 | raise ValueError("cnt must be an integer") 209 | 210 | # Make values sane 211 | if not (1 <= cnt <= 65535): 212 | raise ValueError("cnt must be between 1 and 65535") 213 | 214 | # Do the thing 215 | strings = [] 216 | for _ in range(cnt): 217 | strings.append(lorem.get_sentence()) 218 | 219 | return json.dumps(strings) 220 | -------------------------------------------------------------------------------- /src/autogpt_plugins/random_values/test_random_valaues.py: -------------------------------------------------------------------------------- 1 | import json 2 | import string 3 | from unittest.mock import Mock 4 | from unittest import TestCase 5 | try: 6 | from .random_values import RandomValues 7 | except ImportError: 8 | from random_values import RandomValues 9 | 10 | class TestRandomValueCommands(TestCase): 11 | # _random_number Tests 12 | 13 | def setUp(self): 14 | self.random_values = RandomValues(Mock()) 15 | 16 | def test_random_number(self): 17 | result = json.loads(self.random_values.random_number(min=10, max=20, cnt=5)) 18 | self.assertEqual(len(result), 5) 19 | for num in 
result: 20 | self.assertTrue(10 <= num <= 20) 21 | 22 | def test_random_number_using_strings(self): 23 | result = json.loads(self.random_values.random_number(min="10", max="20", cnt="5")) 24 | self.assertEqual(len(result), 5) 25 | for num in result: 26 | self.assertTrue(10 <= num <= 20) 27 | 28 | def test_random_number_using_missing_min(self): 29 | result = json.loads(self.random_values.random_number(max=20, cnt=5)) 30 | self.assertEqual(len(result), 5) 31 | for num in result: 32 | self.assertTrue(0 <= num <= 20) 33 | 34 | def test_random_number_using_missing_max(self): 35 | result = json.loads(self.random_values.random_number(min=10, cnt=5)) 36 | self.assertEqual(len(result), 5) 37 | for num in result: 38 | self.assertTrue(10 <= num <= 65535) 39 | 40 | def test_random_number_using_missing_count(self): 41 | result = json.loads(self.random_values.random_number(min=10, max=20)) 42 | self.assertEqual(len(result), 1) 43 | for num in result: 44 | self.assertTrue(10 <= num <= 20) 45 | 46 | def test_random_number_min_using_garbage(self): 47 | with self.assertRaises(ValueError) as e: 48 | self.random_values.random_number(min="foo", max="20", cnt="5") 49 | self.assertEqual(str(e.exception), "min must be an integer") 50 | 51 | def test_random_number_max_using_garbage(self): 52 | with self.assertRaises(ValueError) as e: 53 | self.random_values.random_number(min="10", max="bar", cnt="5") 54 | self.assertEqual(str(e.exception), "max must be an integer") 55 | 56 | def test_random_number_count_using_garbage(self): 57 | with self.assertRaises(ValueError) as e: 58 | self.random_values.random_number(min="10", max="20", cnt="baz") 59 | self.assertEqual(str(e.exception), "cnt must be an integer") 60 | 61 | def test_make_uuids(self): 62 | result = json.loads(self.random_values.make_uuids(cnt=5)) 63 | self.assertEqual(len(result), 5) 64 | for uid in result: 65 | self.assertIsInstance(uid, str) 66 | self.assertEqual(len(uid), 36) # UUIDs have 36 characters 67 | 68 | def 
test_make_uuids_using_strings(self): 69 | result = json.loads(self.random_values.make_uuids(cnt="5")) 70 | self.assertEqual(len(result), 5) 71 | for uid in result: 72 | self.assertIsInstance(uid, str) 73 | self.assertEqual(len(uid), 36) 74 | 75 | def test_make_uuids_using_missing_count(self): 76 | # If missing, count defaults to 1 77 | result = json.loads(self.random_values.make_uuids()) 78 | self.assertEqual(len(result), 1) 79 | for uid in result: 80 | self.assertIsInstance(uid, str) 81 | self.assertEqual(len(uid), 36) 82 | 83 | def test_make_uuids_using_garbage(self): 84 | with self.assertRaises(ValueError) as e: 85 | self.random_values.make_uuids(cnt="foo") 86 | self.assertEqual(str(e.exception), "cnt must be an integer") 87 | 88 | # _generate_string Tests 89 | 90 | def test_generate_string(self): 91 | result = json.loads(self.random_values.generate_string(len=10, cnt=5)) 92 | self.assertEqual(len(result), 5) 93 | for string in result: 94 | self.assertEqual(len(string), 10) 95 | # Strings should only contain letters and numbers 96 | self.assertTrue(string.isalnum()) 97 | 98 | def test_generate_string_using_strings(self): 99 | result = json.loads(self.random_values.generate_string(len="10", cnt="5")) 100 | self.assertEqual(len(result), 5) 101 | for string in result: 102 | self.assertEqual(len(string), 10) 103 | # Strings should only contain letters and numbers 104 | self.assertTrue(string.isalnum()) 105 | 106 | def test_generate_string_using_missing_length(self): 107 | # If missing, length defaults to 10 108 | result = json.loads(self.random_values.generate_string(cnt=5)) 109 | self.assertEqual(len(result), 5) 110 | for string in result: 111 | self.assertEqual(len(string), 10) 112 | # Strings should only contain letters and numbers 113 | self.assertTrue(string.isalnum()) 114 | 115 | def test_generate_string_using_missing_count(self): 116 | # If missing, count defaults to 1 117 | result = json.loads(self.random_values.generate_string(len=10)) 118 | 
self.assertEqual(len(result), 1) 119 | for string in result: 120 | self.assertEqual(len(string), 10) 121 | # Strings should only contain letters and numbers 122 | self.assertTrue(string.isalnum()) 123 | 124 | def test_generate_string_using_garbage(self): 125 | with self.assertRaises(ValueError) as e: 126 | self.random_values.generate_string(len="foo", cnt="bar") 127 | self.assertEqual(str(e.exception), "len must be an integer") 128 | 129 | # _generate_password Tests 130 | 131 | def test_generate_password(self): 132 | result = json.loads(self.random_values.generate_password(len=10, cnt=5)) 133 | self.assertEqual(len(result), 5) 134 | for password in result: 135 | self.assertEqual(len(password), 10) 136 | # Passwords should contain letters, numbers, and symbols 137 | self.assertTrue(self.is_password(password)) 138 | 139 | def test_generate_password_using_strings(self): 140 | result = json.loads(self.random_values.generate_password(len="10", cnt="5")) 141 | self.assertEqual(len(result), 5) 142 | for password in result: 143 | self.assertEqual(len(password), 10) 144 | # Passwords should contain letters, numbers, and symbols 145 | self.assertTrue(self.is_password(password)) 146 | 147 | def test_generate_password_using_missing_length(self): 148 | # If missing, length defaults to 10 149 | result = json.loads(self.random_values.generate_password(cnt=5)) 150 | self.assertEqual(len(result), 5) 151 | for password in result: 152 | self.assertEqual(len(password), 16) 153 | # Passwords should contain letters, numbers, and symbols 154 | self.assertTrue(self.is_password(password)) 155 | 156 | def test_generate_password_using_missing_count(self): 157 | # If missing, count defaults to 1 158 | result = json.loads(self.random_values.generate_password(len=10)) 159 | self.assertEqual(len(result), 1) 160 | for password in result: 161 | self.assertEqual(len(password), 10) 162 | # Passwords should contain letters, numbers, and symbols 163 | self.assertTrue(self.is_password(password)) 164 | 
165 | def test_generate_password_using_garbage(self): 166 | with self.assertRaises(ValueError) as e: 167 | self.random_values.generate_password(len="foo", cnt="bar") 168 | self.assertEqual(str(e.exception), "len must be an integer") 169 | 170 | # _generate_placeholder_text Tests 171 | 172 | def test_generate_placeholder_text(self): 173 | result = json.loads(self.random_values.generate_placeholder_text(cnt=5)) 174 | self.assertEqual(len(result), 5) 175 | for text in result: 176 | self.assertGreater(len(text), 3) 177 | 178 | def test_generate_placeholder_text_using_strings(self): 179 | result = json.loads(self.random_values.generate_placeholder_text(cnt="5")) 180 | self.assertEqual(len(result), 5) 181 | for text in result: 182 | self.assertGreater(len(text), 3) 183 | 184 | def test_generate_placeholder_text_using_empty_string(self): 185 | with self.assertRaises(ValueError) as e: 186 | self.random_values.generate_placeholder_text(cnt="") 187 | self.assertEqual(str(e.exception), "cnt must be an integer") 188 | 189 | def test_generate_placeholder_text_using_garbage(self): 190 | with self.assertRaises(ValueError) as e: 191 | self.random_values.generate_placeholder_text(cnt="foo") 192 | self.assertEqual(str(e.exception), "cnt must be an integer") 193 | 194 | # checks that the given string only contains ascii letters, digits & punctuation 195 | def is_password(self, input_str): 196 | characters = string.ascii_letters + string.digits + string.punctuation 197 | for character in input_str: 198 | if character not in characters: 199 | return False 200 | return True 201 | -------------------------------------------------------------------------------- /src/autogpt_plugins/scenex/README.md: -------------------------------------------------------------------------------- 1 | # Auto-GPT SceneXplain Plugin: Explore image storytelling beyond pixels 2 | 3 | [SceneXplain](https://scenex.jina.ai) is your gateway to revealing the rich narratives hidden within your images. 
Our cutting-edge AI technology dives deep into every detail, generating sophisticated textual descriptions that breathe life into your visuals. With a user-friendly interface and seamless API integration, SceneX empowers developers to effortlessly incorporate our advanced service into their multimodal applications. 4 | 5 | image 6 | auto-gpt-scenex-plugin 7 | 8 | ## 🌟 Key Features 9 | 10 | - **Advanced Large Model**: SceneX utilizes state-of-the-art large models and large language models to generate comprehensive, sophisticated textual descriptions for your images, surpassing conventional captioning algorithms. 11 | - **Multilingual Support**: SceneX 's powerful AI technology provides seamless multilingual support, enabling users to receive accurate and meaningful descriptions in multiple languages. 12 | - **API Integration**: SceneX offers a seamless API integration, empowering developers to effortlessly incorporate our innovative service into their multimodal applications. 13 | - **Fast Batch Performance**: Experience up to 3 Query Per Second (QPS) performance, ensuring that SceneX delivers prompt and efficient textual descriptions for your images. 14 | 15 | ## 🔧 Installation 16 | 17 | Follow these steps to configure the Auto-GPT SceneX Plugin: 18 | 19 | ### 1. Follow Auto-GPT-Plugins Installation Instructions 20 | 21 | Follow the instructions as per the [Auto-GPT-Plugins/README.md](https://github.com/Significant-Gravitas/Auto-GPT-Plugins/blob/master/README.md) 22 | 23 | ### 2. Locate the `.env.template` file 24 | 25 | Find the file named `.env.template` in the main `/Auto-GPT` folder. 26 | 27 | ### 3. Create and rename a copy of the file 28 | 29 | Duplicate the `.env.template` file and rename the copy to `.env` inside the `/Auto-GPT` folder. 30 | 31 | ### 4. Edit the `.env` file 32 | 33 | Open the `.env` file in a text editor. Note: Files starting with a dot might be hidden by your operating system. 34 | 35 | ### 5. 
Add API configuration settings 36 | 37 | Append the following configuration settings to the end of the file: 38 | 39 | ```ini 40 | ################################################################################ 41 | ### SCENEX API 42 | ################################################################################ 43 | 44 | SCENEX_API_KEY= 45 | ``` 46 | 47 | - `SCENEX_API_KEY`: Your API key for the SceneXplain API. You can obtain a key by following the steps below. 48 | - Sign up for a free account at [SceneXplain](https://scenex.jina.ai/). 49 | - Navigate to the [API Access](https://scenex.jina.ai/api) page and create a new API key. 50 | 51 | ### 6. Allowlist Plugin 52 | 53 | In your `.env` search for `ALLOWLISTED_PLUGINS` and add this Plugin: 54 | 55 | ```ini 56 | ################################################################################ 57 | ### ALLOWLISTED PLUGINS 58 | ################################################################################ 59 | 60 | #ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3) 61 | ALLOWLISTED_PLUGINS=AutoGPTSceneXPlugin 62 | ``` 63 | 64 | ## 🧪 Test the Auto-GPT SceneX Plugin 65 | 66 | Experience the plugin's capabilities by testing it for describing an image. 67 | 68 | 1. **Configure Auto-GPT:** 69 | Set up Auto-GPT with the following parameters: 70 | 71 | - Name: `ImageGPT` 72 | - Role: `Describe a given image` 73 | - Goals: 74 | 1. Goal 1: `Describe an image. Image URL is https://storage.googleapis.com/causal-diffusion.appspot.com/imagePrompts%2F0rw369i5h9t%2Foriginal.png.` 75 | 2. Goal 2: `Terminate` 76 | 77 | 2. **Run Auto-GPT:** 78 | Launch Auto-GPT, which should use the SceneXplain plugin to describe an image. 
79 | -------------------------------------------------------------------------------- /src/autogpt_plugins/scenex/__init__.py: -------------------------------------------------------------------------------- 1 | """This is a SceneX plugin for describing images for Auto-GPT.""" 2 | import os 3 | from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar 4 | 5 | from auto_gpt_plugin_template import AutoGPTPluginTemplate 6 | from colorama import Fore 7 | 8 | from .scenex_plugin import SceneXplain 9 | 10 | PromptGenerator = TypeVar("PromptGenerator") 11 | 12 | 13 | class Message(TypedDict): 14 | role: str 15 | content: str 16 | 17 | 18 | class AutoGPTSceneXPlugin(AutoGPTPluginTemplate): 19 | """ 20 | This is the Auto-GPT SceneX plugin. 21 | """ 22 | 23 | def __init__(self): 24 | super().__init__() 25 | self._name = "ImageExplainer" 26 | self._version = "0.0.1" 27 | self._description = ( 28 | "An Image Captioning Tool: Use this tool to generate a detailed caption for an image. " 29 | "The input can be an image file of any format, and " 30 | "the output will be a text description that covers every detail of the image." 31 | ) 32 | self._api_key = os.getenv("SCENEX_API_KEY") 33 | self.scenexplain = SceneXplain(self._api_key) 34 | 35 | def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator: 36 | if self._api_key: 37 | prompt.add_command( 38 | self._description, 39 | "describe_image", 40 | { 41 | "image": "", 42 | }, 43 | self.scenexplain.describe_image, 44 | ) 45 | else: 46 | print( 47 | Fore.RED 48 | + f"{self._name} - {self._version} - SceneX plugin not loaded, because SCENEX_API_KEY was not set in env." 49 | ) 50 | 51 | return prompt 52 | 53 | def can_handle_post_prompt(self) -> bool: 54 | """This method is called to check that the plugin can 55 | handle the post_prompt method. 
56 | 57 | Returns: 58 | bool: True if the plugin can handle the post_prompt method.""" 59 | return True 60 | 61 | def can_handle_on_response(self) -> bool: 62 | """This method is called to check that the plugin can 63 | handle the on_response method. 64 | 65 | Returns: 66 | bool: True if the plugin can handle the on_response method.""" 67 | return False 68 | 69 | def on_response(self, response: str, *args, **kwargs) -> str: 70 | """This method is called when a response is received from the model.""" 71 | pass 72 | 73 | def can_handle_on_planning(self) -> bool: 74 | """This method is called to check that the plugin can 75 | handle the on_planning method. 76 | 77 | Returns: 78 | bool: True if the plugin can handle the on_planning method.""" 79 | return False 80 | 81 | def on_planning( 82 | self, prompt: PromptGenerator, messages: List[Message] 83 | ) -> Optional[str]: 84 | """This method is called before the planning chat completion is done. 85 | 86 | Args: 87 | prompt (PromptGenerator): The prompt generator. 88 | messages (List[str]): The list of messages. 89 | """ 90 | pass 91 | 92 | def can_handle_post_planning(self) -> bool: 93 | """This method is called to check that the plugin can 94 | handle the post_planning method. 95 | 96 | Returns: 97 | bool: True if the plugin can handle the post_planning method.""" 98 | return False 99 | 100 | def post_planning(self, response: str) -> str: 101 | """This method is called after the planning chat completion is done. 102 | 103 | Args: 104 | response (str): The response. 105 | 106 | Returns: 107 | str: The resulting response. 108 | """ 109 | pass 110 | 111 | def can_handle_pre_instruction(self) -> bool: 112 | """This method is called to check that the plugin can 113 | handle the pre_instruction method. 
114 | 115 | Returns: 116 | bool: True if the plugin can handle the pre_instruction method.""" 117 | return False 118 | 119 | def pre_instruction(self, messages: List[Message]) -> List[Message]: 120 | """This method is called before the instruction chat is done. 121 | 122 | Args: 123 | messages (List[Message]): The list of context messages. 124 | 125 | Returns: 126 | List[Message]: The resulting list of messages. 127 | """ 128 | pass 129 | 130 | def can_handle_on_instruction(self) -> bool: 131 | """This method is called to check that the plugin can 132 | handle the on_instruction method. 133 | 134 | Returns: 135 | bool: True if the plugin can handle the on_instruction method.""" 136 | return False 137 | 138 | def on_instruction(self, messages: List[Message]) -> Optional[str]: 139 | """This method is called when the instruction chat is done. 140 | 141 | Args: 142 | messages (List[Message]): The list of context messages. 143 | 144 | Returns: 145 | Optional[str]: The resulting message. 146 | """ 147 | pass 148 | 149 | def can_handle_post_instruction(self) -> bool: 150 | """This method is called to check that the plugin can 151 | handle the post_instruction method. 152 | 153 | Returns: 154 | bool: True if the plugin can handle the post_instruction method.""" 155 | return False 156 | 157 | def post_instruction(self, response: str) -> str: 158 | """This method is called after the instruction chat is done. 159 | 160 | Args: 161 | response (str): The response. 162 | 163 | Returns: 164 | str: The resulting response. 165 | """ 166 | pass 167 | 168 | def can_handle_pre_command(self) -> bool: 169 | """This method is called to check that the plugin can 170 | handle the pre_command method. 
171 | 172 | Returns: 173 | bool: True if the plugin can handle the pre_command method.""" 174 | return False 175 | 176 | def pre_command( 177 | self, command_name: str, arguments: Dict[str, Any] 178 | ) -> Tuple[str, Dict[str, Any]]: 179 | """This method is called before the command is executed. 180 | 181 | Args: 182 | command_name (str): The command name. 183 | arguments (Dict[str, Any]): The arguments. 184 | 185 | Returns: 186 | Tuple[str, Dict[str, Any]]: The command name and the arguments. 187 | """ 188 | pass 189 | 190 | def can_handle_post_command(self) -> bool: 191 | """This method is called to check that the plugin can 192 | handle the post_command method. 193 | 194 | Returns: 195 | bool: True if the plugin can handle the post_command method.""" 196 | return False 197 | 198 | def post_command(self, command_name: str, response: str) -> str: 199 | """This method is called after the command is executed. 200 | 201 | Args: 202 | command_name (str): The command name. 203 | response (str): The response. 204 | 205 | Returns: 206 | str: The resulting response. 207 | """ 208 | pass 209 | 210 | def can_handle_chat_completion( 211 | self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int 212 | ) -> bool: 213 | """This method is called to check that the plugin can 214 | handle the chat_completion method. 215 | 216 | Args: 217 | messages (List[Message]): The messages. 218 | model (str): The model name. 219 | temperature (float): The temperature. 220 | max_tokens (int): The max tokens. 221 | 222 | Returns: 223 | bool: True if the plugin can handle the chat_completion method.""" 224 | return False 225 | 226 | def handle_chat_completion( 227 | self, messages: List[Message], model: str, temperature: float, max_tokens: int 228 | ) -> str: 229 | """This method is called when the chat completion is done. 230 | 231 | Args: 232 | messages (List[Message]): The messages. 233 | model (str): The model name. 234 | temperature (float): The temperature. 
from typing import List, Literal, Optional

import requests

# Valid SceneXplain captioning algorithms. A closed set of string choices is
# expressed with Literal; the previous Union of bare strings was not a valid
# type expression.
Algorithm = Literal["Aqua", "Bolt", "Comet", "Dune", "Ember", "Flash"]


class SceneXplain:
    """Minimal client for the SceneXplain image-description API."""

    API_ENDPOINT = "https://us-central1-causal-diffusion.cloudfunctions.net/describe"

    def __init__(self, api_key):
        # Sent with every request via the x-api-key header.
        self._api_key = api_key

    def describe_image(
        self,
        image: str,
        algorithm: Algorithm = "Dune",
        features: Optional[List[str]] = None,
        languages: Optional[List[str]] = None,
    ) -> dict:
        """Describe an image with SceneXplain.

        Args:
            image: URL (or data URI) of the image to describe.
            algorithm: Captioning algorithm to use (defaults to "Dune").
            features: Optional extra features to request; defaults to none.
            languages: Optional output languages; defaults to the API default.

        Returns:
            dict: {"image": <input image>, "description": <generated text>}.
            The description is "" when the API returns no result.
        """
        # None (not []) defaults avoid the shared-mutable-default pitfall.
        features = [] if features is None else features
        languages = [] if languages is None else languages

        headers = {
            "x-api-key": f"token {self._api_key}",
            "content-type": "application/json",
        }

        payload = {
            "data": [
                {
                    "image": image,
                    "algorithm": algorithm,
                    "features": features,
                    "languages": languages,
                }
            ]
        }

        response = requests.post(self.API_ENDPOINT, headers=headers, json=payload)
        result = response.json().get("result", [])
        img = result[0] if result else {}

        return {"image": image, "description": img.get("text", "")}
from .scenex_plugin import SceneXplain

MOCK_API_KEY = "secret"
MOCK_IMAGE = "https://example.com/image.png"
MOCK_DESCRIPTION = "example description"


def test_describe_image(requests_mock):
    """describe_image should POST the expected payload and unpack the result."""
    requests_mock.post(
        SceneXplain.API_ENDPOINT,
        json={
            "result": [
                {
                    "image": MOCK_IMAGE,
                    "text": MOCK_DESCRIPTION,
                }
            ]
        },
    )

    scenex = SceneXplain(MOCK_API_KEY)
    result = scenex.describe_image(
        image=MOCK_IMAGE,
        algorithm="Dune",
        features=[],
        languages=[],
    )

    # Check the results
    assert result == {
        "image": MOCK_IMAGE,
        "description": MOCK_DESCRIPTION,
    }

    # Check that the API was called with the correct payload. The original
    # comparison was missing `assert`, so it could never fail.
    assert requests_mock.request_history[0].json() == {
        "data": [
            {
                "image": MOCK_IMAGE,
                "algorithm": "Dune",
                "features": [],
                "languages": [],
            }
        ]
    }
8 | 9 | ## Installation 10 | 11 | - Follow the instructions as per the [Auto-GPT-Plugins/README.md](https://github.com/Significant-Gravitas/Auto-GPT-Plugins/blob/master/README.md) 12 | 13 | - Append the following configuration settings to the `.env` file within AutoGPT, see [Configuration](#configuration) for details: 14 | 15 | ```ini 16 | ################################################################################ 17 | ### SerpApi 18 | ################################################################################ 19 | 20 | SERPAPI_API_KEY= 21 | SERPAPI_ENGINE= 22 | SERPAPI_NO_CACHE= 23 | SERPAPI_RESULT_FILTER= 24 | ``` 25 | 26 | 27 | - In the `.env` file, search for `ALLOWLISTED_PLUGINS` and add this plugin: 28 | 29 | ```ini 30 | ################################################################################ 31 | ### ALLOWLISTED PLUGINS 32 | ################################################################################ 33 | 34 | #ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3) 35 | ALLOWLISTED_PLUGINS=AutoGPTSerpApiSearch 36 | ``` 37 | 38 | ## Configuration 39 | 40 | | Variable | Required | Description | 41 | | ---- | ---- | ---- | 42 | | SERPAPI_API_KEY | Yes | Your API key for the SerpApi. You can obtain a key by following the steps:
- Sign up for a free account at [SerpApi](https://serpapi.com).
- Navigate to the [Dashboard](https://serpapi.com/dashboard) page and find "Your Private API Key". | 43 | | SERPAPI_ENGINE | No | The engine you want to use for web searches performed by Auto-GPT.
- You can find valid engine values from [SerpApi Documentation](https://serpapi.com/search-api).
- Typical values are: `google`, `bing`, `baidu`, `yahoo`, `duckduckgo`, `yandex`, ...
- The default value is `google` if not set. | 44 | | SERPAPI_NO_CACHE | No | Set to `true` if you want to force SerpApi to fetch the results even if a cached version is already present. Defaulted to `false`. | 45 | | SERPAPI_RESULT_FILTER | No | SerpApi can return JSON results that is too large for Auto-GPT to process. This variable allows you to pick certain fields from the returned JSON to reduce the size. Defaulted to `organic_results(title,link,snippet)`. See [Result Filter](#result-filter) for details.| 46 | 47 | ### Result Filter 48 | This plugin supports filtering fields up to a depth of 2. The syntax of the filter is `(,,...),(,,...),...`, where `` is top level field, and `` is second level field. `` is optional. Set to `` to disable filtering. Here are some examples: 49 | - `` 50 | - Filter disabled. The whole JSON output will be the input of the current command. 51 | - `organic_results`: 52 | - Pick only `organic_results` from the top level fields of JSON output. 53 | - `organic_results, knowledge_graph`: 54 | - Pick only `organic_results` and `knowledge_graph` from the top level fields of JSON output. 55 | - `organic_results(title, link, snippet)`: 56 | - Pick only `organic_results` from the top level fields of JSON output. 57 | - Pick only `title`, `link` and `snippet` from `organic_results`. 58 | - If `organic_results` is an object, applies to itself. 59 | - If `organic_results` is an array, applies to all its containing objects. 60 | - Otherwise, the second level filter is ignored. 61 | - `organic_results(title,link,snippet), knowledge_graph(website, description)`: 62 | - Pick only `organic_results` and `knowledge_graph` from the top level fields of JSON output. 63 | - Pick only `title`, `link` and `snippet` from `organic_results`. 64 | - If `organic_results` is an object, applies to itself. 65 | - If `organic_results` is an array, applies to all its containing objects. 66 | - Otherwise, the second level filter is ignored. 
67 | - Pick only `website`, and `description` from `knowledge_graph`. 68 | - If `knowledge_graph` is an object, applies to itself. 69 | - If `knowledge_graph` is an array, applies to all its containing objects. 70 | - Otherwise, the second level filter is ignored. 71 | 72 | ### Filter Tuning 73 | Sometimes too much input can make Auto-GPT confused, failing to extract the correct information. Other than [organic_results](https://serpapi.com/organic-results), SerpApi extracts more fields such as [answer_box](https://serpapi.com/direct-answer-box-api), [knowledge_graph](https://serpapi.com/knowledge-graph) and [related_questions](https://serpapi.com/related-questions), which are more straightforward and easier to make sense of, but not always present. You can always check if those exist through the [Dashboard](https://serpapi.com/searches) and add/remove fields to the filter according to your needs. 74 | 75 | ### Example 76 | Here's an example to let Auto-GPT search on Google and get information from "Answer Box" and "Knowledge Graph" 77 | 78 | ```ini 79 | SERPAPI_API_KEY=your_api_key 80 | SERPAPI_ENGINE=google 81 | SERPAPI_RESULT_FILTER=answer_box,knowledge_graph 82 | ``` 83 | 84 | ## How it works 85 | When `SERPAPI_API_KEY` is set. The plugin will add a new command `serpapi_search` to Auto-GPT. The `google` command will be intercepted to use `serpapi_search` instead. Auto-GPT can also use `serpapi_search` command directly. Therefore, all web searches performed by Auto-GPT are routed to SerpApi. 
-------------------------------------------------------------------------------- /src/autogpt_plugins/serpapi/__init__.py: -------------------------------------------------------------------------------- 1 | """This is the SerpApi search engines plugin for Auto-GPT.""" 2 | import os 3 | from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar 4 | 5 | from auto_gpt_plugin_template import AutoGPTPluginTemplate 6 | 7 | from .serpapi_search import serpapi_search 8 | 9 | PromptGenerator = TypeVar("PromptGenerator") 10 | 11 | 12 | class Message(TypedDict): 13 | role: str 14 | content: str 15 | 16 | 17 | class AutoGPTSerpApiSearch(AutoGPTPluginTemplate): 18 | def __init__(self): 19 | super().__init__() 20 | self._name = "SerpApi-Search-Plugin" 21 | self._version = "0.1.0" 22 | self._description = ( 23 | "This plugin performs SerpApi searches using the provided query." 24 | ) 25 | self.load_commands = ( 26 | os.getenv("SERPAPI_API_KEY") 27 | ) 28 | 29 | def can_handle_post_prompt(self) -> bool: 30 | return True 31 | 32 | def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator: 33 | if self.load_commands: 34 | # Add SerpApi Search command 35 | prompt.add_command( 36 | "SerpApi Search", 37 | "serpapi_search", 38 | {"query": ""}, 39 | serpapi_search, 40 | ) 41 | else: 42 | print( 43 | "Warning: SerpApi-Search-Plugin is not fully functional. " 44 | "Please set the SERPAPI_API_KEY environment variable." 
45 | ) 46 | return prompt 47 | 48 | def can_handle_pre_command(self) -> bool: 49 | return True 50 | 51 | def pre_command( 52 | self, command_name: str, arguments: Dict[str, Any] 53 | ) -> Tuple[str, Dict[str, Any]]: 54 | if command_name == "google" and self.load_commands: 55 | return "serpapi_search", {"query": arguments["query"]} 56 | else: 57 | return command_name, arguments 58 | 59 | def can_handle_post_command(self) -> bool: 60 | return False 61 | 62 | def post_command(self, command_name: str, response: str) -> str: 63 | pass 64 | 65 | def can_handle_on_planning(self) -> bool: 66 | return False 67 | 68 | def on_planning( 69 | self, prompt: PromptGenerator, messages: List[Message] 70 | ) -> Optional[str]: 71 | pass 72 | 73 | def can_handle_on_response(self) -> bool: 74 | return False 75 | 76 | def on_response(self, response: str, *args, **kwargs) -> str: 77 | pass 78 | 79 | def can_handle_post_planning(self) -> bool: 80 | return False 81 | 82 | def post_planning(self, response: str) -> str: 83 | pass 84 | 85 | def can_handle_pre_instruction(self) -> bool: 86 | return False 87 | 88 | def pre_instruction(self, messages: List[Message]) -> List[Message]: 89 | pass 90 | 91 | def can_handle_on_instruction(self) -> bool: 92 | return False 93 | 94 | def on_instruction(self, messages: List[Message]) -> Optional[str]: 95 | pass 96 | 97 | def can_handle_post_instruction(self) -> bool: 98 | return False 99 | 100 | def post_instruction(self, response: str) -> str: 101 | pass 102 | 103 | def can_handle_chat_completion( 104 | self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int 105 | ) -> bool: 106 | return False 107 | 108 | def handle_chat_completion( 109 | self, messages: List[Message], model: str, temperature: float, max_tokens: int 110 | ) -> str: 111 | pass 112 | 113 | def can_handle_text_embedding( 114 | self, text: str 115 | ) -> bool: 116 | return False 117 | 118 | def handle_text_embedding( 119 | self, text: str 120 | ) -> list: 121 | pass 
import os
import re

# Engines whose search text travels under a key other than the default "q".
# Maps SerpApi engine name -> query-string key for that engine.
_engine_query_key = {
    "ebay": "_nkw",
    "google_maps_reviews": "data_id",
    "google_product": "product_id",
    "google_lens": "url",
    "google_immersive_product": "page_token",
    "google_scholar_author": "author_id",
    "google_scholar_profiles": "mauthors",
    "google_related_questions": "next_page_token",
    "google_finance_markets": "trend",
    "google_health_insurance": "provider_id",
    "home_depot_product": "product_id",
    "walmart": "query",
    "walmart_product": "product_id",
    "walmart_product_reviews": "product_id",
    "yahoo": "p",
    "yahoo_images": "p",
    "yahoo_videos": "p",
    "yandex": "text",
    "yandex_images": "text",
    "yandex_videos": "text",
    "youtube": "search_query",
    "google_play_product": "product_id",
    "yahoo_shopping": "p",
    "apple_app_store": "term",
    "apple_reviews": "product_id",
    "apple_product": "product_id",
    "naver": "query",
    "yelp": "find_desc",
    "yelp_reviews": "place_id",
}


def _filter_dict(obj, allowed_keys):
    """Return a copy of dict *obj* keeping only keys in *allowed_keys*.

    Non-dict values are returned unchanged, so list elements that are not
    objects (numbers, booleans, ...) pass through filtering untouched.
    """
    if not isinstance(obj, dict):
        return obj

    return {k: v for k, v in obj.items() if k in allowed_keys}


def _filter_results(json, filterstr):
    """Filter a SerpApi JSON result down to the fields named in *filterstr*.

    Filter syntax: "top1(sub1,sub2),top2,...". Second-level filters apply to
    a dict value directly, or to every dict inside a list value. An empty
    filter returns the JSON unchanged. When exactly one top-level field
    survives, its value is returned unwrapped to shrink the payload.
    """
    if not filterstr:
        return json

    # Parse "field(sub1,sub2)" pairs into {field: [sub, ...]}; fields with no
    # parenthesized part get an empty sub-field list (no second-level filter).
    field_filter = {}
    for match in re.findall(r"(\w+)(?:\((.*?)\))*", filterstr):
        top_level = match[0]
        sub_fields = [x.strip() for x in match[1].split(",") if x.strip() != ""]
        field_filter[top_level] = sub_fields

    filtered_json = _filter_dict(json, list(field_filter.keys()))
    for key, value in filtered_json.items():
        inner = field_filter[key]
        if inner:
            if isinstance(value, list):
                filtered_json[key] = [_filter_dict(x, inner) for x in value]
            elif isinstance(value, dict):
                filtered_json[key] = _filter_dict(value, inner)
            # Scalars: the second-level filter is ignored by design.

    if len(filtered_json) == 1:
        return filtered_json[next(iter(filtered_json))]
    return filtered_json


def _get_params(query: str):
    """Build the query parameters for a SerpApi request from the environment.

    Reads SERPAPI_ENGINE (default "google"), SERPAPI_NO_CACHE and
    SERPAPI_API_KEY.
    """
    engine = os.getenv("SERPAPI_ENGINE") or "google"
    no_cache = os.getenv("SERPAPI_NO_CACHE")
    api_key = os.getenv("SERPAPI_API_KEY")
    params = {
        "engine": engine,
        "api_key": api_key,
        "source": "serpapi-auto-gpt-plugin-1st",
    }

    # Any value other than empty/"false" forces a fresh (uncached) search.
    if no_cache and no_cache != "false":
        params["no_cache"] = "true"

    # Most engines take "q"; some use an engine-specific key.
    params[_engine_query_key.get(engine, "q")] = query

    return params


def serpapi_search(query: str):
    """
    Perform a SerpApi search and return the (optionally filtered) JSON results.

    Raises:
        requests.HTTPError: If SerpApi responds with an error status.
    """
    # Deferred import: the filtering helpers above stay usable (and this
    # module importable) even when the HTTP client is unavailable.
    import requests

    response = requests.get("https://serpapi.com/search", params=_get_params(query))
    response.raise_for_status()

    result_json = response.json()

    result_filter = (
        os.getenv("SERPAPI_RESULT_FILTER") or "organic_results(title,link,snippet)"
    )
    return _filter_results(result_json, result_filter)
import AutoGPTSerpApiSearch 9 | from .serpapi_search import serpapi_search, _filter_results, _get_params 10 | 11 | 12 | class TestAutoGPTSerpApiSearch(unittest.TestCase): 13 | json_data = { 14 | "field1": 1, 15 | "field2": [1, 2, "3"], 16 | "field3": {"a": 1, "b": 2, "c": "3"}, 17 | "field4": [ 18 | {"a": 1, "b": 2, "c": "3"}, 19 | {"a": 4, "b": 5, "c": "6"}, 20 | 7, 21 | False, 22 | {"a": 8, "b": "9", "c": 10}, 23 | ], 24 | "organic_results": [ 25 | { 26 | "title": "title 1", 27 | "link": "https://example1.com", 28 | "snippet": "snippet 1", 29 | "thumbnail": "https://path-to-thumbnail-1", 30 | }, 31 | { 32 | "title": "title 2", 33 | "link": "https://example2.com", 34 | "snippet": "snippet 2", 35 | "thumbnail": "https://path-to-thumbnail-2", 36 | }, 37 | ], 38 | } 39 | 40 | def setUp(self): 41 | os.environ["SERPAPI_API_KEY"] = "secret" 42 | self.plugin = AutoGPTSerpApiSearch() 43 | 44 | def tearDown(self): 45 | os.environ.pop("SERPAPI_API_KEY", None) 46 | os.environ.pop("SERPAPI_ENGINE", None) 47 | os.environ.pop("SERPAPI_NO_CACHE", None) 48 | os.environ.pop("SERPAPI_RESULT_FILTER", None) 49 | 50 | def test_pre_command(self): 51 | os.environ["SERPAPI_API_KEY"] = "secret" 52 | self.plugin = AutoGPTSerpApiSearch() 53 | 54 | command_name, arguments = self.plugin.pre_command( 55 | "google", {"query": "test query"} 56 | ) 57 | self.assertEqual(command_name, "serpapi_search") 58 | self.assertEqual(arguments, {"query": "test query"}) 59 | 60 | def test_can_handle_pre_command(self): 61 | self.assertTrue(self.plugin.can_handle_pre_command()) 62 | 63 | def test_can_handle_post_prompt(self): 64 | self.assertTrue(self.plugin.can_handle_post_prompt()) 65 | 66 | def test_filter_results_none(self): 67 | filtered = _filter_results(self.json_data, "") 68 | self.assertDictEqual(filtered, self.json_data) 69 | 70 | def test_filter_results_top_level(self): 71 | filtered = _filter_results(self.json_data, "field1, field3") 72 | self.assertDictEqual( 73 | filtered, {"field1": 1, "field3": 
{"a": 1, "b": 2, "c": "3"}} 74 | ) 75 | 76 | def test_filter_results_top_level_one_field(self): 77 | filtered = _filter_results(self.json_data, "field3") 78 | self.assertDictEqual(filtered, {"a": 1, "b": 2, "c": "3"}) 79 | 80 | def test_filter_results_top_level_second_level_dict(self): 81 | filtered = _filter_results(self.json_data, "field1(a, b), field3(b, c)") 82 | self.assertDictEqual(filtered, {"field1": 1, "field3": {"b": 2, "c": "3"}}) 83 | 84 | def test_filter_results_top_level_second_level_list(self): 85 | filtered = _filter_results(self.json_data, "field1(a, b), field4(b, c)") 86 | self.assertDictEqual( 87 | filtered, 88 | { 89 | "field1": 1, 90 | "field4": [ 91 | {"b": 2, "c": "3"}, 92 | {"b": 5, "c": "6"}, 93 | 7, 94 | False, 95 | {"b": "9", "c": 10}, 96 | ], 97 | }, 98 | ) 99 | 100 | def test_get_params_no_engine(self): 101 | params = _get_params("test query") 102 | self.assertDictEqual( 103 | params, 104 | { 105 | "engine": "google", 106 | "q": "test query", 107 | "api_key": "secret", 108 | "source": "serpapi-auto-gpt-plugin-1st", 109 | }, 110 | ) 111 | 112 | def test_get_params_engine_query_non_q(self): 113 | os.environ["SERPAPI_ENGINE"] = "yahoo" 114 | params = _get_params("test query") 115 | self.assertDictEqual( 116 | params, 117 | { 118 | "engine": "yahoo", 119 | "p": "test query", 120 | "api_key": "secret", 121 | "source": "serpapi-auto-gpt-plugin-1st", 122 | }, 123 | ) 124 | 125 | def test_get_params_no_cache_true(self): 126 | os.environ["SERPAPI_NO_CACHE"] = "true" 127 | params = _get_params("test query") 128 | self.assertDictEqual( 129 | params, 130 | { 131 | "engine": "google", 132 | "q": "test query", 133 | "no_cache": "true", 134 | "api_key": "secret", 135 | "source": "serpapi-auto-gpt-plugin-1st", 136 | }, 137 | ) 138 | 139 | def test_get_params_no_cache_false(self): 140 | os.environ["SERPAPI_NO_CACHE"] = "false" 141 | params = _get_params("test query") 142 | self.assertDictEqual( 143 | params, 144 | { 145 | "engine": "google", 146 | "q": 
"test query", 147 | "api_key": "secret", 148 | "source": "serpapi-auto-gpt-plugin-1st", 149 | }, 150 | ) 151 | 152 | @requests_mock.Mocker() 153 | def test_serpapi_search_default(self, m): 154 | m.get("https://serpapi.com/search", json=self.json_data) 155 | json = serpapi_search("test query") 156 | self.assertListEqual( 157 | json, 158 | [ 159 | { 160 | "title": "title 1", 161 | "link": "https://example1.com", 162 | "snippet": "snippet 1", 163 | }, 164 | { 165 | "title": "title 2", 166 | "link": "https://example2.com", 167 | "snippet": "snippet 2", 168 | }, 169 | ], 170 | ) 171 | 172 | @requests_mock.Mocker() 173 | def test_serpapi_search_custom_filter(self, m): 174 | os.environ["SERPAPI_RESULT_FILTER"] = "field1(a, b), field4(b, c)" 175 | m.get("https://serpapi.com/search", json=self.json_data) 176 | json = serpapi_search("test query") 177 | self.assertDictEqual( 178 | json, 179 | { 180 | "field1": 1, 181 | "field4": [ 182 | {"b": 2, "c": "3"}, 183 | {"b": 5, "c": "6"}, 184 | 7, 185 | False, 186 | {"b": "9", "c": 10}, 187 | ], 188 | }, 189 | ) 190 | 191 | 192 | if __name__ == "__main__": 193 | unittest.main() 194 | -------------------------------------------------------------------------------- /src/autogpt_plugins/telegram/README.md: -------------------------------------------------------------------------------- 1 | ## Disclaimer! 2 | As many people keep creating issues: 3 | do not run "pip install telegram" 4 | it is not meantioned anywhere! 5 | 6 | ## Telegram Plugin for Auto-GPT 7 | 8 | A smoothly working Telegram bot that gives you all the messages you would normally get through the Terminal. 9 | Making Auto-GPT a more user-friendly application to interact with. 10 | 11 | 12 | ## SETUP 13 | First setup a telegram bot by following the instructions here: https://core.telegram.org/bots#6-botfather 14 | 15 | To get the chat_id just start auto-gpt and follow the instructions in the terminal. 
16 | 17 | Then set the following variables in your .env: 18 | ``` 19 | TELEGRAM_API_KEY=your-telegram-bot-token 20 | TELEGRAM_CHAT_ID=your-telegram-bot-chat-id 21 | 22 | ALLOWLISTED_PLUGINS=AutoGPTTelegram 23 | CHAT_MESSAGES_ENABLED=True 24 | 25 | ```` 26 | within your .env file. 27 | Also keep in mind to use the official documentation on how to use plugins. 28 | 29 | 30 | 31 | 32 | # Running Auto-GPT with this plugin 33 | 34 | To run this plugin, zip this repo and put it under Auto-GPT/plugins/ 35 | To run it, add the following to your start command: 36 | ``` 37 | For non docker: 38 | python -m autogpt --install-plugin-deps 39 | 40 | For Docker: 41 | docker-compose run --rm auto-gpt --install-plugin-deps 42 | ``` 43 | 44 | # Auto-GPT-Plugins 45 | 46 | Plugins for Auto-GPT 47 | 48 | Clone this repo into the plugins direcory of [Auto-GPT](https://github.dev/Significant-Gravitas/Auto-GPT) 49 | 50 | For interactionless use, set `ALLOWLISTED_PLUGINS=example-plugin1,example-plugin2,example-plugin3` in your `.env` 51 | 52 | | Plugin | Description | 53 | |----------|---------------------------------------------------------------------------------------------------------------------| 54 | | Telegram | AutoGPT is capable of asking/prompting the user via a Telegram Chat bot and also responds to commands and messages. 
| 55 | 56 | -------------------------------------------------------------------------------- /src/autogpt_plugins/telegram/__init__.py: -------------------------------------------------------------------------------- 1 | """Telegram controller bot integration using python-telegram-bot.""" 2 | import os 3 | import re 4 | from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar 5 | 6 | from auto_gpt_plugin_template import AutoGPTPluginTemplate 7 | 8 | from .telegram_chat import TelegramUtils 9 | 10 | PromptGenerator = TypeVar("PromptGenerator") 11 | 12 | 13 | class Message(TypedDict): 14 | role: str 15 | content: str 16 | 17 | 18 | def remove_color_codes(s: str) -> str: 19 | ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") 20 | return ansi_escape.sub("", s) 21 | 22 | 23 | class AutoGPTTelegram(AutoGPTPluginTemplate): 24 | """ 25 | Telegram controller bot integration using python-telegram-bot. 26 | """ 27 | 28 | def __init__(self): 29 | super().__init__() 30 | self._name = "Auto-GPT-Telegram" 31 | self._version = "0.2.0" 32 | self._description = ( 33 | "This integrates a Telegram chat bot with your autogpt instance." 34 | ) 35 | self.telegram_api_key = os.getenv("TELEGRAM_API_KEY", None) 36 | self.telegram_chat_id = os.getenv("TELEGRAM_CHAT_ID", None) 37 | self.telegram_utils = TelegramUtils( 38 | chat_id=self.telegram_chat_id, api_key=self.telegram_api_key 39 | ) 40 | 41 | def can_handle_on_response(self) -> bool: 42 | """This method is called to check that the plugin can 43 | handle the on_response method. 44 | 45 | Returns: 46 | bool: True if the plugin can handle the on_response method.""" 47 | return False 48 | 49 | def on_response(self, response: str, *args, **kwargs) -> str: 50 | """This method is called when a response is received from the model.""" 51 | pass 52 | 53 | def can_handle_post_prompt(self) -> bool: 54 | """This method is called to check that the plugin can 55 | handle the post_prompt method. 
56 | 57 | Returns: 58 | bool: True if the plugin can handle the post_prompt method.""" 59 | return False 60 | 61 | def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator: 62 | """This method is called just after the generate_prompt is called, 63 | but actually before the prompt is generated. 64 | 65 | Args: 66 | prompt (PromptGenerator): The prompt generator. 67 | 68 | Returns: 69 | PromptGenerator: The prompt generator. 70 | """ 71 | pass 72 | 73 | def can_handle_on_planning(self) -> bool: 74 | """This method is called to check that the plugin can 75 | handle the on_planning method. 76 | 77 | Returns: 78 | bool: True if the plugin can handle the on_planning method.""" 79 | return False 80 | 81 | def on_planning( 82 | self, prompt: PromptGenerator, messages: List[Message] 83 | ) -> Optional[str]: 84 | """This method is called before the planning chat completion is done. 85 | 86 | Args: 87 | prompt (PromptGenerator): The prompt generator. 88 | messages (List[str]): The list of messages. 89 | """ 90 | pass 91 | 92 | def can_handle_post_planning(self) -> bool: 93 | """This method is called to check that the plugin can 94 | handle the post_planning method. 95 | 96 | Returns: 97 | bool: True if the plugin can handle the post_planning method.""" 98 | return False 99 | 100 | def post_planning(self, response: str) -> str: 101 | """This method is called after the planning chat completion is done. 102 | 103 | Args: 104 | response (str): The response. 105 | 106 | Returns: 107 | str: The resulting response. 108 | """ 109 | pass 110 | 111 | def can_handle_pre_instruction(self) -> bool: 112 | """This method is called to check that the plugin can 113 | handle the pre_instruction method. 114 | 115 | Returns: 116 | bool: True if the plugin can handle the pre_instruction method.""" 117 | return False 118 | 119 | def pre_instruction(self, messages: List[Message]) -> List[Message]: 120 | """This method is called before the instruction chat is done. 
121 | 122 | Args: 123 | messages (List[Message]): The list of context messages. 124 | 125 | Returns: 126 | List[Message]: The resulting list of messages. 127 | """ 128 | pass 129 | 130 | def can_handle_on_instruction(self) -> bool: 131 | """This method is called to check that the plugin can 132 | handle the on_instruction method. 133 | 134 | Returns: 135 | bool: True if the plugin can handle the on_instruction method.""" 136 | return False 137 | 138 | def on_instruction(self, messages: List[Message]) -> Optional[str]: 139 | """This method is called when the instruction chat is done. 140 | 141 | Args: 142 | messages (List[Message]): The list of context messages. 143 | 144 | Returns: 145 | Optional[str]: The resulting message. 146 | """ 147 | pass 148 | 149 | def can_handle_post_instruction(self) -> bool: 150 | """This method is called to check that the plugin can 151 | handle the post_instruction method. 152 | 153 | Returns: 154 | bool: True if the plugin can handle the post_instruction method.""" 155 | return False 156 | 157 | def post_instruction(self, response: str) -> str: 158 | """This method is called after the instruction chat is done. 159 | 160 | Args: 161 | response (str): The response. 162 | 163 | Returns: 164 | str: The resulting response. 165 | """ 166 | pass 167 | 168 | def can_handle_pre_command(self) -> bool: 169 | """This method is called to check that the plugin can 170 | handle the pre_command method. 171 | 172 | Returns: 173 | bool: True if the plugin can handle the pre_command method.""" 174 | return False 175 | 176 | def pre_command( 177 | self, command_name: str, arguments: Dict[str, Any] 178 | ) -> Tuple[str, Dict[str, Any]]: 179 | """This method is called before the command is executed. 180 | 181 | Args: 182 | command_name (str): The command name. 183 | arguments (Dict[str, Any]): The arguments. 184 | 185 | Returns: 186 | Tuple[str, Dict[str, Any]]: The command name and the arguments. 
187 | """ 188 | pass 189 | 190 | def can_handle_post_command(self) -> bool: 191 | """This method is called to check that the plugin can 192 | handle the post_command method. 193 | 194 | Returns: 195 | bool: True if the plugin can handle the post_command method.""" 196 | return False 197 | 198 | def post_command(self, command_name: str, response: str) -> str: 199 | """This method is called after the command is executed. 200 | 201 | Args: 202 | command_name (str): The command name. 203 | response (str): The response. 204 | 205 | Returns: 206 | str: The resulting response. 207 | """ 208 | pass 209 | 210 | def can_handle_chat_completion( 211 | self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int 212 | ) -> bool: 213 | """This method is called to check that the plugin can 214 | handle the chat_completion method. 215 | 216 | Args: 217 | messages (List[Message]): The messages. 218 | model (str): The model name. 219 | temperature (float): The temperature. 220 | max_tokens (int): The max tokens. 221 | 222 | Returns: 223 | bool: True if the plugin can handle the chat_completion method.""" 224 | return False 225 | 226 | def handle_chat_completion( 227 | self, messages: List[Message], model: str, temperature: float, max_tokens: int 228 | ) -> str: 229 | """This method is called when the chat completion is done. 230 | 231 | Args: 232 | messages (List[Message]): The messages. 233 | model (str): The model name. 234 | temperature (float): The temperature. 235 | max_tokens (int): The max tokens. 236 | 237 | Returns: 238 | str: The resulting response. 
239 | """ 240 | pass 241 | 242 | def can_handle_text_embedding(self, text: str) -> bool: 243 | return False 244 | 245 | def handle_text_embedding(self, text: str) -> list: 246 | pass 247 | 248 | def can_handle_user_input(self, user_input: str) -> bool: 249 | return True 250 | 251 | def user_input(self, user_input: str) -> str: 252 | user_input = remove_color_codes(user_input) 253 | # if the user_input is too long, shorten it 254 | try: 255 | return self.telegram_utils.ask_user(prompt=user_input) 256 | except Exception as e: 257 | print(e) 258 | print("Error sending message to telegram") 259 | return "s" 260 | 261 | def can_handle_report(self) -> bool: 262 | """This method is called to check that the plugin can 263 | handle the report method. 264 | 265 | Returns: 266 | bool: True if the plugin can handle the report method.""" 267 | return True 268 | 269 | def report(self, message: str) -> None: 270 | message = remove_color_codes(message) 271 | # if the message is too long, shorten it 272 | try : 273 | self.telegram_utils.send_message(message=message) 274 | except Exception as e: 275 | print(e) 276 | print("Error sending message to telegram") 277 | 278 | 279 | def can_handle_text_embedding(self, text: str) -> bool: 280 | return False 281 | 282 | def handle_text_embedding(self, text: str) -> list: 283 | pass 284 | -------------------------------------------------------------------------------- /src/autogpt_plugins/twitter/README.md: -------------------------------------------------------------------------------- 1 | # desojo/autogpt-twitter 🐣 2 | 3 | A plugin adding twitter API integration into Auto GPT 4 | 5 | ## Features(more coming soon!) 
6 | 7 | - Post a tweet using the `post_tweet(tweet)` command 8 | - Post a reply to a specific tweet using the `post_reply(tweet, tweet_id)` command 9 | - Get recent mentions using the `get_mentions()` command 10 | - Search a user's recent tweets via username using the `search_twitter_user(targetUser, numOfItems)' command 11 | 12 | ## Installation 13 | 14 | 1. Clone this repo as instructed in the main repository 15 | 2. Add this chunk of code along with your twitter API information to the `.env` file within AutoGPT: 16 | 17 | ``` 18 | ################################################################################ 19 | ### TWITTER API 20 | ################################################################################ 21 | 22 | # Consumer Keys are also known as API keys on the dev portal 23 | 24 | TW_CONSUMER_KEY= 25 | TW_CONSUMER_SECRET= 26 | TW_ACCESS_TOKEN= 27 | TW_ACCESS_TOKEN_SECRET= 28 | TW_CLIENT_ID= 29 | TW_CLIENT_ID_SECRET= 30 | 31 | ################################################################################ 32 | ### ALLOWLISTED PLUGINS 33 | ################################################################################ 34 | 35 | ALLOWLISTED_PLUGINS=AutoGPTTwitter 36 | 37 | ``` 38 | 39 | ## Twitter API Setup for v1.1 access(soon to be deprecated 😭) 40 | 41 | 1. Go to the [Twitter Dev Portal](https://developer.twitter.com/en/portal/dashboard) 42 | 2. Delete any apps/projects that it creates for you 43 | 3. Create a new project with whatever name you want 44 | 4. Create a new app under said project with whatever name you want 45 | 5. Under the app, edit user authentication settings and give it read/write perms. 46 | 6. 
Grab the keys listed in installation instructions and save them for later 47 | -------------------------------------------------------------------------------- /src/autogpt_plugins/twitter/twitter.py: -------------------------------------------------------------------------------- 1 | """This module contains functions for interacting with the Twitter API.""" 2 | from __future__ import annotations 3 | 4 | import pandas as pd 5 | import tweepy 6 | 7 | from . import AutoGPTTwitter 8 | 9 | plugin = AutoGPTTwitter() 10 | 11 | 12 | def post_tweet(tweet_text: str) -> str: 13 | """Posts a tweet to twitter. 14 | 15 | Args: 16 | tweet (str): The tweet to post. 17 | 18 | Returns: 19 | str: The tweet that was posted. 20 | """ 21 | 22 | _tweetID = plugin.api.update_status(status=tweet_text) 23 | 24 | return f"Success! Tweet: {_tweetID.text}" 25 | 26 | 27 | def post_reply(tweet_text: str, tweet_id: int) -> str: 28 | """Posts a reply to a tweet. 29 | 30 | Args: 31 | tweet (str): The tweet to post. 32 | tweet_id (int): The ID of the tweet to reply to. 33 | 34 | Returns: 35 | str: The tweet that was posted. 36 | """ 37 | 38 | replyID = plugin.api.update_status( 39 | status=tweet_text, 40 | in_reply_to_status_id=tweet_id, 41 | auto_populate_reply_metadata=True, 42 | ) 43 | 44 | return f"Success! Tweet: {replyID.text}" 45 | 46 | 47 | def get_mentions() -> str | None: 48 | """Gets the most recent mention. 49 | 50 | Args: 51 | api (tweepy.API): The tweepy API object. 52 | 53 | Returns: 54 | str | None: The most recent mention. 55 | """ 56 | 57 | _tweets = plugin.api.mentions_timeline(tweet_mode="extended") 58 | 59 | for tweet in _tweets: 60 | return ( 61 | f"@{tweet.user.screen_name} Replied: {tweet.full_text}" 62 | f" Tweet ID: {tweet.id}" 63 | ) # Returns most recent mention 64 | 65 | 66 | def search_twitter_user(target_user: str, number_of_tweets: int) -> str: 67 | """Searches a user's tweets given a number of items to retrive and 68 | returns a dataframe. 
69 | 70 | Args: 71 | target_user (str): The user to search. 72 | num_of_items (int): The number of items to retrieve. 73 | api (tweepy.API): The tweepy API object. 74 | 75 | Returns: 76 | str: The dataframe containing the tweets. 77 | """ 78 | 79 | tweets = tweepy.Cursor( 80 | plugin.api.user_timeline, screen_name=target_user, tweet_mode="extended" 81 | ).items(number_of_tweets) 82 | 83 | columns = ["Time", "User", "ID", "Tweet"] 84 | data = [] 85 | 86 | for tweet in tweets: 87 | data.append( 88 | [tweet.created_at, tweet.user.screen_name, tweet.id, tweet.full_text] 89 | ) 90 | 91 | df = str(pd.DataFrame(data, columns=columns)) 92 | 93 | print(df) 94 | 95 | return df # Prints a dataframe object containing the Time, User, ID, and Tweet 96 | -------------------------------------------------------------------------------- /src/autogpt_plugins/wikipedia_search/README.md: -------------------------------------------------------------------------------- 1 | # Wikipedia Search Plugin 2 | 3 | The Wikipedia Search plugin will allow AutoGPT to directly interact with Wikipedia. 4 | 5 | ## Key Features: 6 | - Wikipedia Search performs search queries using Wikipedia. 7 | 8 | ## Installation: 9 | 1. Download the Wikipedia Search Plugin repository as a ZIP file. 10 | 2. Copy the ZIP file into the "plugins" folder of your Auto-GPT project. 11 | 12 | ## AutoGPT Configuration 13 | 14 | Set `ALLOWLISTED_PLUGINS=autogpt-wikipedia-search,example-plugin1,example-plugin2,etc` in your AutoGPT `.env` file. 
15 | -------------------------------------------------------------------------------- /src/autogpt_plugins/wikipedia_search/__init__.py: -------------------------------------------------------------------------------- 1 | """Wikipedia search integrations.""" 2 | from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar 3 | 4 | from auto_gpt_plugin_template import AutoGPTPluginTemplate 5 | 6 | from .wikipedia_search import _wikipedia_search 7 | 8 | PromptGenerator = TypeVar("PromptGenerator") 9 | 10 | 11 | class Message(TypedDict): 12 | role: str 13 | content: str 14 | 15 | 16 | class AutoGPTWikipediaSearch(AutoGPTPluginTemplate): 17 | """ 18 | Wikipedia search integrations 19 | """ 20 | 21 | def __init__(self): 22 | super().__init__() 23 | self._name = "autogpt-wikipedia-search" 24 | self._version = "0.1.0" 25 | self._description = "Wikipedia search integrations." 26 | 27 | def can_handle_on_response(self) -> bool: 28 | """This method is called to check that the plugin can 29 | handle the on_response method. 30 | Returns: 31 | bool: True if the plugin can handle the on_response method.""" 32 | return False 33 | 34 | def on_response(self, response: str, *args, **kwargs) -> str: 35 | """This method is called when a response is received from the model.""" 36 | pass 37 | 38 | def can_handle_post_prompt(self) -> bool: 39 | """This method is called to check that the plugin can 40 | handle the post_prompt method. 41 | Returns: 42 | bool: True if the plugin can handle the post_prompt method.""" 43 | return True 44 | 45 | def can_handle_on_planning(self) -> bool: 46 | """This method is called to check that the plugin can 47 | handle the on_planning method. 48 | Returns: 49 | bool: True if the plugin can handle the on_planning method.""" 50 | return False 51 | 52 | def on_planning( 53 | self, prompt: PromptGenerator, messages: List[str] 54 | ) -> Optional[str]: 55 | """This method is called before the planning chat completeion is done. 
56 | Args: 57 | prompt (PromptGenerator): The prompt generator. 58 | messages (List[str]): The list of messages. 59 | """ 60 | pass 61 | 62 | def can_handle_post_planning(self) -> bool: 63 | """This method is called to check that the plugin can 64 | handle the post_planning method. 65 | Returns: 66 | bool: True if the plugin can handle the post_planning method.""" 67 | return False 68 | 69 | def post_planning(self, response: str) -> str: 70 | """This method is called after the planning chat completion is done. 71 | Args: 72 | response (str): The response. 73 | Returns: 74 | str: The resulting response. 75 | """ 76 | pass 77 | 78 | def can_handle_pre_instruction(self) -> bool: 79 | """This method is called to check that the plugin can 80 | handle the pre_instruction method. 81 | Returns: 82 | bool: True if the plugin can handle the pre_instruction method.""" 83 | return False 84 | 85 | def pre_instruction(self, messages: List[str]) -> List[str]: 86 | """This method is called before the instruction chat is done. 87 | Args: 88 | messages (List[str]): The list of context messages. 89 | Returns: 90 | List[str]: The resulting list of messages. 91 | """ 92 | pass 93 | 94 | def can_handle_on_instruction(self) -> bool: 95 | """This method is called to check that the plugin can 96 | handle the on_instruction method. 97 | Returns: 98 | bool: True if the plugin can handle the on_instruction method.""" 99 | return False 100 | 101 | def on_instruction(self, messages: List[str]) -> Optional[str]: 102 | """This method is called when the instruction chat is done. 103 | Args: 104 | messages (List[str]): The list of context messages. 105 | Returns: 106 | Optional[str]: The resulting message. 107 | """ 108 | pass 109 | 110 | def can_handle_post_instruction(self) -> bool: 111 | """This method is called to check that the plugin can 112 | handle the post_instruction method. 
113 | Returns: 114 | bool: True if the plugin can handle the post_instruction method.""" 115 | return False 116 | 117 | def post_instruction(self, response: str) -> str: 118 | """This method is called after the instruction chat is done. 119 | Args: 120 | response (str): The response. 121 | Returns: 122 | str: The resulting response. 123 | """ 124 | pass 125 | 126 | def can_handle_pre_command(self) -> bool: 127 | """This method is called to check that the plugin can 128 | handle the pre_command method. 129 | Returns: 130 | bool: True if the plugin can handle the pre_command method.""" 131 | return False 132 | 133 | def pre_command( 134 | self, command_name: str, arguments: Dict[str, Any] 135 | ) -> Tuple[str, Dict[str, Any]]: 136 | """This method is called before the command is executed. 137 | Args: 138 | command_name (str): The command name. 139 | arguments (Dict[str, Any]): The arguments. 140 | Returns: 141 | Tuple[str, Dict[str, Any]]: The command name and the arguments. 142 | """ 143 | pass 144 | 145 | def can_handle_post_command(self) -> bool: 146 | """This method is called to check that the plugin can 147 | handle the post_command method. 148 | Returns: 149 | bool: True if the plugin can handle the post_command method.""" 150 | return False 151 | 152 | def post_command(self, command_name: str, response: str) -> str: 153 | """This method is called after the command is executed. 154 | Args: 155 | command_name (str): The command name. 156 | response (str): The response. 157 | Returns: 158 | str: The resulting response. 159 | """ 160 | pass 161 | 162 | def can_handle_chat_completion( 163 | self, 164 | messages: list[Dict[Any, Any]], 165 | model: str, 166 | temperature: float, 167 | max_tokens: int, 168 | ) -> bool: 169 | """This method is called to check that the plugin can 170 | handle the chat_completion method. 171 | Args: 172 | messages (Dict[Any, Any]): The messages. 173 | model (str): The model name. 174 | temperature (float): The temperature. 
175 | max_tokens (int): The max tokens. 176 | Returns: 177 | bool: True if the plugin can handle the chat_completion method.""" 178 | return False 179 | 180 | def handle_chat_completion( 181 | self, 182 | messages: list[Dict[Any, Any]], 183 | model: str, 184 | temperature: float, 185 | max_tokens: int, 186 | ) -> str: 187 | """This method is called when the chat completion is done. 188 | Args: 189 | messages (Dict[Any, Any]): The messages. 190 | model (str): The model name. 191 | temperature (float): The temperature. 192 | max_tokens (int): The max tokens. 193 | Returns: 194 | str: The resulting response. 195 | """ 196 | return None 197 | 198 | def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator: 199 | """This method is called just after the generate_prompt is called, 200 | but actually before the prompt is generated. 201 | Args: 202 | prompt (PromptGenerator): The prompt generator. 203 | Returns: 204 | PromptGenerator: The prompt generator. 205 | """ 206 | 207 | prompt.add_command( 208 | "wikipedia_search", 209 | "Wikipedia search", 210 | {"query": ""}, 211 | _wikipedia_search, 212 | ) 213 | return prompt 214 | 215 | def can_handle_text_embedding( 216 | self, text: str 217 | ) -> bool: 218 | return False 219 | 220 | def handle_text_embedding( 221 | self, text: str 222 | ) -> list: 223 | pass 224 | 225 | def can_handle_user_input(self, user_input: str) -> bool: 226 | return False 227 | 228 | def user_input(self, user_input: str) -> str: 229 | return user_input 230 | 231 | def can_handle_report(self) -> bool: 232 | return False 233 | 234 | def report(self, message: str) -> None: 235 | pass -------------------------------------------------------------------------------- /src/autogpt_plugins/wikipedia_search/wikipedia_search.py: -------------------------------------------------------------------------------- 1 | """Wikipedia search command for Autogpt.""" 2 | from __future__ import annotations 3 | 4 | import json 5 | import re 6 | from urllib.parse 
import quote 7 | 8 | import requests 9 | 10 | HTML_TAG_CLEANER = re.compile("<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});") 11 | 12 | 13 | def _wikipedia_search(query: str, num_results: int = 5) -> str | list[str]: 14 | """Return the results of a Wikipedia search 15 | Args: 16 | query (str): The search query. 17 | num_results (int): The number of results to return. 18 | Returns: 19 | str: The results of the search. The resulting string is a `json.dumps` 20 | of a list of len `num_results` containing dictionaries with the 21 | following structure: `{'title': , 'summary': <summary>, 22 | 'url': <url to relevant page>}` 23 | """ 24 | search_url = ( 25 | "https://en.wikipedia.org/w/api.php?action=query&" 26 | "format=json&list=search&utf8=1&formatversion=2&" 27 | f"srsearch={quote(query)}" 28 | ) 29 | with requests.Session() as session: 30 | session.headers.update( 31 | { 32 | "User-Agent": ( 33 | "AutoGPT wikipedia_search plugin (https://news.agpt.co/contact/) Requests" 34 | ) 35 | } 36 | ) 37 | session.headers.update({"Accept": "application/json"}) 38 | results = session.get(search_url) 39 | items = [] 40 | try: 41 | results = results.json() 42 | for item in results["query"]["search"]: 43 | summary = re.sub(HTML_TAG_CLEANER, "", item["snippet"]) 44 | items.append( 45 | { 46 | "title": item["title"], 47 | "summary": summary, 48 | "url": f"http://en.wikipedia.org/?curid={item['pageid']}", 49 | } 50 | ) 51 | if len(items) == num_results: 52 | break 53 | except Exception as e: 54 | return f"'wikipedia_search' on query: '{query}' raised exception: '{e}'" 55 | 56 | return json.dumps(items, ensure_ascii=False, indent=4) 57 | -------------------------------------------------------------------------------- /src/autogpt_plugins/wolframalpha_search/README.md: -------------------------------------------------------------------------------- 1 | # Wolfram Search Plugin 2 | 3 | The Wolfram Search plugin will allow AutoGPT to directly interact with Wolfram. 
4 | 5 | ## Key Features: 6 | - Wolfram Search performs search queries using Wolfram. 7 | 8 | ## Installation: 9 | 1. Download the Wolfram Search Plugin repository as a ZIP file. 10 | 2. Copy the ZIP file into the "plugins" folder of your Auto-GPT project. 11 | 3. Add this chunk of code along with your Wolfram AppID (Token API) information to the `.env` file within AutoGPT: 12 | 13 | ``` 14 | ################################################################################ 15 | ### WOLFRAM API 16 | ################################################################################ 17 | 18 | # Wolfram AppId or API keys can be found here: https://developer.wolframalpha.com/portal/myapps/index.html 19 | # the AppId can be generated once you register in Wolfram Developer portal. 20 | 21 | WOLFRAMALPHA_APPID= 22 | ``` 23 | 24 | ## AutoGPT Configuration 25 | 26 | Set `ALLOWLISTED_PLUGINS=autogpt-wolframalpha-search,example-plugin1,example-plugin2,etc` in your AutoGPT `.env` file. 27 | -------------------------------------------------------------------------------- /src/autogpt_plugins/wolframalpha_search/__init__.py: -------------------------------------------------------------------------------- 1 | """WolframAlpha search integrations.""" 2 | import os 3 | from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar 4 | 5 | from auto_gpt_plugin_template import AutoGPTPluginTemplate 6 | from wolframalpha import Client 7 | 8 | PromptGenerator = TypeVar("PromptGenerator") 9 | 10 | 11 | class Message(TypedDict): 12 | role: str 13 | content: str 14 | 15 | 16 | class AutoGPTWolframAlphaSearch(AutoGPTPluginTemplate): 17 | """ 18 | WolframAlpha search integrations 19 | """ 20 | 21 | def __init__(self): 22 | super().__init__() 23 | self._name = "autogpt-wolframalpha-search" 24 | self._version = "0.1.0" 25 | self._description = ("WolframAlpha is an answer engine, it answers " 26 | "factual queries by computing answers from " 27 | "externally sourced data. 
It can provide answers " 28 | "to math, data and science queries.") 29 | self.wolframalpha_appid = os.getenv("WOLFRAMALPHA_APPID") 30 | 31 | self.api = None 32 | if self.wolframalpha_appid is not None: 33 | self.api = Client(self.wolframalpha_appid) 34 | else: 35 | print("WolframAlpha AppID not found in .env file.") 36 | 37 | def can_handle_on_response(self) -> bool: 38 | """This method is called to check that the plugin can 39 | handle the on_response method. 40 | Returns: 41 | bool: True if the plugin can handle the on_response method.""" 42 | return False 43 | 44 | def on_response(self, response: str, *args, **kwargs) -> str: 45 | """This method is called when a response is received from the model.""" 46 | pass 47 | 48 | def can_handle_post_prompt(self) -> bool: 49 | """This method is called to check that the plugin can 50 | handle the post_prompt method. 51 | Returns: 52 | bool: True if the plugin can handle the post_prompt method.""" 53 | return True 54 | 55 | def can_handle_on_planning(self) -> bool: 56 | """This method is called to check that the plugin can 57 | handle the on_planning method. 58 | Returns: 59 | bool: True if the plugin can handle the on_planning method.""" 60 | return False 61 | 62 | def on_planning( 63 | self, prompt: PromptGenerator, messages: List[str] 64 | ) -> Optional[str]: 65 | """This method is called before the planning chat completeion is done. 66 | Args: 67 | prompt (PromptGenerator): The prompt generator. 68 | messages (List[str]): The list of messages. 69 | """ 70 | pass 71 | 72 | def can_handle_post_planning(self) -> bool: 73 | """This method is called to check that the plugin can 74 | handle the post_planning method. 75 | Returns: 76 | bool: True if the plugin can handle the post_planning method.""" 77 | return False 78 | 79 | def post_planning(self, response: str) -> str: 80 | """This method is called after the planning chat completeion is done. 81 | Args: 82 | response (str): The response. 
83 | Returns: 84 | str: The resulting response. 85 | """ 86 | pass 87 | 88 | def can_handle_pre_instruction(self) -> bool: 89 | """This method is called to check that the plugin can 90 | handle the pre_instruction method. 91 | Returns: 92 | bool: True if the plugin can handle the pre_instruction method.""" 93 | return False 94 | 95 | def pre_instruction(self, messages: List[str]) -> List[str]: 96 | """This method is called before the instruction chat is done. 97 | Args: 98 | messages (List[str]): The list of context messages. 99 | Returns: 100 | List[str]: The resulting list of messages. 101 | """ 102 | pass 103 | 104 | def can_handle_on_instruction(self) -> bool: 105 | """This method is called to check that the plugin can 106 | handle the on_instruction method. 107 | Returns: 108 | bool: True if the plugin can handle the on_instruction method.""" 109 | return False 110 | 111 | def on_instruction(self, messages: List[str]) -> Optional[str]: 112 | """This method is called when the instruction chat is done. 113 | Args: 114 | messages (List[str]): The list of context messages. 115 | Returns: 116 | Optional[str]: The resulting message. 117 | """ 118 | pass 119 | 120 | def can_handle_post_instruction(self) -> bool: 121 | """This method is called to check that the plugin can 122 | handle the post_instruction method. 123 | Returns: 124 | bool: True if the plugin can handle the post_instruction method.""" 125 | return False 126 | 127 | def post_instruction(self, response: str) -> str: 128 | """This method is called after the instruction chat is done. 129 | Args: 130 | response (str): The response. 131 | Returns: 132 | str: The resulting response. 133 | """ 134 | pass 135 | 136 | def can_handle_pre_command(self) -> bool: 137 | """This method is called to check that the plugin can 138 | handle the pre_command method. 
139 | Returns: 140 | bool: True if the plugin can handle the pre_command method.""" 141 | return False 142 | 143 | def pre_command( 144 | self, command_name: str, arguments: Dict[str, Any] 145 | ) -> Tuple[str, Dict[str, Any]]: 146 | """This method is called before the command is executed. 147 | Args: 148 | command_name (str): The command name. 149 | arguments (Dict[str, Any]): The arguments. 150 | Returns: 151 | Tuple[str, Dict[str, Any]]: The command name and the arguments. 152 | """ 153 | pass 154 | 155 | def can_handle_post_command(self) -> bool: 156 | """This method is called to check that the plugin can 157 | handle the post_command method. 158 | Returns: 159 | bool: True if the plugin can handle the post_command method.""" 160 | return False 161 | 162 | def post_command(self, command_name: str, response: str) -> str: 163 | """This method is called after the command is executed. 164 | Args: 165 | command_name (str): The command name. 166 | response (str): The response. 167 | Returns: 168 | str: The resulting response. 169 | """ 170 | pass 171 | 172 | def can_handle_chat_completion( 173 | self, 174 | messages: list[Dict[Any, Any]], 175 | model: str, 176 | temperature: float, 177 | max_tokens: int, 178 | ) -> bool: 179 | """This method is called to check that the plugin can 180 | handle the chat_completion method. 181 | Args: 182 | messages (Dict[Any, Any]): The messages. 183 | model (str): The model name. 184 | temperature (float): The temperature. 185 | max_tokens (int): The max tokens. 186 | Returns: 187 | bool: True if the plugin can handle the chat_completion method.""" 188 | return False 189 | 190 | def handle_chat_completion( 191 | self, 192 | messages: list[Dict[Any, Any]], 193 | model: str, 194 | temperature: float, 195 | max_tokens: int, 196 | ) -> str: 197 | """This method is called when the chat completion is done. 198 | Args: 199 | messages (Dict[Any, Any]): The messages. 200 | model (str): The model name. 
201 | temperature (float): The temperature. 202 | max_tokens (int): The max tokens. 203 | Returns: 204 | str: The resulting response. 205 | """ 206 | return None 207 | 208 | def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator: 209 | """This method is called just after the generate_prompt is called, 210 | but actually before the prompt is generated. 211 | Args: 212 | prompt (PromptGenerator): The prompt generator. 213 | Returns: 214 | PromptGenerator: The prompt generator. 215 | """ 216 | if self.api: 217 | from .wolframalpha_search import _wolframalpha_search 218 | prompt.add_command( 219 | "wolframalpha_search", 220 | self._description, 221 | {"query": "<query>"}, 222 | _wolframalpha_search, 223 | ) 224 | return prompt 225 | 226 | def can_handle_text_embedding( 227 | self, text: str 228 | ) -> bool: 229 | return False 230 | 231 | def handle_text_embedding( 232 | self, text: str 233 | ) -> list: 234 | pass 235 | 236 | def can_handle_user_input(self, user_input: str) -> bool: 237 | return False 238 | 239 | def user_input(self, user_input: str) -> str: 240 | return user_input 241 | 242 | def can_handle_report(self) -> bool: 243 | return False 244 | 245 | def report(self, message: str) -> None: 246 | pass -------------------------------------------------------------------------------- /src/autogpt_plugins/wolframalpha_search/test_wolframalpha_search.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | 4 | import requests 5 | 6 | from . 
import AutoGPTWolframAlphaSearch 7 | 8 | 9 | class TestAutoGPTWolframAlphaSearch(unittest.TestCase): 10 | def setUp(self): 11 | os.environ["WOLFRAMALPHA_APPID"] = "test_appid" 12 | self.plugin = AutoGPTWolframAlphaSearch() 13 | 14 | def tearDown(self): 15 | os.environ.pop("WOLFRAMALPHA_APPID", None) 16 | 17 | def test_wolframalpha_search(self): 18 | query = "2+2" 19 | try: 20 | from .wolframalpha_search import _wolframalpha_search 21 | _wolframalpha_search(query) 22 | except requests.exceptions.HTTPError as e: 23 | self.assertEqual(e.response.status_code, 401) 24 | 25 | 26 | if __name__ == "__main__": 27 | unittest.main() 28 | -------------------------------------------------------------------------------- /src/autogpt_plugins/wolframalpha_search/wolframalpha_search.py: -------------------------------------------------------------------------------- 1 | from . import AutoGPTWolframAlphaSearch 2 | 3 | plugin = AutoGPTWolframAlphaSearch() 4 | 5 | 6 | def _wolframalpha_search(query: str) -> str | list[str]: 7 | res = "" 8 | try: 9 | ans = plugin.api.query(query) 10 | res = next(ans.results).text 11 | except Exception as e: 12 | return f"'_wolframalpha_search' on query: '{query}' raised exception: '{e}'" 13 | return res 14 | --------------------------------------------------------------------------------