├── .devcontainer ├── Dockerfile ├── devcontainer.json └── noop.txt ├── .env.template ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE.txt ├── README.md ├── app.py ├── assets ├── demo.gif └── ksGPT.png ├── config.py ├── install.bat ├── output_methods ├── __init__.py └── audio_pyttsx3.py ├── plugins ├── __init__.py ├── _accuweather_plugin │ ├── __init__.py │ ├── accuweather_base.py │ └── accuweather_tools.py ├── _gemini_pro_plugin │ ├── __init__.py │ ├── gemini_pro_base.py │ ├── gemini_pro_tools.py │ └── gemini_pro_vision_tools.py ├── _gmail_plugin │ ├── __init__.py │ ├── calendar_tools.py │ ├── drive_tools.py │ ├── email_tools.py │ └── gmail_base.py ├── _google_search_plugin │ ├── __init__.py │ ├── google_search_base.py │ └── google_search_tools.py ├── _news_plugin │ ├── __init__.py │ ├── news_base.py │ ├── newsapi_tools.py │ └── nytimes_tools.py ├── _nhtsa_plugin │ ├── __init__.py │ ├── nhtsa_vpic_base.py │ └── nhtsa_vpic_tools.py ├── _system_commands │ ├── __init__.py │ ├── system_commands_base.py │ └── system_commands_tools.py ├── plugin_base.py └── plugins_enabled.py ├── requirements.txt ├── static ├── J5.webp ├── U1.webp ├── chat-style.css └── chat.js ├── templates └── index.html ├── uploads └── test_pic.jpg ├── utils ├── __init__.py ├── core_tools.py ├── openai_dalle_tools.py └── openai_model_tools.py └── web_app.py /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | # Use the official Miniconda image as a parent image 3 | FROM mcr.microsoft.com/devcontainers/miniconda:0-3 4 | 5 | # Set the working directory in the container 6 | WORKDIR /workspace 7 | 8 | # Initialize conda for shell interaction 9 | RUN conda init 10 | 11 | # Create a new Conda environment named eAI with Python 3.12 12 | RUN conda create -n gpt_all -c conda-forge python=3.12 -y 13 | 14 | # Initialize conda for shell interaction and set conda to automatically activate the base environment 15 | RUN conda init bash \ 16 | && echo "conda activate gpt_all" >> ~/.bashrc 17 | 18 | # Copy the requirements.txt file into the container at /workspace 19 | COPY requirements.txt /workspace/ 20 | 21 | # Install the Python requirements in the eAI environment 22 | RUN conda run -n gpt_all pip install --no-cache-dir -r requirements.txt 23 | 24 | # Download the SpaCy NLP model in the eAI environment 25 | RUN conda run -n gpt_all python -m spacy download en_core_web_md 26 | 27 | # Set the default environment to eAI when starting the container 28 | # This will activate the eAI environment for any interactive or non-interactive shell 29 | ENV CONDA_DEFAULT_ENV=gpt_all 30 | ENV CONDA_PREFIX=/opt/conda/envs/gpt_all 31 | ENV PATH=$CONDA_PREFIX/bin:$PATH 32 | 33 | # Ensure that the conda environment is activated on startup 34 | ENTRYPOINT ["conda", "run", "-n", "gpt_all", "/bin/bash"] 35 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. 
For config options, see the 2 | // README at: https://github.com/devcontainers/templates/tree/main/src/miniconda 3 | { 4 | "name": "(GPT_ALL)", 5 | "build": { 6 | "context": "..", 7 | "dockerfile": "Dockerfile" 8 | }, 9 | "customizations": { 10 | "vscode": { 11 | "extensions": [ 12 | "ms-python.python", 13 | "ms-python.vscode-pylance", 14 | "ms-python.autopep8", 15 | "ms-python.black-formatter", 16 | "ms-python.flake8", 17 | "ms-python.isort", 18 | "ms-python.mypy-type-checker", 19 | "ms-python.vscode-pylance", 20 | "ms-python.pylint", 21 | "ms-python.debugpy", 22 | "vscode-icons-team.vscode-icons", 23 | "ms-azuretools.vscode-docker", 24 | "esbenp.prettier-vscode", 25 | "ms-vsliveshare.vsliveshare", 26 | "eamodio.gitlens", 27 | "GitHub.copilot-chat", 28 | "GitHub.copilot", 29 | "streetsidesoftware.code-spell-checker", 30 | "genieai.chatgpt-vscode", 31 | "github.vscode-github-actions", 32 | "GitHub.vscode-pull-request-github", 33 | "GitHub.remotehub", 34 | "GitHub.github-vscode-theme", 35 | "ms-vscode.live-server", 36 | "dbaeumer.vscode-eslint", 37 | "ms-vscode-remote.remote-containers", 38 | "GitHub.vscode-pull-request-github", 39 | "GitHub.remotehub", 40 | "charliermarsh.ruff", 41 | "ms-pyright.pyright" 42 | ] 43 | } 44 | } 45 | } -------------------------------------------------------------------------------- /.devcontainer/noop.txt: -------------------------------------------------------------------------------- 1 | This file is copied into the container along with environment.yml* from the 2 | parent folder. This is done to prevent the Dockerfile COPY instruction from 3 | failing if no environment.yml is found. -------------------------------------------------------------------------------- /.env.template: -------------------------------------------------------------------------------- 1 | # .env.template 2 | 3 | # This template file contains environment variables required for the application. 4 | # Copy this file to '.env' and fill in the necessary API keys and settings. 5 | # Please refer to the respective API documentation for more details on obtaining API keys. 6 | 7 | MAIN_SYSTEM_PROMPT=You are an AI Assistant integrated within a Python-based application designed to assist users by leveraging a suite of tools and functions, both synchronous and asynchronous, to process user requests and manage dynamic workflows. Your capabilities include interacting with a larger AI language model (LLM) for synchronous and asynchronous assistance, accessing the current date and time, and utilizing enabled plugins for additional functionalities. You are expected to maintain a conversation memory, ensuring the context remains within the token limit for efficient processing. When responding to user requests, consider the available tools and their descriptions, dynamically structuring workflows to include multiple turns where necessary. Prioritize reasoning and delivering the best possible response based on the users original request, taking into account the data gathered and actions completed during the interaction. Ensure that your responses are clear, concise, and directly address the users needs, while also being prepared to handle errors or unexpected situations gracefully. 
8 | 9 | ######################################################################################### 10 | # 11 | # OPENAI API SETTINGS 12 | # 13 | # Obtain your API key from: https://platform.openai.com/account/api-keys 14 | # 15 | # For model selection, refer to: https://platform.openai.com/docs/models 16 | # 17 | # For model pricing, refer to: https://openai.com/pricing/ 18 | # 19 | # gpt-3.5-turbo-1106 = Input $0.001 / 1K tokens Output $0.002 / 1K tokens 20 | # gpt-4-1106-preview = Input $0.010 / 1K tokens Output $0.030 / 1K tokens 21 | # gpt-4-0613 = Input $0.030 / 1K tokens Output $0.060 / 1K tokens 22 | # 23 | ######################################################################################### 24 | 25 | # Your OpenAI API key (required) 26 | 27 | OPENAI_API_KEY= 28 | OPENAI_ORG_ID= 29 | OPENAI_MODEL=gpt-4-1106-preview 30 | OPENAI_TEMP=0.3 31 | OPENAI_TOP_P=0.3 32 | MAX_TOKENS=4095 33 | ############################################################################################################## 34 | 35 | # PLUGIN SETTINGS 36 | 37 | # Set to True to enable the plugin, False to disable the plugin 38 | 39 | ############################################################################################################## 40 | 41 | ENABLE_ACCUWEATHERPLUGIN=True 42 | 43 | ENABLE_GEMINIPROPLUGIN=False 44 | 45 | ENABLE_GMAILPLUGIN=False 46 | 47 | ENABLE_GOOGLESEARCHPLUGIN=False 48 | 49 | ENABLE_NEWSPLUGIN=False 50 | 51 | ENABLE_NHTSAVPICPLUGIN=False 52 | 53 | ENABLE_SYSTEMCOMMANDSPLUGIN=False 54 | 55 | ############################################################################################################## 56 | # TTS SETTINGS 57 | ### ELEVENLABS API 58 | ## Eleven Labs Default Voice IDs 59 | ## Rachel : 21m00Tcm4TlvDq8ikWAM 60 | ## Domi : AZnzlk1XvdvUeBnXmlld 61 | ## Bella : EXAVITQu4vr4xnSDxMaL 62 | ## Antoni : ErXwobaYiN019PkySvjV 63 | ## Elli : MF3mGyEYCl7XYWbV9V6O 64 | ## Josh : TxGEqnHWrfWFTfGW9XjX 65 | ## Arnold : VR6AewLTigWG4xSOukaG 66 | ## Adam : pNInz6obpgDQGcFmaJgB 67 | ## Sam : yoZ06aMxZJJ28mfd3POQ 68 | ############################################################################################################## 69 | 70 | # switch between elevenlabs or pyttsx3 71 | TTS_ENGINE=pyttsx3 72 | 73 | ELEVEN_API_KEY= 74 | ELEVENLABS_VOICE= 75 | 76 | # pyttsx3 Win 11 David or Zira 77 | TTS_VOICE_ID=Microsoft Zira Desktop - English (United States) 78 | TTS_RATE=150 79 | 80 | ############################################################################################################## 81 | # GEMINI PRO SETTINGS 82 | # Obtain your API key from: https://makersuite.google.com/app/apikey 83 | ############################################################################################################## 84 | 85 | GEMINI_API_KEY= 86 | 87 | ######################################################################################### 88 | # ACCUWEATHER API SETTINGS 89 | # Sign up and obtain your API key from: https://developer.accuweather.com/ 90 | ######################################################################################### 91 | 92 | # Your AccuWeather API key (required if tools are enabled) 93 | ACCUWEATHER_API_KEY= 94 | 95 | # Base URL for AccuWeather API (do not change unless necessary) 96 | ACCUWEATHER_BASE_URL=http://dataservice.accuweather.com 97 | 98 | ######################################################################################### 99 | # GOOGLE SEARCH API SETTINGS 100 | # Get your API key and Custom Search Engine ID from: 
https://developers.google.com/custom-search/v1/overview 101 | ######################################################################################### 102 | 103 | # Your Google API key (required if tools are enabled) 104 | GMAIL_ADDRESS= 105 | GOOGLE_CLIENT_ID= 106 | GOOGLE_CLIENT_SECRET= 107 | 108 | # Your Google Custom Search Engine ID (required if tools are enabled) 109 | GOOGLE_API_KEY= 110 | GOOGLE_CSE_ID= 111 | 112 | ######################################################################################### 113 | # NEWSAPI.org API SETTINGS 114 | # Get started with NewsAPI at: https://newsapi.org/docs/get-started 115 | ######################################################################################### 116 | 117 | # Base URL for NewsAPI.org (do not change unless necessary) 118 | NEWSAPI_ORG_URL=https://newsapi.org/v2/everything 119 | 120 | # Your NewsAPI.org API key (required if tools are enabled) 121 | NEWS_API_KEY= 122 | 123 | ######################################################################################### 124 | # NEW YORK TIMES API SETTINGS 125 | # Register and obtain your API key from: https://developer.nytimes.com/ 126 | ######################################################################################### 127 | 128 | # Your New York Times API key (required if tools are enabled) 129 | NYT_API_KEY= 130 | 131 | # Your New York Times API secret (required if tools are enabled) 132 | NYT_API_SECRET= 133 | 134 | # Your New York Times API app ID (required if tools are enabled) 135 | NYT_API_APP_ID= 136 | 137 | # Your New York Times app name (required if tools are enabled) 138 | NYT_APP_NAME= 139 | 140 | # Base URL for New York Times Article Search API (do not change unless necessary) 141 | NYT_ARTICLE_SEARCH_URL=https://api.nytimes.com/svc/search/v2/articlesearch.json -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | .idea/ 161 | .vscode 162 | *.ini 163 | *.json 164 | *.log 165 | .trunk 166 | *.code-workspace -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct for Gemini_Pro_CLI 2 | 3 | ## 1. Purpose 4 | 5 | The purpose of this Code of Conduct is to provide guidelines for contributors to the Gemini_Pro_CLI projects on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct. 6 | 7 | ## 2. Scope 8 | 9 | This Code of Conduct applies to all contributors, maintainers, and users of the Gemini_Pro_CLI project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project. 10 | 11 | ## 3. Our Standards 12 | 13 | We encourage the following behavior: 14 | 15 | * Being respectful and considerate to others 16 | * Actively seeking diverse perspectives 17 | * Providing constructive feedback and assistance 18 | * Demonstrating empathy and understanding 19 | 20 | We discourage the following behavior: 21 | 22 | * Harassment or discrimination of any kind 23 | * Disrespectful, offensive, or inappropriate language or content 24 | * Personal attacks or insults 25 | * Unwarranted criticism or negativity 26 | 27 | ## 4. Reporting and Enforcement 28 | 29 | If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary. 30 | 31 | Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations. 32 | 33 | ## 5. Acknowledgements 34 | 35 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html). -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # eAI Contribution Guide 2 | 3 | ## In short 4 | 1. Avoid duplicate work, issues, PRs etc. 5 | 2. Also consider contributing something other than code. 6 | 3. Create a draft PR before starting work on non-small changes. 7 | 4. Clearly explain your changes when submitting a PR. 8 | 5. Don't submit stuff that's broken. 9 | 6. Avoid making unnecessary changes, especially if they're purely based on your personal 10 | preferences. 11 | 7. Fork the repo and submit a PR. 12 | 13 | ## Why instructions like these are necessary 14 | "We value all contributions". After all, we are an open-source 15 | project, so we should welcome any input that people are willing to give, right? 16 | Well, the reality is that contributions are SUPER-valuable. 17 | 18 | ❤️ & 🔆 -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [2023] [Second Opinion Solutions LLC] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # GPT_ALL 3 | 4 | ![Demo GIF](assets/demo.gif) 5 | 6 |

7 | Welcome to GPT_ALL: a cutting-edge AI assistant built on large language models (LLMs). It gathers and interprets real-time data from a wide range of sources while integrating with the functionality of Python 3.12. At its core, OpenAI's GPT-4-1106 model selects functions and tools from a suite of specialized "experts" to keep token usage efficient. GPT_ALL offers both a dynamic command-line interface and a simple web interface, and it is engineered to take advantage of the latest advancements, tools, and expanded context windows in the OpenAI API. The goal is to integrate ChatGPT-style models with a wide array of applications and devices. GPT_ALL ships with a range of built-in plugins, and you are encouraged to craft your own to broaden its functionality. 8 |

9 | 10 | ## Features 11 |
12 | Latest Features: 13 | 14 | - **AsyncOpenAI Integration**: 15 | - Asynchronous, parallel function calling lets the assistant complete multiple tool calls in a single request. 16 | - Leverages GPT-4 for complex inquiries using tools and tool calls. 17 | 18 | - **Conversation Memory Management**: 19 | - Remembers and references previous inputs and responses within the current session only. 20 | 21 | - **Conversation Flow**: 22 | - Manages conversation flow by appending user input to memory and keeping responses within context limits. 23 | 24 | - **Dynamic Function Invocation Based on Tool Responses**: 25 | - Handles dynamic invocation of functions based on tool call responses. 26 | - Manages conversation state by appending messages from tool calls. 27 | - Generates follow-up responses that take tool call results into account. 28 | 29 | - **Environment Cleanup**: 30 | - Cleans up the tools list after each request to help manage token usage. 31 | 32 | - **Modular Plugin System**: 33 | - Easily install new functions/tools to extend GPT_ALL's abilities (see the example plugin sketch below). 34 | - Plugins load dynamically when enabled via the `.env` file. 35 | 36 |
37 |
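If you want to write your own plugin, the sketch below shows the general shape. It is only an illustration: it mirrors the structure of the bundled AccuWeather plugin (`plugins/_accuweather_plugin`) and assumes `PluginBase` initializes the `tools` list and `available_functions` dict (as the AccuWeather plugin's code implies). The `EchoPlugin` class, its file path, the `echo` tool, and the `ENABLE_ECHOPLUGIN` flag are hypothetical; check `plugins/plugin_base.py` and `plugins/plugins_enabled.py` for the exact interface and discovery logic before wiring up a real plugin.

```python
# plugins/_echo_plugin/echo_base.py  (hypothetical example, not shipped with the repo)
from plugins.plugin_base import PluginBase


async def echo(text: str = "") -> str:
    """Toy tool: return the text it was given."""
    return text


# Tool schema in the OpenAI function-calling format used by the other plugins.
echo_tools = [
    {
        "type": "function",
        "function": {
            "name": "echo",
            "description": "Repeat the provided text back to the user.",
            "parameters": {
                "type": "object",
                "properties": {
                    "text": {"type": "string", "description": "The text to echo."},
                },
                "required": ["text"],
            },
        },
    },
]


class EchoPlugin(PluginBase):
    """Minimal plugin that follows the AccuWeather plugin's structure."""

    async def initialize(self):
        await self.load_plugin_tools()

    async def load_plugin_tools(self):
        # Register the tool schema and the callable the assistant can invoke.
        self.tools.extend(echo_tools)
        self.available_functions["echo"] = echo
```

Assuming the loader follows the same `ENABLE_<NAME>PLUGIN` convention used in `.env.template`, you would then enable the plugin with something like `ENABLE_ECHOPLUGIN=True` in your `.env`.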
38 | 39 | # Getting Started 40 | 41 | ### Prerequisites 42 | 43 | - Ensure you have conda installed on your system. 44 | 45 | - Using Docker with the VS Code Dev Containers extension
is recommended for safety when system commands are enabled. 46 | 47 | ### Installation 48 | 49 | Clone this repository to your local machine: 50 | 51 | ```bash 52 | git clone https://github.com/Eloquent-Algorithmics/GPT_ALL.git 53 | ``` 54 | 55 | Navigate into the project directory and run the installer: 56 | 57 | ```bash 58 | 59 | cd GPT_ALL 60 | 61 | .\install.bat 62 | 63 | ``` 64 | 65 | ## Configuration 66 | 67 | To enable specific plugins or features, modify the `.env` file in your project directory according to your needs. 68 | 69 | **An OpenAI API key is required**; all other keys are optional. 70 | 71 | 72 | ### Usage 73 | 74 | Run GPT_ALL using: 75 | 76 | ```bash 77 | 78 | conda activate gpt_all 79 | 80 | python -m app 81 | 82 | python -m web_app # To use the web interface. 83 | 84 | # or 85 | 86 | python -m app --talk # To use TTS. 87 | 88 | ``` 89 | 90 | # ⚠️ Disclaimer ⚠️ 91 | **Please note** that some materials may not provide ***the best possible or most optimal*** recommendations, solutions, or source code. Try to be open-minded and treat everything as a `step` in the `learning process`. If you find something to improve in the materials, **please** send your suggestions to the respective authors. 92 | 93 | **This version of GPT_ALL is experimental; USE IT AT YOUR OWN RISK. Even though a lot of testing has been performed on this version, some features may still be buggy or incomplete.** 94 | 95 | This version can be driven by the community. If you want to help improve it, don't hesitate to submit a GitHub issue or PR. 96 | 97 | ## Contributing 98 | 99 | Contributions are what make the open-source community such an amazing place to learn, inspire, and create. Any contributions you make are greatly appreciated. 100 | 101 | **If you want to contribute to GPT_ALL, be sure to review the [contribution guidelines](CONTRIBUTING.md). This project adheres to the [code of conduct](CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code.** 102 | 103 | 1. Fork the Project 104 | 2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`) 105 | 3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`) 106 | 4. Push to the Branch (`git push origin feature/AmazingFeature`) 107 | 5. Open a Pull Request 108 | 109 | 110 | ## License 111 | 112 | [Apache License 2.0](LICENSE.txt) 113 | -------------------------------------------------------------------------------- /assets/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/assets/demo.gif -------------------------------------------------------------------------------- /assets/ksGPT.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/assets/ksGPT.png -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: config.py 5 | # Path: /config.py 6 | 7 | """ 8 | This module loads environment variables from the .env file. 9 | 10 | The .env file is not included in the repository for security reasons.
11 | """ 12 | 13 | import os 14 | from dotenv import load_dotenv 15 | from rich.live import Live 16 | from rich.spinner import Spinner 17 | 18 | # Load the .env file 19 | load_dotenv() 20 | 21 | # Define the live_spinner 22 | live_spinner = Live(Spinner("pong", " "), auto_refresh=True) 23 | 24 | # Main app system prompt. 25 | MAIN_SYSTEM_PROMPT = os.getenv("MAIN_SYSTEM_PROMPT") 26 | 27 | # Main app OpenAI API key. 28 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 29 | if OPENAI_API_KEY is None: 30 | raise ValueError("OPENAI_API_KEY not set") 31 | 32 | # Main app OpenAI organization ID. 33 | OPENAI_ORG_ID = os.getenv("OPENAI_ORG_ID") 34 | if OPENAI_ORG_ID is None: 35 | raise ValueError("OPENAI_ORG_ID not set") 36 | 37 | # Main app OpenAI model ID. 38 | OPENAI_MODEL = os.getenv("OPENAI_MODEL") 39 | if OPENAI_MODEL is None: 40 | raise ValueError("OPENAI_MODEL not set") 41 | 42 | # Main app OpenAI Temperature. 43 | OPENAI_TEMP = float(os.getenv("OPENAI_TEMP", str(0.5))) 44 | 45 | # Main app OpenAI Top P. 46 | OPENAI_TOP_P = float(os.getenv("OPENAI_TOP_P", str(0.5))) 47 | 48 | # Main app OpenAI MAX Response token limit. 49 | OPENAI_MAX_TOKENS = int(os.getenv("OPENAI_MAX_TOKENS", str(1500))) 50 | 51 | # Configures the main app to use the local system TTS engine. 52 | TTS_ENGINE = os.getenv("TTS_ENGINE") 53 | if TTS_ENGINE is None: 54 | raise ValueError("TTS_ENGINE not set") 55 | 56 | # Configures the main app to use the local system TTS voice ID. 57 | TTS_VOICE_ID = os.getenv("TTS_VOICE_ID") 58 | if TTS_VOICE_ID is None: 59 | raise ValueError("TTS_VOICE_ID not set") 60 | 61 | # Configures the main app to use the local system TTS rate. 62 | TTS_RATE = int(os.getenv("TTS_RATE", str(150))) 63 | 64 | ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY") 65 | ELEVENLABS_VOICE = os.getenv("ELEVENLABS_VOICE", "Rachel") 66 | -------------------------------------------------------------------------------- /install.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | echo Creating a new Conda environment... 4 | call conda create -n gpt_all -c conda-forge python=3.12 -y 5 | 6 | echo Activating the new Conda environment... 7 | call conda activate gpt_all 8 | 9 | echo Installing the Python requirements... 10 | call pip install -r requirements.txt 11 | 12 | echo Downloading the SpaCy NLP model... 13 | call python -m spacy download en_core_web_md 14 | 15 | echo Installation completed. 16 | pause -------------------------------------------------------------------------------- /output_methods/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/output_methods/__init__.py -------------------------------------------------------------------------------- /output_methods/audio_pyttsx3.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: audio_pyttsx3.py 5 | # File Path: output/audio_pyttsx3.py 6 | 7 | """ 8 | This module is responsible for handling audio output. 
9 | 10 | """ 11 | import os 12 | os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" 13 | from typing import Union 14 | from io import BytesIO 15 | import pyttsx3 16 | import pygame 17 | from dotenv import load_dotenv 18 | from config import TTS_ENGINE, TTS_VOICE_ID, TTS_RATE, ELEVENLABS_VOICE 19 | import websockets 20 | import base64 21 | import asyncio 22 | import json 23 | # Import ElevenLabs functions 24 | from elevenlabs import generate, play, set_api_key, get_api_key, stream 25 | 26 | # Load environment variables from .env file 27 | load_dotenv() 28 | 29 | # Set the ElevenLabs API key if it exists in the environment 30 | ELEVEN_API_KEY = os.getenv('ELEVEN_API_KEY') 31 | if ELEVEN_API_KEY: 32 | set_api_key(ELEVEN_API_KEY) 33 | TTS_ENGINE = os.getenv('TTS_ENGINE') 34 | 35 | 36 | def tts_output(text): 37 | """ 38 | This function outputs the given text as speech. 39 | 40 | Args: 41 | text (str): The text to output. 42 | """ 43 | if TTS_ENGINE == "pyttsx3": 44 | tts_output_pyttsx3(text) 45 | elif TTS_ENGINE == "elevenlabs" and ELEVEN_API_KEY: 46 | # tts_output_elevenlabs is synchronous and starts its own event loop internally 47 | tts_output_elevenlabs(text) 48 | else: 49 | raise ValueError(f"Invalid TTS_ENGINE value or missing ElevenLabs API key: {TTS_ENGINE}") 50 | 51 | 52 | async def stream_elevenlabs(audio_stream): 53 | """Stream audio data using pygame player.""" 54 | initialize_audio() 55 | async for chunk in audio_stream: 56 | if chunk: 57 | play_audio(chunk) 58 | 59 | 60 | async def text_to_speech_input_streaming(voice_id, text_iterator): 61 | """Send text to ElevenLabs API and stream the returned audio.""" 62 | uri = f"wss://api.elevenlabs.io/v1/text-to-speech/{voice_id}/stream-input?model_id=eleven_monolingual_v1" 63 | 64 | async with websockets.connect(uri) as websocket: 65 | await websocket.send(json.dumps({ 66 | "text": " ", 67 | "voice_settings": {"stability": 0.5, "similarity_boost": 0.8}, 68 | "xi_api_key": ELEVEN_API_KEY, 69 | })) 70 | 71 | async def listen(): 72 | """Listen to the websocket for audio data and stream it.""" 73 | while True: 74 | try: 75 | message = await websocket.recv() 76 | data = json.loads(message) 77 | if data.get("audio"): 78 | yield base64.b64decode(data["audio"]) 79 | elif data.get('isFinal'): 80 | break 81 | except websockets.exceptions.ConnectionClosed as e: 82 | print(f"Connection closed with error: {e}") 83 | break 84 | except websockets.exceptions.ConnectionClosedOK: 85 | print("Connection closed without error.") 86 | break 87 | 88 | listen_task = asyncio.create_task(stream_elevenlabs(listen())) 89 | 90 | async for text in text_iterator: 91 | await websocket.send(json.dumps({"text": text, "try_trigger_generation": True})) 92 | 93 | await websocket.send(json.dumps({"text": ""})) 94 | 95 | await listen_task 96 | 97 | 98 | def tts_output_elevenlabs(text): 99 | """ 100 | This function outputs the given text as speech using the ElevenLabs API with streaming. 101 | 102 | Args: 103 | text (str): The text to output. 104 | """ 105 | async def text_iterator(): 106 | yield text 107 | 108 | asyncio.run(text_to_speech_input_streaming(ELEVENLABS_VOICE, text_iterator())) 109 | 110 | 111 | def initialize_audio(): 112 | """ 113 | This function initializes the audio system. 114 | """ 115 | pygame.mixer.pre_init(44100, -16, 2, 4096) 116 | pygame.mixer.init() 117 | 118 | 119 | def play_audio(audio: Union[bytes, BytesIO]): 120 | """ 121 | This function plays the given audio. 122 | 123 | Args: 124 | audio (bytes or BytesIO): The audio to play.
125 | """ 126 | 127 | if not isinstance(audio, (bytes, BytesIO)): 128 | return 129 | if isinstance(audio, bytes): 130 | audio = BytesIO(audio) 131 | 132 | pygame.mixer.music.load(audio) 133 | pygame.mixer.music.play() 134 | while pygame.mixer.music.get_busy(): 135 | pygame.time.wait(10) 136 | 137 | 138 | def tts_output_pyttsx3(text): 139 | 140 | """ 141 | This function outputs the given text as speech using pyttsx3. 142 | 143 | Args: 144 | text (str): The text to output. 145 | """ 146 | 147 | engine = pyttsx3.init('sapi5') 148 | 149 | voices = engine.getProperty('voices') 150 | 151 | if TTS_VOICE_ID: 152 | for voice in voices: 153 | if voice.name == TTS_VOICE_ID: 154 | engine.setProperty('voice', voice.id) 155 | break 156 | else: 157 | print("TTS_VOICE_ID not set, using default voice") 158 | 159 | engine.setProperty('rate', TTS_RATE) 160 | 161 | engine.say(text) 162 | engine.runAndWait() 163 | -------------------------------------------------------------------------------- /plugins/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/plugins/__init__.py -------------------------------------------------------------------------------- /plugins/_accuweather_plugin/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | # plugins/_accuweather_plugin/__init__.py 3 | 4 | from .accuweather_base import AccuWeatherPlugin 5 | from .accuweather_tools import ( 6 | extract_location, 7 | get_location_key, 8 | get_current_weather, 9 | get_one_hour_weather_forecast, 10 | get_twelve_hour_weather_forecast, 11 | get_one_day_weather_forecast, 12 | get_five_day_weather_forecast, 13 | accu_weather_tools, 14 | available_functions, 15 | ) 16 | 17 | __all__ = [ 18 | 'AccuWeatherPlugin', 19 | 'extract_location', 20 | 'get_location_key', 21 | 'get_current_weather', 22 | 'get_one_hour_weather_forecast', 23 | 'get_twelve_hour_weather_forecast', 24 | 'get_one_day_weather_forecast', 25 | 'get_five_day_weather_forecast', 26 | 'accu_weather_tools', 27 | 'available_functions', 28 | ] 29 | -------------------------------------------------------------------------------- /plugins/_accuweather_plugin/accuweather_base.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: accuweather_base.py 5 | # Path: plugins/_accuweather_plugin/accuweather_base.py 6 | 7 | """ 8 | This is the Accuweather plugin Base class. 9 | 10 | It defines the Accuweather plugin and loads the tools and functions from the 11 | accompanying scripts. 12 | """ 13 | 14 | import os 15 | import functools 16 | from plugins.plugin_base import PluginBase 17 | from plugins._accuweather_plugin.accuweather_tools import ( 18 | accu_weather_tools, 19 | available_functions as accuweather_functions, 20 | ) 21 | 22 | 23 | class AccuWeatherPlugin(PluginBase): 24 | """ 25 | This class defines the AccuWeather plugin. 26 | 27 | It loads the tools and functions from the accompanying scripts. 
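    Hypothetical usage sketch (the real loading flow lives in plugins/plugins_enabled.py):

        plugin = AccuWeatherPlugin()   # reads ACCUWEATHER_API_KEY / ACCUWEATHER_BASE_URL from the environment
        await plugin.initialize()      # fills plugin.tools and plugin.available_functions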
28 | 29 | """ 30 | def __init__(self): 31 | accuweather_api_key = os.getenv("ACCUWEATHER_API_KEY") 32 | if accuweather_api_key is None: 33 | raise ValueError("ACCUWEATHER_API_KEY not set") 34 | 35 | accuweather_base_url = os.getenv("ACCUWEATHER_BASE_URL") 36 | if accuweather_base_url is None: 37 | raise ValueError("ACCUWEATHER_BASE_URL not set") 38 | 39 | super().__init__() 40 | 41 | self.api_key = accuweather_api_key 42 | self.base_url = accuweather_base_url 43 | 44 | async def initialize(self): 45 | await self.load_plugin_tools() 46 | 47 | async def load_plugin_tools(self): 48 | """ 49 | Load tools and functions from accompanying scripts. 50 | 51 | """ 52 | self.tools.extend(accu_weather_tools) 53 | for func_name, func in accuweather_functions.items(): 54 | # Bind the AccuWeather API key and base URL to the functions 55 | self.available_functions[func_name] = functools.partial( 56 | func, 57 | api_key=self.api_key, 58 | base_url=self.base_url 59 | ) 60 | -------------------------------------------------------------------------------- /plugins/_accuweather_plugin/accuweather_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: accuweather_tools.py 5 | # Path: plugins/_accuweather_plugin/accuweather_tools.py 6 | 7 | """ 8 | This file contains the AccuWeather plugins functions and tools. 9 | 10 | The free AccuWeather API key is limited to 50 calls per day and is limited to 11 | 5 days of Daily Forecasts, 12 hours of Hourly Forecasts, and 5 days of Indices. 12 | 13 | See https://developer.accuweather.com/ for more information. 14 | 15 | """ 16 | 17 | import json 18 | import aiohttp 19 | import spacy 20 | 21 | nlp = spacy.load("en_core_web_md") 22 | 23 | 24 | def extract_location(user_input): 25 | """ 26 | This function extracts the location from the user input. 27 | 28 | It uses the spaCy library to extract the location from the user input. 29 | 30 | """ 31 | doc = nlp(user_input) 32 | locations = [ 33 | ent.text for ent in doc.ents if ent.label_ in ("GPE", "LOC") 34 | ] 35 | return locations 36 | 37 | 38 | async def get_location_key(api_key, base_url, location_name): 39 | """ 40 | This function gets the location key for the given location name. 41 | 42 | If no location provided or an empty string passed, defaults to Atlanta. 43 | 44 | """ 45 | url = f"{base_url}/locations/v1/cities/search" 46 | params = {"apikey": api_key, "q": location_name} 47 | async with aiohttp.ClientSession() as session: 48 | async with session.get(url, params=params) as response: 49 | response.raise_for_status() 50 | locations = await response.json() 51 | if locations: 52 | return locations[0]["Key"] 53 | else: 54 | return None 55 | 56 | 57 | async def get_current_weather(api_key, base_url, location: str = "Atlanta"): 58 | """ 59 | This function gets the current weather for the given location. 60 | 61 | If no location provided or an empty string passed, defaults to Atlanta. 
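    Returns:
        str: A JSON string with keys such as weather_text, temperature,
        humidity, wind_speed, uv_index, and observation_time, or a JSON
        error object if the location lookup fails.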
62 | 63 | """ 64 | # Check if the location is an empty string and set it to the default 65 | if not location: 66 | location = "Atlanta" 67 | 68 | # Strip any extra quotes from the location string 69 | location = location.strip('"') 70 | 71 | location_key = await get_location_key(api_key, base_url, location) 72 | if not location_key: 73 | return json.dumps( 74 | {"error": "Failed to find location key for provided location"} 75 | ) 76 | 77 | url = f"{base_url}/currentconditions/v1/{location_key}" 78 | params = {"apikey": api_key, "details": "true", "metric": "false"} 79 | async with aiohttp.ClientSession() as session: 80 | try: 81 | async with session.get(url, params=params) as response: 82 | response.raise_for_status() 83 | data = await response.json() 84 | current_conditions = data[0] 85 | weather_info = { 86 | "weather_text": ( 87 | current_conditions["WeatherText"] 88 | ), 89 | "temperature": ( 90 | current_conditions["Temperature"]["Imperial"]["Value"] 91 | ), 92 | "humidity": ( 93 | current_conditions["RelativeHumidity"] 94 | ), 95 | "wind_speed": ( 96 | current_conditions["Wind"]["Speed"]["Imperial"]["Value"] 97 | ), 98 | "wind_direction": ( 99 | current_conditions["Wind"]["Direction"]["Localized"] 100 | ), 101 | "wind_direction_degrees": ( 102 | current_conditions["Wind"]["Direction"]["Degrees"] 103 | ), 104 | "uv_index": ( 105 | current_conditions["UVIndex"] 106 | ), 107 | "cloud_cover": ( 108 | current_conditions["CloudCover"] 109 | ), 110 | "pressure": ( 111 | current_conditions["Pressure"]["Imperial"]["Value"] 112 | ), 113 | "visibility": ( 114 | current_conditions["Visibility"]["Imperial"]["Value"] 115 | ), 116 | "precipitation": ( 117 | current_conditions["Precip1hr"]["Imperial"]["Value"] 118 | ), 119 | "observation_time": ( 120 | current_conditions["LocalObservationDateTime"] 121 | ), 122 | } 123 | return json.dumps(weather_info) 124 | 125 | except aiohttp.ClientError as error: 126 | return json.dumps({error, "Failed to fetch weather data"}) 127 | except ValueError as error: 128 | return json.dumps({error, "An unexpected error occurred"}) 129 | 130 | 131 | async def get_one_hour_weather_forecast(api_key, base_url, location: str = "Atlanta"): 132 | """ 133 | This function gets the hourly forecast weather for the given location. 134 | 135 | If no location provided or an empty string passed, defaults to Atlanta. 
136 | """ 137 | 138 | if not location: 139 | location = "Atlanta" 140 | 141 | location = location.strip('"') 142 | 143 | location_key = await get_location_key(api_key, base_url, location) 144 | if not location_key: 145 | return json.dumps( 146 | {"error": "Failed to find location key for provided location"} 147 | ) 148 | 149 | url = f"{base_url}/forecasts/v1/hourly/1hour/{location_key}" 150 | params = {"apikey": api_key, "details": "true", "metric": "false"} 151 | async with aiohttp.ClientSession() as session: 152 | try: 153 | async with session.get(url, params=params) as response: 154 | response.raise_for_status() 155 | data = await response.json() 156 | forecast = data[0] if data and isinstance(data, list) else {} 157 | weather_info = { 158 | "DateTime": forecast.get("DateTime"), 159 | "WeatherIcon": forecast.get("WeatherIcon"), 160 | "IconPhrase": forecast.get("IconPhrase"), 161 | "HasPrecipitation": forecast.get("HasPrecipitation"), 162 | "PrecipitationType": forecast.get("PrecipitationType"), 163 | "PrecipitationIntensity": forecast.get("PrecipitationIntensity"), 164 | "IsDaylight": forecast.get("IsDaylight"), 165 | "Temperature": forecast.get("Temperature"), 166 | "RealFeelTemperature": forecast.get("RealFeelTemperature"), 167 | "RealFeelTemperatureShade": forecast.get("RealFeelTemperatureShade"), 168 | "WetBulbTemperature": forecast.get("WetBulbTemperature"), 169 | "WetBulbGlobeTemperature": forecast.get("WetBulbGlobeTemperature"), 170 | "DewPoint": forecast.get("DewPoint"), 171 | "Wind": forecast.get("Wind"), 172 | "WindGust": forecast.get("WindGust"), 173 | "RelativeHumidity": forecast.get("RelativeHumidity"), 174 | "Visibility": forecast.get("Visibility"), 175 | "Ceiling": forecast.get("Ceiling"), 176 | "UVIndex": forecast.get("UVIndex"), 177 | "UVIndexText": forecast.get("UVIndexText"), 178 | "PrecipitationProbability": forecast.get("PrecipitationProbability"), 179 | "RainProbability": forecast.get("RainProbability"), 180 | "SnowProbability": forecast.get("SnowProbability"), 181 | "IceProbability": forecast.get("IceProbability"), 182 | "TotalLiquid": forecast.get("TotalLiquid"), 183 | "Rain": forecast.get("Rain"), 184 | "Snow": forecast.get("Snow"), 185 | "Ice": forecast.get("Ice"), 186 | "CloudCover": forecast.get("CloudCover"), 187 | "Evapotranspiration": forecast.get("Evapotranspiration"), 188 | "SolarIrradiance": forecast.get("SolarIrradiance"), 189 | } 190 | return json.dumps(weather_info) 191 | 192 | except aiohttp.ClientError as e: 193 | return json.dumps({"error": str(e)}) 194 | except ValueError as error: 195 | return json.dumps({error, "An unexpected error occurred"}) 196 | 197 | 198 | async def get_twelve_hour_weather_forecast(api_key, base_url, location: str = "Atlanta"): 199 | """ 200 | This function gets the twelve hour hourly forecast weather for the given location. 201 | 202 | If no location provided or an empty string passed, defaults to Atlanta. 
203 | 204 | """ 205 | if not location: 206 | location = "Atlanta" 207 | 208 | location = location.strip('"') 209 | 210 | location_key = await get_location_key(api_key, base_url, location) 211 | if not location_key: 212 | return json.dumps( 213 | {"error": "Failed to find location key for provided location"} 214 | ) 215 | 216 | url = f"{base_url}/forecasts/v1/hourly/12hour/{location_key}" 217 | params = {"apikey": api_key, "details": "true", "metric": "false"} 218 | async with aiohttp.ClientSession() as session: 219 | try: 220 | async with session.get(url, params=params) as response: 221 | response.raise_for_status() 222 | data = await response.json() 223 | if not data or not isinstance(data, list): 224 | return json.dumps( 225 | {"error": "Invalid forecast data format"} 226 | ) 227 | # Process each forecast in the list 228 | forecasts_info = [] 229 | for forecast in data: 230 | weather_info = { 231 | "DateTime": forecast.get("DateTime"), 232 | "WeatherIcon": forecast.get("WeatherIcon"), 233 | "IconPhrase": forecast.get("IconPhrase"), 234 | "HasPrecipitation": forecast.get("HasPrecipitation"), 235 | "IsDaylight": forecast.get("IsDaylight"), 236 | "Temperature": forecast.get("Temperature"), 237 | "RealFeelTemperature": forecast.get("RealFeelTemperature"), 238 | "RealFeelTemperatureShade": forecast.get("RealFeelTemperatureShade"), 239 | "WetBulbTemperature": forecast.get("WetBulbTemperature"), 240 | "WetBulbGlobeTemperature": forecast.get("WetBulbGlobeTemperature"), 241 | "DewPoint": forecast.get("DewPoint"), 242 | "Wind": forecast.get("Wind"), 243 | "WindGust": forecast.get("WindGust"), 244 | "RelativeHumidity": forecast.get("RelativeHumidity"), 245 | "Visibility": forecast.get("Visibility"), 246 | "Ceiling": forecast.get("Ceiling"), 247 | "UVIndex": forecast.get("UVIndex"), 248 | "UVIndexText": forecast.get("UVIndexText"), 249 | "PrecipitationProbability": forecast.get("PrecipitationProbability"), 250 | "RainProbability": forecast.get("RainProbability"), 251 | "SnowProbability": forecast.get("SnowProbability"), 252 | "IceProbability": forecast.get("IceProbability"), 253 | "TotalLiquid": forecast.get("TotalLiquid"), 254 | "Rain": forecast.get("Rain"), 255 | "Snow": forecast.get("Snow"), 256 | "Ice": forecast.get("Ice"), 257 | "CloudCover": forecast.get("CloudCover"), 258 | "Evapotranspiration": forecast.get("Evapotranspiration"), 259 | "SolarIrradiance": forecast.get("SolarIrradiance"), 260 | } 261 | forecasts_info.append(weather_info) 262 | 263 | return json.dumps(forecasts_info) 264 | 265 | except aiohttp.ClientError as error: 266 | return json.dumps({error,"Failed to fetch weather data"}) 267 | except ValueError as error: 268 | return json.dumps({error, "An unexpected error occurred"}) 269 | 270 | 271 | async def get_one_day_weather_forecast(api_key, base_url, location: str = "Atlanta"): 272 | """ 273 | This function gets the one day forecast weather for the given location. 274 | 275 | If no location provided or an empty string passed, defaults to Atlanta. 
276 | 
277 |     """
278 |     if not location:
279 |         location = "Atlanta"
280 | 
281 |     location = location.strip('"')
282 | 
283 |     location_key = await get_location_key(api_key, base_url, location)
284 |     if not location_key:
285 |         return json.dumps(
286 |             {"error": "Failed to find location key for provided location"}
287 |         )
288 | 
289 |     url = f"{base_url}/forecasts/v1/daily/1day/{location_key}"
290 |     params = {"apikey": api_key, "details": "true", "metric": "false"}
291 |     async with aiohttp.ClientSession() as session:
292 |         try:
293 |             async with session.get(url, params=params) as response:
294 |                 response.raise_for_status()
295 |                 data = await response.json()
296 |                 weather_info = {
297 |                     "Headline": data.get("Headline", {}),
298 |                     "DailyForecasts": data.get("DailyForecasts", [])
299 |                 }
300 |                 return json.dumps(weather_info)
301 | 
302 |         except aiohttp.ClientError as error:
303 |             return json.dumps({"error": f"Failed to fetch weather data: {error}"})
304 |         except ValueError as error:
305 |             return json.dumps({"error": f"An unexpected error occurred: {error}"})
306 | 
307 | 
308 | async def get_five_day_weather_forecast(api_key, base_url, location: str = "Atlanta"):
309 |     """
310 |     This function gets the five-day forecast for the given location.
311 | 
312 |     If no location is provided, or an empty string is passed, the location defaults to Atlanta.
313 | 
314 |     """
315 |     if not location:
316 |         location = "Atlanta"
317 | 
318 |     location = location.strip('"')
319 | 
320 |     location_key = await get_location_key(api_key, base_url, location)
321 |     if not location_key:
322 |         return json.dumps(
323 |             {"error": "Failed to find location key for provided location"}
324 |         )
325 | 
326 |     url = f"{base_url}/forecasts/v1/daily/5day/{location_key}"
327 |     params = {"apikey": api_key, "details": "true", "metric": "false"}
328 |     async with aiohttp.ClientSession() as session:
329 |         try:
330 |             async with session.get(url, params=params) as response:
331 |                 response.raise_for_status()
332 |                 data = await response.json()
333 |                 weather_info = {
334 |                     "Headline": data.get("Headline", {}),
335 |                     "DailyForecasts": data.get("DailyForecasts", [])
336 |                 }
337 |                 return json.dumps(weather_info)
338 | 
339 |         except aiohttp.ClientError as error:
340 |             return json.dumps({"error": f"Failed to fetch weather data: {error}"})
341 |         except ValueError as error:
342 |             return json.dumps({"error": f"An unexpected error occurred: {error}"})
343 | 
344 | 
345 | accu_weather_tools = [
346 |     {
347 |         "type": "function",
348 |         "function": {
349 |             "name": "get_current_weather",
350 |             "description": "Get the current weather by City.",
351 |             "parameters": {
352 |                 "type": "object",
353 |                 "properties": {
354 |                     "location": {
355 |                         "type": "string",
356 |                         "description": "The city, e.g. Atlanta.",
357 |                     },
358 |                 },
359 |                 "required": ["location"],
360 |             },
361 |         },
362 |     },
363 |     {
364 |         "type": "function",
365 |         "function": {
366 |             "name": "get_one_day_weather_forecast",
367 |             "description": "Get the 1 day forecast weather by City.",
368 |             "parameters": {
369 |                 "type": "object",
370 |                 "properties": {
371 |                     "location": {
372 |                         "type": "string",
373 |                         "description": "The city, e.g. Atlanta.",
374 |                     },
375 |                 },
376 |                 "required": ["location"],
377 |             },
378 |         },
379 |     },
380 |     {
381 |         "type": "function",
382 |         "function": {
383 |             "name": "get_five_day_weather_forecast",
384 |             "description": "Get the 5 day forecast weather by City.",
385 |             "parameters": {
386 |                 "type": "object",
387 |                 "properties": {
388 |                     "location": {
389 |                         "type": "string",
390 |                         "description": "The city, e.g.
Atlanta.", 391 | }, 392 | }, 393 | "required": ["location"], 394 | }, 395 | }, 396 | }, 397 | { 398 | "type": "function", 399 | "function": { 400 | "name": "get_one_hour_weather_forecast", 401 | "description": "Get the 1 hour forecast weather by City.", 402 | "parameters": { 403 | "type": "object", 404 | "properties": { 405 | "location": { 406 | "type": "string", 407 | "description": "The city, e.g. Atlanta.", 408 | }, 409 | }, 410 | "required": ["location"], 411 | }, 412 | }, 413 | }, 414 | { 415 | "type": "function", 416 | "function": { 417 | "name": "get_twelve_hour_weather_forecast", 418 | "description": "Get the 12 hour forecast weather by City.", 419 | "parameters": { 420 | "type": "object", 421 | "properties": { 422 | "location": { 423 | "type": "string", 424 | "description": "The city, e.g. Atlanta.", 425 | }, 426 | }, 427 | "required": ["location"], 428 | }, 429 | }, 430 | }, 431 | ] 432 | 433 | available_functions = { 434 | "get_current_weather": get_current_weather, 435 | "get_one_hour_weather_forecast": get_one_hour_weather_forecast, 436 | "get_twelve_hour_weather_forecast": get_twelve_hour_weather_forecast, 437 | "get_one_day_weather_forecast": get_one_day_weather_forecast, 438 | "get_five_day_weather_forecast": get_five_day_weather_forecast, 439 | } 440 | -------------------------------------------------------------------------------- /plugins/_gemini_pro_plugin/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/plugins/_gemini_pro_plugin/__init__.py -------------------------------------------------------------------------------- /plugins/_gemini_pro_plugin/gemini_pro_base.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: gemini_pro_base.py 5 | # Path: plugins/_gemini_pro_plugin/gemini_pro_base.py 6 | 7 | """ 8 | This is the Gemini Pro Expert plugin. 9 | 10 | This plugin is a wrapper around the Gemini Pro API. 11 | 12 | """ 13 | 14 | import os 15 | import functools 16 | import google.generativeai as genai 17 | from plugins.plugin_base import PluginBase 18 | 19 | from plugins._gemini_pro_plugin.gemini_pro_vision_tools import ( 20 | gemini_pro_vision_tools, 21 | available_functions as gemini_pro_vision_functions 22 | ) 23 | from plugins._gemini_pro_plugin.gemini_pro_tools import ( 24 | gemini_pro_tools, 25 | available_functions as gemini_pro_functions 26 | ) 27 | 28 | 29 | class GeminiProPlugin(PluginBase): 30 | """ 31 | This is the Gemini Pro Expert plugin. 32 | 33 | This plugin is a wrapper around the Gemini Pro API. 
34 | 35 | """ 36 | def __init__(self): 37 | self.api_key = os.getenv("GEMINI_API_KEY") 38 | if self.api_key is None: 39 | raise ValueError("GEMINI_API_KEY not set in the .env file") 40 | 41 | genai.configure(api_key=self.api_key) 42 | 43 | self.model = genai.GenerativeModel( 44 | model_name="gemini-pro", 45 | generation_config=self.default_generation_config(), 46 | safety_settings=self.default_safety_settings() 47 | ) 48 | self.convo = self.model.start_chat(history=[]) 49 | 50 | # Initialize the tools and available functions dictionaries 51 | self.tools = [] 52 | self.available_functions = {} 53 | 54 | super().__init__() 55 | 56 | async def initialize(self): 57 | # Load tools and functions from gemini_pro_tools.py 58 | self.tools.extend(gemini_pro_tools) 59 | for func_name, func in gemini_pro_functions.items(): 60 | # Bind the GeminiProPlugin instance to the function 61 | self.available_functions[func_name] = functools.partial(func, self) 62 | 63 | # Load tools and functions from gemini_pro_vision_tools.py 64 | self.tools.extend(gemini_pro_vision_tools) 65 | for func_name, func in gemini_pro_vision_functions.items(): 66 | # Bind the GeminiProPlugin instance to the function 67 | self.available_functions[func_name] = functools.partial(func, self) 68 | 69 | def default_generation_config(self): 70 | """ 71 | Returns the default generation config for the Gemini Pro API. 72 | 73 | This is the same as the default generation config for the 74 | Gemini Pro API, except that the max_output_tokens is set to 512 75 | instead of 256. 76 | """ 77 | return { 78 | "temperature": 1, 79 | "top_p": 1, 80 | "top_k": 1, 81 | "max_output_tokens": 512, 82 | } 83 | 84 | def default_safety_settings(self): 85 | """ 86 | Returns the default safety settings for the Gemini Pro API. 87 | 88 | This is the same as the default safety settings for the 89 | Gemini Pro API, except that the threshold for all categories 90 | is set to BLOCK_MEDIUM_AND_ABOVE instead of BLOCK_HIGH_AND_ABOVE. 91 | 92 | """ 93 | return [ 94 | { 95 | "category": "HARM_CATEGORY_HARASSMENT", 96 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 97 | }, 98 | { 99 | "category": "HARM_CATEGORY_HATE_SPEECH", 100 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 101 | }, 102 | { 103 | "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", 104 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 105 | }, 106 | { 107 | "category": "HARM_CATEGORY_DANGEROUS_CONTENT", 108 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 109 | } 110 | ] 111 | -------------------------------------------------------------------------------- /plugins/_gemini_pro_plugin/gemini_pro_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: gemini_pro_tools.py 5 | # Path: plugins/_gemini_pro_plugin/gemini_pro_tools.py 6 | 7 | """ 8 | This module defines the Gemini Pro Tools. 
9 | """ 10 | 11 | import google.generativeai as genai 12 | 13 | generation_config = { 14 | "temperature": 0.5, 15 | "top_p": 0.5, 16 | "top_k": 32, 17 | "max_output_tokens": 1024, 18 | } 19 | 20 | # Set the safety settings to block harmful content 21 | safety_settings = [ 22 | { 23 | "category": "HARM_CATEGORY_HARASSMENT", 24 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 25 | }, 26 | { 27 | "category": "HARM_CATEGORY_HATE_SPEECH", 28 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 29 | }, 30 | { 31 | "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", 32 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 33 | }, 34 | { 35 | "category": "HARM_CATEGORY_DANGEROUS_CONTENT", 36 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 37 | } 38 | ] 39 | 40 | # Create object for the Gemini Pro model 41 | model = genai.GenerativeModel( 42 | model_name="gemini-pro", 43 | generation_config=generation_config, 44 | safety_settings=safety_settings 45 | ) 46 | 47 | 48 | def ask_gemini_pro_synchronous(plugin_instance, question): 49 | """ 50 | Ask Gemini Pro a question and get a response. 51 | 52 | This function is synchronous, meaning that it will block the 53 | main thread until the response is received. 54 | 55 | Args: 56 | plugin_instance (GeminiProPlugin): The Gemini Pro plugin instance. 57 | question (str): The question to ask Gemini Pro. 58 | 59 | Returns: 60 | str: The response from Gemini Pro. 61 | """ 62 | # Create a conversation object 63 | convo = model.start_chat(history=[]) 64 | 65 | # Ask the question and get the response 66 | convo.send_message(question) 67 | 68 | response_text = convo.last.text 69 | 70 | return response_text 71 | 72 | 73 | async def ask_gemini_pro_asynchronous(plugin_instance, question): 74 | """ 75 | Ask Gemini Pro a question and get a response. 76 | 77 | This function is asynchronous, meaning that it will not block the 78 | main thread while waiting for the response. 
79 | 80 | """ 81 | # Create a conversation object 82 | convo = model.start_chat(history=[]) 83 | 84 | # Ask the question and get the response 85 | convo.send_message(question) 86 | 87 | response_text = convo.last.text 88 | 89 | return response_text 90 | 91 | 92 | gemini_pro_tools = [ 93 | { 94 | "type": "function", 95 | "function": { 96 | "name": "ask_gemini_pro_synchronous", 97 | "description": "This function allows you to send a request to the Gemini Pro LLM (which can be used for natural language tasks, multi-turn text and code chat, code generation) synchronously and get a response you can you use later in your workflow.", 98 | "parameters": { 99 | "type": "object", 100 | "properties": { 101 | "question": { 102 | "type": "string", 103 | "description": "The question to ask Gemini Pro.", 104 | }, 105 | }, 106 | "required": ["question"], 107 | }, 108 | }, 109 | }, 110 | { 111 | "type": "function", 112 | "function": { 113 | "name": "ask_gemini_pro_asynchronous", 114 | "description": "This function allows you to send a request to the Gemini Pro LLM (which can be used for natural language tasks, multi-turn text and code chat, code generation) asynchronously and get a response you can you use later in your workflow.", 115 | "parameters": { 116 | "type": "object", 117 | "properties": { 118 | "question": { 119 | "type": "string", 120 | "description": "The question to ask the Gemini Pro LLM.", 121 | }, 122 | }, 123 | "required": ["question"], 124 | }, 125 | }, 126 | }, 127 | ] 128 | 129 | available_functions = { 130 | "ask_gemini_pro_synchronous": ask_gemini_pro_synchronous, 131 | "ask_gemini_pro_asynchronous": ask_gemini_pro_asynchronous, 132 | } 133 | -------------------------------------------------------------------------------- /plugins/_gemini_pro_plugin/gemini_pro_vision_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: gemini_pro_tools.py 5 | # Path: plugins/_gemini_pro_plugin/gemini_pro_vision_tools.py 6 | 7 | """ 8 | This module defines the Gemini Pro Vision Tools. 9 | """ 10 | 11 | import os 12 | import google.generativeai as genai 13 | from vertexai.preview.generative_models import Image 14 | 15 | SOURCE_FOLDER = "uploads" 16 | 17 | 18 | async def ask_gemini_pro_vision(self, question, specific_file_name) -> Image: 19 | """ 20 | Ask Gemini Pro Vision a question about a specific image file. 21 | 22 | Args: 23 | question: The question to ask. 24 | specific_file_name: The name of the image file. 25 | """ 26 | try: 27 | 28 | generation_config = { 29 | "temperature": 0.4, 30 | "top_p": 1, 31 | "top_k": 32, 32 | "max_output_tokens": 1024, 33 | } 34 | 35 | try: 36 | image_path = os.path.join(SOURCE_FOLDER, specific_file_name) 37 | 38 | with open(image_path, "rb") as image_file: 39 | image_bytes = image_file.read() 40 | 41 | except FileNotFoundError: 42 | 43 | return "File not found." 44 | 45 | except PermissionError: 46 | 47 | return "Permission denied." 48 | 49 | except TypeError: 50 | 51 | return "Error reading the file." 
52 | 53 | safety_settings = [ 54 | { 55 | "category": "HARM_CATEGORY_HARASSMENT", 56 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 57 | }, 58 | { 59 | "category": "HARM_CATEGORY_HATE_SPEECH", 60 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 61 | }, 62 | { 63 | "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", 64 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 65 | }, 66 | { 67 | "category": "HARM_CATEGORY_DANGEROUS_CONTENT", 68 | "threshold": "BLOCK_MEDIUM_AND_ABOVE" 69 | } 70 | ] 71 | 72 | try: 73 | model = genai.GenerativeModel(model_name="gemini-pro-vision", 74 | generation_config=generation_config, 75 | safety_settings=safety_settings) 76 | 77 | except TypeError: 78 | 79 | return "Error creating the model." 80 | 81 | except ValueError: 82 | 83 | return "Error creating the model." 84 | 85 | try: 86 | responses = model.generate_content( 87 | [image_bytes, question], 88 | stream=False, 89 | ) 90 | except FileNotFoundError: 91 | 92 | return "File not found." 93 | 94 | except PermissionError: 95 | 96 | return "Permission denied." 97 | 98 | except TypeError: 99 | 100 | return "Error reading the file." 101 | 102 | except ValueError: 103 | 104 | return "Error creating the model." 105 | 106 | try: 107 | for response in responses: 108 | if response.candidates: 109 | return response.candidates[0].content.parts[0].text 110 | else: 111 | return "No response candidates found." 112 | 113 | except FileNotFoundError: 114 | 115 | return "File not found." 116 | except PermissionError: 117 | 118 | return "Permission denied." 119 | except TypeError: 120 | 121 | return "Error reading the file." 122 | except ValueError: 123 | 124 | return "Error creating the model." 125 | 126 | except FileNotFoundError: 127 | return "File not found." 128 | except PermissionError: 129 | return "Permission denied." 130 | except TypeError: 131 | 132 | return "Error reading the file." 133 | except ValueError: 134 | return "Error creating the model." 135 | 136 | 137 | gemini_pro_vision_tools = [ 138 | { 139 | "type": "function", 140 | "function": { 141 | "name": "ask_gemini_pro_vision", 142 | "description": "Ask Gemini Pro Vision a question about a specific image or video file located in the 'uploads' folder.", 143 | "parameters": { 144 | "type": "object", 145 | "properties": { 146 | "question": { 147 | "type": "string", 148 | "description": "The question to ask Gemini Pro Vision", 149 | }, 150 | "specific_file_name": { 151 | "type": "string", 152 | "description": "The name of the image or video file in the 'uploads' folder.", 153 | }, 154 | }, 155 | "required": ["question", "specific_file_name"], 156 | }, 157 | }, 158 | }, 159 | ] 160 | 161 | available_functions = { 162 | "ask_gemini_pro_vision": ask_gemini_pro_vision, 163 | } 164 | -------------------------------------------------------------------------------- /plugins/_gmail_plugin/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/plugins/_gmail_plugin/__init__.py -------------------------------------------------------------------------------- /plugins/_gmail_plugin/calendar_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: calendar_tools.py 5 | # Path: plugins/_gmail_plugin/calendar_tools.py 6 | 7 | """ 8 | This module contains the Gmail Calendar functions and tools. 
9 | """ 10 | 11 | import datetime 12 | from googleapiclient.errors import HttpError 13 | 14 | 15 | async def get_next_calendar_event(calendar_service): 16 | """ 17 | This function gets the next event from the users Google Calendar. 18 | """ 19 | now = datetime.datetime.utcnow().isoformat() + "Z" 20 | try: 21 | events_result = ( 22 | calendar_service.events() 23 | .list( 24 | calendarId="primary", 25 | timeMin=now, 26 | maxResults=1, 27 | singleEvents=True, 28 | orderBy="startTime" 29 | ) 30 | .execute() 31 | ) 32 | events = events_result.get("items", []) 33 | 34 | if not events: 35 | return "No upcoming events found." 36 | else: 37 | event = events[0] 38 | start = event["start"].get("dateTime", event["start"].get("date")) 39 | return f"Next event: {event['summary']} at {start}" 40 | except HttpError: 41 | return "An error occurred while retrieving events." 42 | 43 | 44 | async def add_event(calendar_service, event_details): 45 | """Add an event to the user's calendar.""" 46 | try: 47 | event = calendar_service.events().insert(calendarId='primary', body=event_details).execute() 48 | return f"Event created: {event.get('htmlLink')}" 49 | except HttpError as error: 50 | return f"An error occurred: {error}" 51 | 52 | 53 | async def update_event(calendar_service, event_id, updated_event_details): 54 | """Update an existing event by ID.""" 55 | try: 56 | updated_event = calendar_service.events().update(calendarId='primary', eventId=event_id, body=updated_event_details).execute() 57 | return f"Event updated: {updated_event.get('htmlLink')}" 58 | except HttpError as error: 59 | return f"An error occurred: {error}" 60 | 61 | 62 | async def delete_event(calendar_service, event_id): 63 | """Delete an event by its ID.""" 64 | try: 65 | calendar_service.events().delete(calendarId='primary', eventId=event_id).execute() 66 | return "Event deleted successfully." 67 | except HttpError as error: 68 | return f"An error occurred: {error}" 69 | 70 | 71 | async def list_events(calendar_service, max_results=10): 72 | """List the next 10 events on the user's calendar.""" 73 | now = datetime.datetime.utcnow().isoformat() + 'Z' 74 | try: 75 | events_result = calendar_service.events().list(calendarId='primary', timeMin=now, 76 | maxResults=max_results, singleEvents=True, 77 | orderBy='startTime').execute() 78 | events = events_result.get('items', []) 79 | return events 80 | except HttpError as error: 81 | return f"An error occurred: {error}" 82 | 83 | 84 | async def get_event(calendar_service, event_id): 85 | """Get a specific event by ID.""" 86 | try: 87 | event = calendar_service.events().get(calendarId='primary', eventId=event_id).execute() 88 | return event 89 | except HttpError as error: 90 | return f"An error occurred: {error}" 91 | 92 | 93 | async def clear_calendar(calendar_service): 94 | """Clears all events from the primary calendar.""" 95 | try: 96 | calendar_service.calendars().clear(calendarId='primary').execute() 97 | return "Primary calendar cleared." 
98 | except HttpError as error: 99 | return f"An error occurred: {error}" 100 | 101 | 102 | async def list_calendars(calendar_service): 103 | """List all calendars for the user.""" 104 | try: 105 | calendar_list = calendar_service.calendarList().list().execute() 106 | return calendar_list.get('items', []) 107 | except HttpError as error: 108 | return f"An error occurred: {error}" 109 | 110 | 111 | async def create_calendar(calendar_service, calendar_details): 112 | """Create a new calendar.""" 113 | try: 114 | calendar = calendar_service.calendars().insert(body=calendar_details).execute() 115 | return f"Calendar created: {calendar.get('summary')}" 116 | except HttpError as error: 117 | return f"An error occurred: {error}" 118 | 119 | 120 | async def update_calendar(calendar_service, calendar_id, updated_calendar_details): 121 | """Update an existing calendar by ID.""" 122 | try: 123 | updated_calendar = calendar_service.calendars().update(calendarId=calendar_id, body=updated_calendar_details).execute() 124 | return f"Calendar updated: {updated_calendar.get('summary')}" 125 | except HttpError as error: 126 | return f"An error occurred: {error}" 127 | 128 | 129 | async def delete_calendar(calendar_service, calendar_id): 130 | """Delete a calendar by its ID.""" 131 | try: 132 | calendar_service.calendars().delete(calendarId=calendar_id).execute() 133 | return "Calendar deleted successfully." 134 | except HttpError as error: 135 | return f"An error occurred: {error}" 136 | 137 | 138 | calendar_tools_list = [ 139 | { 140 | "type": "function", 141 | "function": { 142 | "name": "get_next_calendar_event", 143 | "description": "Fetch the next event from the user's Google Calendar.", 144 | "parameters": { 145 | "type": "object", 146 | "properties": {}, 147 | "required": [], 148 | }, 149 | }, 150 | }, 151 | { 152 | "type": "function", 153 | "function": { 154 | "name": "add_event", 155 | "description": "Add a new event to the user's Google Calendar.", 156 | "parameters": { 157 | "type": "object", 158 | "properties": { 159 | "event_details": { 160 | "type": "object", 161 | "description": "A dictionary containing event details following Google's event format." 162 | }, 163 | }, 164 | "required": ["event_details"], 165 | }, 166 | }, 167 | }, 168 | { 169 | "type": "function", 170 | "function": { 171 | "name": "update_event", 172 | "description": "Update an existing event in the user's Google Calendar by event ID.", 173 | "parameters": { 174 | "type": "object", 175 | "properties": { 176 | "event_id": { 177 | "type": "string", 178 | "description": "The ID of the event to update." 179 | }, 180 | "updated_event_details": { 181 | "type": "object", 182 | "description": "A dictionary containing updated event details." 183 | }, 184 | }, 185 | "required": ["event_id", "updated_event_details"], 186 | }, 187 | }, 188 | }, 189 | { 190 | "type": "function", 191 | "function": { 192 | "name": "delete_event", 193 | "description": "Delete an event from the user's Google Calendar by event ID.", 194 | "parameters": { 195 | "type": "object", 196 | "properties": { 197 | "event_id": { 198 | "type": "string", 199 | "description": "The ID of the event to delete." 
200 | }, 201 | }, 202 | "required": ["event_id"], 203 | }, 204 | }, 205 | }, 206 | { 207 | "type": "function", 208 | "function": { 209 | "name": "list_events", 210 | "description": "List the upcoming events from the user's Google Calendar.", 211 | "parameters": { 212 | "type": "object", 213 | "properties": { 214 | "max_results": { 215 | "type": "integer", 216 | "description": "Maximum number of events to return." 217 | }, 218 | }, 219 | "required": [], 220 | }, 221 | }, 222 | }, 223 | { 224 | "type": "function", 225 | "function": { 226 | "name": "get_event", 227 | "description": "Retrieve a specific event from the user's Google Calendar by event ID.", 228 | "parameters": { 229 | "type": "object", 230 | "properties": { 231 | "event_id": { 232 | "type": "string", 233 | "description": "The ID of the event to retrieve." 234 | }, 235 | }, 236 | "required": ["event_id"], 237 | }, 238 | }, 239 | }, 240 | { 241 | "type": "function", 242 | "function": { 243 | "name": "clear_calendar", 244 | "description": "Clears all events from the user's primary Google Calendar.", 245 | "parameters": { 246 | "type": "object", 247 | "properties": {}, 248 | "required": [], 249 | }, 250 | }, 251 | }, 252 | { 253 | "type": "function", 254 | "function": { 255 | "name": "list_calendars", 256 | "description": "List all calendars for the user.", 257 | "parameters": { 258 | "type": "object", 259 | "properties": {}, 260 | "required": [], 261 | }, 262 | }, 263 | }, 264 | { 265 | "type": "function", 266 | "function": { 267 | "name": "create_calendar", 268 | "description": "Create a new calendar.", 269 | "parameters": { 270 | "type": "object", 271 | "properties": { 272 | "calendar_details": { 273 | "type": "object", 274 | "description": "A dictionary containing calendar details following Google's calendar format." 275 | }, 276 | }, 277 | "required": ["calendar_details"], 278 | }, 279 | }, 280 | }, 281 | { 282 | "type": "function", 283 | "function": { 284 | "name": "update_calendar", 285 | "description": "Update an existing calendar by ID.", 286 | "parameters": { 287 | "type": "object", 288 | "properties": { 289 | "calendar_id": { 290 | "type": "string", 291 | "description": "The ID of the calendar to update." 292 | }, 293 | "updated_calendar_details": { 294 | "type": "object", 295 | "description": "A dictionary containing updated calendar details." 296 | }, 297 | }, 298 | "required": ["calendar_id", "updated_calendar_details"], 299 | }, 300 | }, 301 | }, 302 | { 303 | "type": "function", 304 | "function": { 305 | "name": "delete_calendar", 306 | "description": "Delete a calendar by its ID.", 307 | "parameters": { 308 | "type": "object", 309 | "properties": { 310 | "calendar_id": { 311 | "type": "string", 312 | "description": "The ID of the calendar to delete." 
313 | }, 314 | }, 315 | "required": ["calendar_id"], 316 | }, 317 | }, 318 | }, 319 | ] 320 | 321 | available_functions = { 322 | "get_next_calendar_event": get_next_calendar_event, 323 | "add_event": add_event, 324 | "update_event": update_event, 325 | "delete_event": delete_event, 326 | "list_events": list_events, 327 | "get_event": get_event, 328 | "clear_calendar": clear_calendar, 329 | "list_calendars": list_calendars, 330 | "create_calendar": create_calendar, 331 | "update_calendar": update_calendar, 332 | "delete_calendar": delete_calendar, 333 | } 334 | -------------------------------------------------------------------------------- /plugins/_gmail_plugin/drive_tools.py: -------------------------------------------------------------------------------- 1 | # !/usr/bin/env python 2 | # coding: utf-8 3 | # Filename: drive_tools.py 4 | # Path: plugins/_gmail_plugin/drive_tools.py 5 | 6 | """ 7 | This module contains the Google Drive functions and tools. 8 | """ 9 | 10 | import io 11 | import re 12 | import spacy 13 | from googleapiclient.discovery import build 14 | from googleapiclient.http import ( 15 | MediaIoBaseUpload, 16 | MediaFileUpload, 17 | ) 18 | 19 | nlp = spacy.load("en_core_web_md") 20 | 21 | 22 | def extract_file_names(text): 23 | """ 24 | Extract file names from user input using spaCy. 25 | """ 26 | doc = nlp(text) 27 | file_names = [ent.text for ent in doc.ents if ent.label_ == "WORK_OF_ART"] 28 | return file_names 29 | 30 | 31 | def extract_mime_types(text): 32 | """ 33 | Extract MIME types from user input using spaCy. 34 | """ 35 | doc = nlp(text) 36 | mime_types = [ent.text for ent in doc.ents if ent.label_ == "PRODUCT"] 37 | return mime_types 38 | 39 | 40 | def extract_folder_names(text): 41 | """ 42 | Extract folder names from user input using spaCy. 43 | """ 44 | doc = nlp(text) 45 | folder_names = [ 46 | ent.text for ent in doc.ents if ent.label_ in ( 47 | "ORG", "GPE", "LOC", "FAC" 48 | ) 49 | ] 50 | return folder_names 51 | 52 | 53 | def extract_file_id(text): 54 | """ 55 | Extract a file ID-like pattern from user input using regular expressions. 56 | """ 57 | pattern = r'([a-zA-Z0-9-_]{25,})' 58 | matches = re.findall(pattern, text) 59 | return matches 60 | 61 | 62 | def extract_local_paths(text): 63 | """ 64 | Extract local paths from user input using spaCy and/or regular expressions. 65 | """ 66 | doc = nlp(text) 67 | local_paths = [ 68 | ent.text for ent in doc.ents if ent.label_ in ("ORG", "FAC") 69 | ] 70 | 71 | pattern = r'([a-zA-Z]:\\(?:[^\\\/:*?"<>|\r\n]+\\)*[^\\\/:*?"<>|\r\n]*)' 72 | local_paths.extend(re.findall(pattern, text)) 73 | 74 | return local_paths 75 | 76 | 77 | def build_drive_service(credentials): 78 | """Builds the Google Drive service.""" 79 | return build('drive', 'v3', credentials=credentials) 80 | 81 | 82 | async def upload_file(drive_service, user_input): 83 | """ 84 | Upload or update a file to Google Drive based on user input. 
85 | """ 86 | try: 87 | file_names = extract_file_names(user_input) 88 | mime_types = extract_mime_types(user_input) 89 | folder_names = extract_folder_names(user_input) 90 | 91 | title = file_names[0] if file_names else 'Untitled' 92 | mime_type = mime_types[0] if mime_types else 'text/plain' 93 | folder_name = folder_names[0] if folder_names else 'root' 94 | 95 | content = user_input 96 | 97 | file_metadata = {'name': title} 98 | media = MediaIoBaseUpload( 99 | io.BytesIO(content.encode()), mimetype=mime_type 100 | ) 101 | 102 | if folder_name != 'root': 103 | folder_id = 'GPT_ALL' 104 | file_metadata['parents'] = [folder_id] 105 | 106 | file = drive_service.files().create( 107 | body=file_metadata, media_body=media, fields='id' 108 | ).execute() 109 | 110 | return f"Uploaded/Updated file with ID: {file.get('id')}" 111 | 112 | except Exception as e: 113 | return f"An error occurred while uploading the file: {e}" 114 | 115 | 116 | async def download_file(drive_service, user_input, local_path): 117 | """ 118 | Download a file from Google Drive by file name or folder name. 119 | """ 120 | try: 121 | file_names = extract_file_names(user_input) 122 | folder_names = extract_folder_names(user_input) 123 | 124 | file_name = file_names[0] if file_names else None 125 | folder_name = folder_names[0] if folder_names else None 126 | 127 | folder_id = None 128 | if folder_name: 129 | folder_query = f"name = '{folder_name}' and mimeType = 'application/vnd.google-apps.folder'" 130 | folder_response = drive_service.files().list(q=folder_query, fields="files(id)").execute() 131 | folders = folder_response.get('files', []) 132 | if folders: 133 | folder_id = folders[0].get('id') 134 | 135 | return f"Downloaded file '{file_name}' to {local_path}" 136 | 137 | except Exception as e: 138 | return f"An error occurred while downloading the file: {e}" 139 | 140 | 141 | async def list_files(drive_service, folder_name='root', max_results=10): 142 | """ 143 | List files in Google Drive within a specified folder. 144 | """ 145 | try: 146 | folder_query = f"name = '{folder_name}' and mimeType = 'application/vnd.google-apps.folder'" 147 | folder_response = drive_service.files().list( 148 | q=folder_query, fields="files(id, name)" 149 | ).execute() 150 | folders = folder_response.get('files', []) 151 | 152 | if not folders: 153 | return [] 154 | 155 | folder_id = folders[0].get('id') 156 | 157 | query = "'%s' in parents" % folder_id 158 | response = drive_service.files().list( 159 | q=query, pageSize=max_results, fields="nextPageToken, files(id, name)" 160 | ).execute() 161 | files_info = [ 162 | {'name': file.get('name'), 'id': file.get('id')} for file in response.get('files', []) 163 | ] 164 | 165 | return files_info 166 | 167 | except Exception as e: 168 | return f"An error occurred while listing the files: {e}" 169 | 170 | 171 | async def create_folder(drive_service, folder_name, parent_id='root'): 172 | """ 173 | Create a new folder in Google Drive. 174 | """ 175 | file_metadata = { 176 | 'name': folder_name, 177 | 'mimeType': 'application/vnd.google-apps.folder', 178 | 'parents': [parent_id] 179 | } 180 | folder = drive_service.files().create(body=file_metadata, fields='id').execute() 181 | return folder.get('id') 182 | 183 | 184 | async def search_files(drive_service, query): 185 | """ 186 | Search for files and folders in Google Drive. 
187 | """ 188 | results = drive_service.files().list(q=query, fields="files(id, name)").execute() 189 | items = results.get('files', []) 190 | return items 191 | 192 | 193 | async def share_file(drive_service, file_id, user_email, role='reader'): 194 | """ 195 | Share a file or folder with a user. 196 | """ 197 | user_permission = { 198 | 'type': 'user', 199 | 'role': role, 200 | 'emailAddress': user_email 201 | } 202 | drive_service.permissions().create( 203 | fileId=file_id, 204 | body=user_permission, 205 | fields='id' 206 | ).execute() 207 | 208 | 209 | async def move_file(drive_service, file_id, folder_id): 210 | """ 211 | Move a file or folder to a different folder. 212 | """ 213 | file = drive_service.files().get(fileId=file_id, fields='parents').execute() 214 | previous_parents = ",".join(file.get('parents')) 215 | file = drive_service.files().update( 216 | fileId=file_id, 217 | addParents=folder_id, 218 | removeParents=previous_parents, 219 | fields='id, parents' 220 | ).execute() 221 | 222 | 223 | drive_tools_list = [ 224 | { 225 | "type": "function", 226 | "function": { 227 | "name": "upload_file", 228 | "description": "Upload or update a file to Google Drive based on user input.", 229 | "parameters": { 230 | "type": "object", 231 | "properties": { 232 | "user_input": { 233 | "type": "string", 234 | "description": "The user input containing file name, content, and other metadata.", 235 | } 236 | }, 237 | "required": ["user_input"], 238 | }, 239 | }, 240 | }, 241 | { 242 | "type": "function", 243 | "function": { 244 | "name": "download_file", 245 | "description": "Download a file from Google Drive based on user input.", 246 | "parameters": { 247 | "type": "object", 248 | "properties": { 249 | "user_input": { 250 | "type": "string", 251 | "description": "The user input containing the file ID and local path information.", 252 | } 253 | }, 254 | "required": ["user_input"], 255 | } 256 | }, 257 | }, 258 | { 259 | "type": "function", 260 | "function": { 261 | "name": "list_files", 262 | "description": "List files in a specified Google Drive folder.", 263 | "parameters": { 264 | "type": "object", 265 | "properties": { 266 | "folder_name": { 267 | "type": "string", 268 | "description": "The name of the folder to list files from. Defaults to 'root' if not specified.", 269 | "default": "root" 270 | }, 271 | "max_results": { 272 | "type": "integer", 273 | "description": "The maximum number of file results to retrieve. Defaults to 10 if not specified.", 274 | "default": 10 275 | } 276 | }, 277 | "required": [] 278 | } 279 | }, 280 | }, 281 | { 282 | "type": "function", 283 | "function": { 284 | "name": "create_folder", 285 | "description": "Create a new folder in Google Drive.", 286 | "parameters": { 287 | "type": "object", 288 | "properties": { 289 | "folder_name": { 290 | "type": "string", 291 | "description": "The name of the folder to create.", 292 | }, 293 | "parent_id": { 294 | "type": "string", 295 | "description": "The ID of the parent folder. 
Defaults to 'root' if not specified.", 296 | "default": "root" 297 | } 298 | }, 299 | "required": ["folder_name"] 300 | } 301 | }, 302 | }, 303 | { 304 | "type": "function", 305 | "function": { 306 | "name": "search_files", 307 | "description": "Search for files and folders in Google Drive.", 308 | "parameters": { 309 | "type": "object", 310 | "properties": { 311 | "query": { 312 | "type": "string", 313 | "description": "The search query to use.", 314 | } 315 | }, 316 | "required": ["query"] 317 | } 318 | }, 319 | }, 320 | { 321 | "type": "function", 322 | "function": { 323 | "name": "share_file", 324 | "description": "Share a file or folder with a user.", 325 | "parameters": { 326 | "type": "object", 327 | "properties": { 328 | "file_id": { 329 | "type": "string", 330 | "description": "The ID of the file or folder to share.", 331 | }, 332 | "user_email": { 333 | "type": "string", 334 | "description": "The email address of the user to share with.", 335 | }, 336 | "role": { 337 | "type": "string", 338 | "description": "The role to grant to the user. Defaults to 'reader' if not specified.", 339 | "default": "reader" 340 | } 341 | }, 342 | "required": ["file_id", "user_email"] 343 | } 344 | }, 345 | }, 346 | { 347 | "type": "function", 348 | "function": { 349 | "name": "move_file", 350 | "description": "Move a file or folder to a different folder.", 351 | "parameters": { 352 | "type": "object", 353 | "properties": { 354 | "file_id": { 355 | "type": "string", 356 | "description": "The ID of the file or folder to move.", 357 | }, 358 | "folder_id": { 359 | "type": "string", 360 | "description": "The ID of the folder to move the file or folder to.", 361 | } 362 | }, 363 | "required": ["file_id", "folder_id"] 364 | } 365 | }, 366 | } 367 | ] 368 | 369 | available_functions = { 370 | "upload_file": upload_file, 371 | "download_file": download_file, 372 | "list_files": list_files, 373 | "create_folder": create_folder, 374 | "search_files": search_files, 375 | "share_file": share_file, 376 | "move_file": move_file, 377 | } 378 | -------------------------------------------------------------------------------- /plugins/_gmail_plugin/email_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: email_tools.py 5 | # Path: plugins/_gmail_plugin/email_tools.py 6 | 7 | """ 8 | This module contains the Gmail Email functions and tools. 
9 | """ 10 | 11 | import os 12 | import base64 13 | import binascii 14 | from bs4 import BeautifulSoup 15 | from googleapiclient.errors import HttpError 16 | 17 | 18 | async def gmail_read_message(gmail_service): 19 | """Retrieve a list of emails from Gmail.""" 20 | try: 21 | results = gmail_service.users().messages().list( 22 | userId='me', labelIds=['INBOX'], maxResults=5).execute() 23 | messages = results.get('messages', []) 24 | 25 | emails = [] 26 | for message in messages: 27 | msg = gmail_service.users().messages().get( 28 | userId='me', id=message['id']).execute() 29 | 30 | sender, subject, body = await extract_email_data(msg) 31 | 32 | snippet = body[:300] if body else 'N/A' 33 | emails.append( 34 | { 35 | 'id': message['id'], 'from': sender, 36 | 'subject': subject, 'snippet': snippet 37 | } 38 | ) 39 | return emails 40 | 41 | except HttpError: 42 | return [] 43 | 44 | 45 | async def gmail_send_message(gmail_service, **kwargs): 46 | """Send an email message.""" 47 | subject = kwargs.get('subject') 48 | body = kwargs.get('body') 49 | to = kwargs.get('to', os.getenv("GMAIL_ADDRESS")) 50 | from_email = os.getenv("GMAIL_ADDRESS") 51 | 52 | email_message = f"From: {from_email}\r\n" 53 | email_message += f"To: {to}\r\n" 54 | email_message += f"Subject: {subject}\r\n\r\n" 55 | email_message += body 56 | 57 | message_bytes = email_message.encode("utf-8") 58 | base64_bytes = base64.urlsafe_b64encode(message_bytes) 59 | base64_message = base64_bytes.decode("utf-8") 60 | 61 | raw_message = {"raw": base64_message} 62 | try: 63 | send_message = ( 64 | gmail_service.users().messages().send(userId="me", body=raw_message).execute() 65 | ) 66 | return f"Message Id: {send_message['id']}" 67 | except HttpError as error: 68 | return f"An error occurred while sending the email: {error}" 69 | 70 | 71 | async def gmail_delete_message(gmail_service, message_id): 72 | """Delete an email message by ID.""" 73 | try: 74 | gmail_service.users().messages().delete(userId='me', id=message_id).execute() 75 | except HttpError: 76 | pass 77 | 78 | 79 | async def extract_email_data(msg): 80 | """Extract the sender, subject, and body from an email message.""" 81 | headers = msg['payload']['headers'] 82 | sender = '' 83 | subject = '' 84 | for header in headers: 85 | if header['name'].lower() == 'from': 86 | sender = header['value'] 87 | elif header['name'].lower() == 'subject': 88 | subject = header['value'] 89 | 90 | body = await _get_email_body(msg['payload']) 91 | return sender, subject, body 92 | 93 | 94 | async def _get_email_body(payload): 95 | """Get the body of an email message.""" 96 | if 'parts' in payload: 97 | parts = payload['parts'] 98 | body = '' 99 | for part in parts: 100 | part_body = part['body'].get('data', '') 101 | body += await _decode_base64(part_body, part['mimeType']) 102 | return body 103 | else: 104 | body = payload['body'].get('data', '') 105 | return await _decode_base64(body) 106 | 107 | 108 | async def _decode_base64(data, mime_type='text/plain'): 109 | """Decode base64-encoded data.""" 110 | try: 111 | decoded_body_bytes = base64.urlsafe_b64decode(data) 112 | decoded_body = decoded_body_bytes.decode(errors='ignore') 113 | except binascii.Error: 114 | decoded_body = '' 115 | 116 | if mime_type == 'text/html': 117 | soup = BeautifulSoup(decoded_body, 'html.parser') 118 | return soup.get_text() 119 | else: 120 | return decoded_body 121 | 122 | 123 | email_tools_list = [ 124 | { 125 | "type": "function", 126 | "function": { 127 | "name": "gmail_read_message", 128 | "description": 
"Searches and retrieves emails from the users Gmail.", 129 | "parameters": { 130 | "type": "object", 131 | "properties": { 132 | "object_id": { 133 | "type": "string", 134 | "description": "Searches for messages by object ID to filter returned emails.", 135 | }, 136 | "query": { 137 | "type": "string", 138 | "description": "Search query to filter emails by: `query:`", 139 | }, 140 | "subject": { 141 | "type": "string", 142 | "description": "Words in the subject line to filter by: `subject:`", 143 | }, 144 | "to": { 145 | "type": "string", 146 | "description": "Specify a recipient to filter by: `to:`", 147 | }, 148 | "from": { 149 | "type": "string", 150 | "description": "Specify the sender to filter by: `from:`", 151 | }, 152 | "cc": { 153 | "type": "string", 154 | "description": "Recipient to filter by `cc:`", 155 | }, 156 | "bcc": { 157 | "type": "string", 158 | "description": "Filter by recipient who received a copy to filter by `bcc:`", 159 | }, 160 | "OR": { 161 | "type": "string", 162 | "description": "Find messages that match multiple terms `OR` or `{ }`, Example: from:amy OR from:david, Example: {from:amy from:david}", 163 | }, 164 | "-": { 165 | "type": "string", 166 | "description": "Remove messages from your results `-`, Example: dinner -movie", 167 | }, 168 | "AROUND": { 169 | "type": "string", 170 | "description": "Find messages with words near each other. Use the number to say how many words apart the words can be, Example: holiday AROUND 10 vacation", 171 | }, 172 | "label": { 173 | "type": "string", 174 | "description": "Find messages that have a certain label, Example: label:friends", 175 | }, 176 | "has": { 177 | "type": "string", 178 | "description": "Search for messages that have an attachment `has:`, Example: has:attachment, has:drive, has:document, has:spreadsheet, has:presentation, has:youtube, has:yellow-star, has:blue-start, has:userlabels, has:nouserlabels", 179 | }, 180 | "filename": { 181 | "type": "string", 182 | "description": "Messages that have attachments of a certain type `filename:`, Example: filename:pdf, filename:homework.txt", 183 | }, 184 | "deliveredto": { 185 | "type": "string", 186 | "description": "Search by email for delivered messages `deliveredto:`, Example: deliveredto:", 187 | }, 188 | "category": { 189 | "type": "string", 190 | "description": "Search for messages in a certain category `category:`, Example: category:primary, category:social, category:promotions, category:updates, category:forums, category:reservations, category:purchases", 191 | }, 192 | "size": { 193 | "type": "string", 194 | "description": "Messages larger than a certain size in bytes `size:` `larger:` `smaller:`, Example: size:1000000, larger:10M", 195 | }, 196 | "+": { 197 | "type": "string", 198 | "description": "Search for results that match a word exactly `+`, Example: +unicorn", 199 | }, 200 | "rfc822msgid": { 201 | "type": "string", 202 | "description": "Search for messages with a certain message-id header `rfc822msgid:`, Example: rfc822msgid:200503292@example.com", 203 | }, 204 | "list": { 205 | "type": "string", 206 | "description": "Optional Messages from a mailing list `list:`, Example: list:info@example.com", 207 | }, 208 | "": { 209 | "type": "string", 210 | "description": "Search for an exact word or phrase `""`, Example: \"dinner and movie tonight\"", 211 | }, 212 | "()": { 213 | "type": "string", 214 | "description": "Group multiple search terms together `( )`, Example: subject:(dinner movie)", 215 | }, 216 | "in": { 217 | "type": "string", 218 | 
"description": "Messages in any folder, including Spam and Trash `in:anywhere`, Example: in:anywhere movie", 219 | }, 220 | "is": { 221 | "type": "string", 222 | "description": "Search for messages that are marked: `is:important` `is:starred`, Example: is:important", 223 | }, 224 | "after": { 225 | "type": "string", 226 | "description": "Search for messages sent during a certain time period `after:`, `before:`, `older:`, `newer:`. Example: after:2004/04/16", 227 | }, 228 | "older_than": { 229 | "type": "string", 230 | "description": "Search for messages older or newer than a time period using d (day), m (month), and y (year), Example: newer_than:2d", 231 | }, 232 | "newer_than": { 233 | "type": "string", 234 | "description": "Search for messages older or newer than a time period using d (day), m (month), and y (year), Example: newer_than:2d", 235 | }, 236 | "order_by": { 237 | "type": "string", 238 | "description": "Optional order by field.", 239 | }, 240 | "order_direction": { 241 | "type": "string", 242 | "description": "Optional order direction.", 243 | }, 244 | "fields": { 245 | "type": "string", 246 | "description": "Optional fields to return.", 247 | }, 248 | }, 249 | "required": [], 250 | }, 251 | }, 252 | }, 253 | { 254 | "type": "function", 255 | "function": { 256 | "name": "gmail_send_message", 257 | "description": "Send an email message.", 258 | "parameters": { 259 | "type": "object", 260 | "properties": { 261 | "from": { 262 | "type": "string", 263 | "description": "The sender of the email.", 264 | }, 265 | "to": { 266 | "type": "string", 267 | "description": "The recipient of the email.", 268 | }, 269 | "subject": { 270 | "type": "string", 271 | "description": "The subject of the email.", 272 | }, 273 | "body": { 274 | "type": "string", 275 | "description": "The body of the email.", 276 | }, 277 | }, 278 | "required": ["subject", "body"], 279 | }, 280 | }, 281 | }, 282 | { 283 | "type": "function", 284 | "function": { 285 | "name": "gmail_delete_message", 286 | "description": "Delete an email message by ID.", 287 | "parameters": { 288 | "type": "object", 289 | "properties": { 290 | "message_id": { 291 | "type": "string", 292 | "description": "The ID of the email to delete.", 293 | }, 294 | }, 295 | "required": ["message_id"], 296 | }, 297 | }, 298 | }, 299 | ] 300 | 301 | available_functions = { 302 | "gmail_read_message": gmail_read_message, 303 | "gmail_send_message": gmail_send_message, 304 | "gmail_delete_message": gmail_delete_message, 305 | } 306 | -------------------------------------------------------------------------------- /plugins/_gmail_plugin/gmail_base.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: gmail_base.py 5 | # Path: plugins/_gmail_plugin/gmail_base.py 6 | 7 | """ 8 | This module contains the GmailToolsPlugin class. 
9 | """ 10 | 11 | import os 12 | import functools 13 | from google.auth.transport.requests import Request 14 | from google.oauth2.credentials import Credentials 15 | from google_auth_oauthlib.flow import InstalledAppFlow 16 | from googleapiclient.discovery import build 17 | from plugins._gmail_plugin.email_tools import ( 18 | email_tools_list, 19 | available_functions as email_functions 20 | ) 21 | from plugins._gmail_plugin.calendar_tools import ( 22 | calendar_tools_list, 23 | available_functions as calendar_functions 24 | ) 25 | # Make sure to import drive_tools correctly 26 | from plugins._gmail_plugin.drive_tools import ( 27 | drive_tools_list, 28 | available_functions as drive_functions 29 | ) 30 | from plugins.plugin_base import PluginBase 31 | 32 | 33 | class GmailPlugin(PluginBase): 34 | """ 35 | This class defines the GmailPlugin. 36 | """ 37 | # If modifying these scopes, delete the file token.json. 38 | SCOPES = [ 39 | "https://mail.google.com/", 40 | "https://www.googleapis.com/auth/calendar", 41 | "https://www.googleapis.com/auth/drive" 42 | ] 43 | 44 | def __init__(self): 45 | self.creds = None 46 | self._load_credentials() 47 | self.gmail_service = build("gmail", "v1", credentials=self.creds) 48 | self.calendar_service = build("calendar", "v3", credentials=self.creds) 49 | self.drive_service = build("drive", "v3", credentials=self.creds) 50 | 51 | super().__init__() 52 | 53 | async def initialize(self): 54 | """ 55 | Initialize the plugin. 56 | """ 57 | await self.load_plugin_tools() 58 | 59 | async def load_plugin_tools(self): 60 | """ 61 | Load tools and functions from accompanying scripts. 62 | """ 63 | # Load tools and functions from email_tools.py 64 | self.tools.extend(email_tools_list) 65 | for func_name, func in email_functions.items(): 66 | self.available_functions[func_name] = functools.partial( 67 | func, 68 | self.gmail_service 69 | ) 70 | 71 | # Load tools and functions from calendar_tools.py 72 | self.tools.extend(calendar_tools_list) 73 | for func_name, func in calendar_functions.items(): 74 | self.available_functions[func_name] = functools.partial( 75 | func, 76 | self.calendar_service 77 | ) 78 | 79 | # Load tools and functions from drive_tools.py 80 | self.tools.extend(drive_tools_list) 81 | for func_name, func in drive_functions.items(): 82 | # Pass the drive_service to the drive functions 83 | self.available_functions[func_name] = functools.partial( 84 | func, 85 | self.drive_service 86 | ) 87 | 88 | def _load_credentials(self): 89 | if os.path.exists("plugins/_gmail_plugin/token.json"): 90 | self.creds = Credentials.from_authorized_user_file( 91 | "plugins/_gmail_plugin/token.json", 92 | self.SCOPES 93 | ) 94 | if not self.creds or not self.creds.valid: 95 | if self.creds and self.creds.expired and self.creds.refresh_token: 96 | self.creds.refresh(Request()) 97 | else: 98 | flow = InstalledAppFlow.from_client_config( 99 | { 100 | "installed": { 101 | "client_id": os.getenv("GOOGLE_CLIENT_ID"), 102 | "client_secret": os.getenv( 103 | "GOOGLE_CLIENT_SECRET" 104 | ), 105 | "redirect_uris": [ 106 | os.getenv("GOOGLE_REDIRECT_URI") 107 | ], 108 | "auth_uri": ( 109 | "https://accounts.google.com/o/oauth2/auth" 110 | ), 111 | "token_uri": ( 112 | "https://oauth2.googleapis.com/token" 113 | ), 114 | } 115 | }, 116 | self.SCOPES, 117 | ) 118 | self.creds = flow.run_local_server(port=0) 119 | with open( 120 | "plugins/_gmail_plugin/token.json", 121 | "w", 122 | encoding="utf-8" 123 | ) as token: 124 | token.write(self.creds.to_json()) 125 | 
-------------------------------------------------------------------------------- /plugins/_google_search_plugin/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/plugins/_google_search_plugin/__init__.py -------------------------------------------------------------------------------- /plugins/_google_search_plugin/google_search_base.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: google_search_base.py 5 | # Path: plugins/_google_search_plugin/google_search_base.py 6 | 7 | """ 8 | This module defines the Google Search plugin. 9 | """ 10 | 11 | from plugins.plugin_base import PluginBase 12 | 13 | from plugins._google_search_plugin.google_search_tools import ( 14 | search_google_tools, 15 | available_functions as google_functions 16 | ) 17 | 18 | 19 | # Create the GoogleSearchPlugin class 20 | class GoogleSearchPlugin(PluginBase): 21 | """ 22 | This class defines the Google Search plugin. 23 | """ 24 | 25 | async def initialize(self): 26 | """ 27 | Initialize the plugin. 28 | """ 29 | self.load_plugin_tools() 30 | 31 | def load_plugin_tools(self): 32 | """ 33 | Load tools and functions from accompanying scripts. 34 | """ 35 | # Load tools and functions from google_search_tools.py 36 | self.tools.extend(search_google_tools) 37 | self.available_functions.update(google_functions) 38 | -------------------------------------------------------------------------------- /plugins/_google_search_plugin/google_search_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: google_search_tools.py 5 | # Path: plugins/_google_search_plugin/google_search_tools.py 6 | 7 | """ 8 | Tools for interacting with Google Search API. 9 | """ 10 | 11 | import os 12 | import asyncio 13 | from typing import List 14 | import aiohttp 15 | import requests 16 | 17 | TOOL_API_KEY = os.getenv("GOOGLE_API_KEY") 18 | CSE_ID = os.getenv("GOOGLE_CSE_ID") 19 | 20 | 21 | def search_google_synchronous(query: str, num: [int] = 10, start: [int] = 1, fileType: [str] = None, lr: [str] = None, safe: [str] = "off") -> List: 22 | """ 23 | Search Google and return results. 24 | 25 | :param query: The search query string. 26 | :param num: Number of search results to return. 27 | :param start: The first result to retrieve (starts at 1). 28 | :param fileType: Filter results to a specific file type. 29 | :param lr: Restricts search to documents written in a particular language. 30 | :param safe: Search safety level (e.g., off, medium, high). 31 | :return: A list of search results. 
32 | """ 33 | 34 | url = "https://www.googleapis.com/customsearch/v1" 35 | params = { 36 | "key": TOOL_API_KEY, 37 | "cx": CSE_ID, 38 | "q": query, 39 | "num": num, 40 | "start": start, 41 | "safe": safe, 42 | } 43 | 44 | # Add optional parameters if they are provided 45 | if fileType: 46 | params["fileType"] = fileType 47 | if lr: 48 | params["lr"] = lr 49 | 50 | try: 51 | res = requests.get(url, params=params, timeout=5) 52 | data = res.json() 53 | results = [] 54 | if data.get("items"): 55 | for item in data["items"]: 56 | results.append( 57 | { 58 | "title": item["title"], 59 | "description": item["snippet"], 60 | "link": item["link"], 61 | } 62 | ) 63 | 64 | return results 65 | 66 | except requests.exceptions.RequestException: 67 | return [] 68 | 69 | 70 | async def search_google_asynchronous(query: str, num: [int] = 10, start: [int] = 1, fileType: [str] = None, lr: [str] = None, safe: [str] = "off") -> List: 71 | """ 72 | Search Google and return results. 73 | 74 | :param query: The search query string. 75 | :param num: Number of search results to return. 76 | :param start: The first result to retrieve (starts at 1). 77 | :param fileType: Filter results to a specific file type. 78 | :param lr: Restricts search to documents written in a particular language. 79 | :param safe: Search safety level (e.g., off, medium, high). 80 | :return: A list of search results. 81 | """ 82 | 83 | url = "https://www.googleapis.com/customsearch/v1" 84 | params = { 85 | "key": TOOL_API_KEY, 86 | "cx": CSE_ID, 87 | "q": query, 88 | "num": num, 89 | "start": start, 90 | "safe": safe, 91 | } 92 | 93 | if fileType: 94 | params["fileType"] = fileType 95 | if lr: 96 | params["lr"] = lr 97 | 98 | async with aiohttp.ClientSession() as session: 99 | try: 100 | async with session.get(url, params=params, timeout=5) as res: 101 | data = await res.json() 102 | 103 | results = [] 104 | if data.get("items"): 105 | for item in data["items"]: 106 | results.append( 107 | { 108 | "title": item["title"], 109 | "description": item["snippet"], 110 | "link": item["link"], 111 | } 112 | ) 113 | 114 | return results 115 | 116 | except (KeyError, TypeError): 117 | 118 | return [] 119 | 120 | except asyncio.TimeoutError: 121 | return [] 122 | 123 | except aiohttp.ClientError: 124 | return [] 125 | 126 | 127 | search_google_tools = [ 128 | { 129 | "type": "function", 130 | "function": { 131 | "name": "search_google_synchronous", 132 | "description": "This function allows you to use the Google custom search engine API synchronously.", 133 | "parameters": { 134 | "type": "object", 135 | "properties": { 136 | "query": { 137 | "type": "string", 138 | "description": "Query to perform the search on.", 139 | }, 140 | "num": { 141 | "type": "integer", 142 | "description": "Number of search results to return.", 143 | }, 144 | "start": { 145 | "type": "integer", 146 | "description": "The first result to retrieve (starts at 1).", 147 | }, 148 | "fileType": { 149 | "type": "string", 150 | "description": "Filter results to a specific file type.", 151 | }, 152 | "lr": { 153 | "type": "string", 154 | "description": "Restricts the search to documents written in a particular language.", 155 | }, 156 | "safe": { 157 | "type": "string", 158 | "description": "Search safety level.", 159 | }, 160 | }, 161 | "required": ["query"], 162 | }, 163 | }, 164 | }, 165 | { 166 | "type": "function", 167 | "function": { 168 | "name": "search_google_asynchronous", 169 | "description": "This function allows you to use the Google custom search engine API 
asynchronously.", 170 | "parameters": { 171 | "type": "object", 172 | "properties": { 173 | "query": { 174 | "type": "string", 175 | "description": "Query to perform the search on.", 176 | }, 177 | "num": { 178 | "type": "integer", 179 | "description": "Number of search results to return.", 180 | }, 181 | "start": { 182 | "type": "integer", 183 | "description": "The first result to retrieve (starts at 1).", 184 | }, 185 | "fileType": { 186 | "type": "string", 187 | "description": "Filter results to a specific file type.", 188 | }, 189 | "lr": { 190 | "type": "string", 191 | "description": "Restricts the search to documents written in a particular language.", 192 | }, 193 | "safe": { 194 | "type": "string", 195 | "description": "Search safety level.", 196 | }, 197 | }, 198 | "required": ["query"], 199 | }, 200 | }, 201 | }, 202 | ] 203 | 204 | available_functions = { 205 | "search_google_synchronous": search_google_synchronous, 206 | "search_google_asynchronous": search_google_asynchronous, 207 | } 208 | -------------------------------------------------------------------------------- /plugins/_news_plugin/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/plugins/_news_plugin/__init__.py -------------------------------------------------------------------------------- /plugins/_news_plugin/news_base.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: news_expert.py 5 | # Path: plugins/_news_expert/news_expert.py 6 | 7 | """ 8 | This module defines the News Expert plugin. 9 | """ 10 | 11 | from plugins._news_plugin.newsapi_tools import ( 12 | newsorg_tool_list, 13 | available_functions as newsapi_functions 14 | ) 15 | from plugins._news_plugin.nytimes_tools import ( 16 | nytimes_tool_list, 17 | available_functions as nytimes_functions 18 | ) 19 | 20 | from plugins.plugin_base import PluginBase 21 | 22 | 23 | class NewsPlugin(PluginBase): 24 | """ 25 | This class defines the News Expert plugin. 26 | """ 27 | async def initialize(self): 28 | """ 29 | Initialize the plugin. 30 | """ 31 | self.load_plugin_tools() 32 | 33 | def load_plugin_tools(self): 34 | """ 35 | Load tools and functions from accompanying scripts. 36 | """ 37 | # Load tools and functions from newsapi_tools.py 38 | self.tools.extend(newsorg_tool_list) 39 | self.available_functions.update(newsapi_functions) 40 | 41 | # Load tools and functions from nytimes_tools.py 42 | self.tools.extend(nytimes_tool_list) 43 | self.available_functions.update(nytimes_functions) 44 | -------------------------------------------------------------------------------- /plugins/_news_plugin/newsapi_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: newsapi_tools.py 5 | # Path: plugins/_news_plugin/newsapi_tools.py 6 | 7 | """ 8 | Tools for interacting with NewsAPI.org API. 
9 | register for an API key @ https://newsapi.org/ 10 | """ 11 | 12 | import os 13 | from typing import List 14 | import aiohttp 15 | from rich.console import Console 16 | 17 | console = Console() 18 | 19 | TOOL_API_KEY = os.getenv("NEWS_API_KEY") 20 | 21 | 22 | async def get_articles_newsapi(api_key=TOOL_API_KEY, **kwargs) -> List: 23 | """ 24 | Fetch news from NewsAPI based on query parameters 25 | """ 26 | query_params = kwargs 27 | query_params["apiKey"] = api_key 28 | url = "https://newsapi.org/v2/everything" 29 | 30 | async with aiohttp.ClientSession() as session: 31 | try: 32 | async with session.get(url, params=query_params, timeout=5) as res: 33 | data = await res.json() 34 | news = [] 35 | articles = data.get("articles") 36 | if articles: 37 | for article in articles: 38 | news.append( 39 | { 40 | "title": article.get("title", ""), 41 | "description": article.get("description", ""), 42 | "snippet": ( 43 | (article.get("content", "")[:1000] + "...") 44 | if article.get("content") else "" 45 | ), 46 | "source": article.get("source", {}).get("name", ""), 47 | "link": article.get("url", "") 48 | } 49 | ) 50 | elif data.get("status") == "error": 51 | error_message = data.get('message', 'Unknown error') 52 | return [] 53 | else: 54 | return [] 55 | 56 | return news 57 | 58 | except aiohttp.ServerTimeoutError: 59 | return [] 60 | 61 | 62 | async def get_top_headlines_newsapi(api_key=TOOL_API_KEY, **kwargs) -> List: 63 | """ 64 | Fetch top headlines from NewsAPI based on query parameters 65 | """ 66 | query_params = kwargs 67 | query_params["apiKey"] = api_key 68 | url = "https://newsapi.org/v2/top-headlines" 69 | 70 | async with aiohttp.ClientSession() as session: 71 | try: 72 | async with session.get(url, params=query_params, timeout=5) as res: 73 | data = await res.json() 74 | news = [] 75 | articles = data.get("articles") 76 | if articles: 77 | for article in articles: 78 | news.append( 79 | { 80 | "title": article.get("title", ""), 81 | "description": article.get("description", ""), 82 | "snippet": ( 83 | (article.get("content", "")[:500] + "...") 84 | if article.get("content") else "" 85 | ), 86 | "source": article.get("source", {}).get("name", ""), 87 | "link": article.get("url", "") 88 | } 89 | ) 90 | elif data.get("status") == "error": 91 | error_message = data.get('message', 'Unknown error') 92 | return [] 93 | else: 94 | return [] 95 | 96 | return news 97 | 98 | except aiohttp.ServerTimeoutError: 99 | return [] 100 | 101 | # Define the tool list outside the class 102 | newsorg_tool_list = [ 103 | { 104 | "type": "function", 105 | "function": { 106 | "name": "get_articles_newsapi", 107 | "description": "This function allows you to get news from the NewsAPI.org API.", 108 | "parameters": { 109 | "type": "object", 110 | "properties": { 111 | "q": { 112 | "type": "string", 113 | "description": "The search query to find news articles for. Surround phrases with quotes for exact match. Prepend words or phrases that must appear with a + symbol. Prepend words that must not appear with a - symbol. Alternatively you can use the AND / OR / NOT keywords, and optionally group these with parenthesis. The complete value for q must be URL-encoded. Max length is 500 characters.", 114 | }, 115 | "searchin": { 116 | "type": "string", 117 | "description": "The fields to restrict your q search to. The possible options are: title, description, content. Multiple options can be specified by separating them with a comma. 
Default: all fields are searched.", 118 | "enum": ["title", "description", "content"], 119 | "default": ["title", "description", "content"], 120 | }, 121 | "sources": { 122 | "type": "string", 123 | "description": "A comma-separated string of identifiers (maximum 20) for the news sources or blogs you want headlines from. Use the /sources endpoint to locate these programmatically. Note: you can't mix this param with the country or category params.", 124 | }, 125 | "domains": { 126 | "type": "string", 127 | "description": "A comma-separated string of domains to restrict the search to.", 128 | }, 129 | "excludeDomains": { 130 | "type": "string", 131 | "description": "A comma-separated string of domains to remove from the results.", 132 | }, 133 | "from": { 134 | "type": "string", 135 | "description": "A date and optional time for the oldest article allowed. This should be in ISO 8601 format. Default: the oldest according to your plan.", 136 | }, 137 | "to": { 138 | "type": "string", 139 | "description": "A date and optional time for the newest article allowed. This should be in ISO 8601 format. Default: the newest according to your plan.", 140 | }, 141 | "language": { 142 | "type": "string", 143 | "description": "Language of News", 144 | "enum": ["en", "es"], 145 | "default": "en", 146 | }, 147 | "sortBy": { 148 | "type": "string", 149 | "description": "The order to sort the articles in. Possible options: relevancy, popularity, publishedAt. Default: publishedAt.", 150 | "enum": ["relevancy", "popularity", "publishedAt"], 151 | "default": "publishedAt", 152 | }, 153 | "pageSize": { 154 | "type": "integer", 155 | "description": "Page Size", 156 | "default": 10, 157 | }, 158 | "page": { 159 | "type": "integer", 160 | "description": "Page Number", 161 | "default": 1, 162 | }, 163 | }, 164 | "required": ["q"], 165 | }, 166 | }, 167 | }, 168 | { 169 | "type": "function", 170 | "function": { 171 | "name": "get_top_headlines_newsapi", 172 | "description": "This function allows you to get news from the NewsAPI.org API.", 173 | "parameters": { 174 | "type": "object", 175 | "properties": { 176 | "category": { 177 | "type": "string", 178 | "description": "The category you want to get headlines for. Possible options: business entertainment general health science sports technology.", 179 | "enum": ["business", "entertainment", "general", "health", "science", "sports", "technology"], 180 | "default": "general", 181 | }, 182 | "sources": { 183 | "type": "string", 184 | "description": "A comma-separated string of identifiers (maximum 20) for the news sources or blogs you want headlines from. Use the /sources endpoint to locate these programmatically. Note: you can't mix this param with the country or category params.", 185 | }, 186 | "q": { 187 | "type": "string", 188 | "description": "The search query to find news articles for. Surround phrases with quotes for exact match. Prepend words or phrases that must appear with a + symbol. Prepend words that must not appear with a - symbol. Alternatively you can use the AND / OR / NOT keywords, and optionally group these with parenthesis. The complete value for q must be URL-encoded. 
Max length is 500 characters.", 189 | }, 190 | "pageSize": { 191 | "type": "integer", 192 | "description": "Page Size", 193 | "default": 5, 194 | }, 195 | "page": { 196 | "type": "integer", 197 | "description": "Page Number", 198 | "default": 1, 199 | }, 200 | }, 201 | "required": ["q"], 202 | }, 203 | }, 204 | }, 205 | ] 206 | 207 | # Define the available functions outside the class 208 | available_functions = { 209 | "get_articles_newsapi": get_articles_newsapi, 210 | "get_top_headlines_newsapi": get_top_headlines_newsapi, 211 | } 212 | -------------------------------------------------------------------------------- /plugins/_news_plugin/nytimes_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: nytimes_tools.py 5 | # Path: plugins/_news_plugin/nytimes_tools.py 6 | 7 | """ 8 | This script contains the functions, tools, and available tools lists 9 | to fetch articles from New York Times. 10 | """ 11 | 12 | import os 13 | from typing import List 14 | import aiohttp 15 | from rich.console import Console 16 | 17 | # Initialize the rich console 18 | console = Console() 19 | 20 | # Define the APIs URL and API key 21 | TOOL_URL = os.getenv("NYT_ARTICLE_SEARCH_URL") 22 | TOOL_API_KEY = os.getenv("NYT_API_KEY") 23 | 24 | 25 | async def get_news_from_nytimes(query: str, api_key=TOOL_API_KEY, url=TOOL_URL) -> List: 26 | """ 27 | Asynchronously fetches news articles from the New York Times API based on a search query. 28 | 29 | Args: 30 | - api_key (str): The API key used for authenticating with the New York Times API. 31 | - query (str): The search query string to find articles related to. 32 | - url (str): The base URL of the New York Times API. 33 | 34 | Returns: 35 | - List[Dict[str, str]]: A list of dictionaries, where each dictionary 36 | - contains information about a news article, including 'title', 'description', 'snippet', and 'link'. 
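    Example (illustrative; assumes NYT_API_KEY and NYT_ARTICLE_SEARCH_URL are set in
    the environment; the query string is a placeholder):

        articles = await get_news_from_nytimes("electric vehicles")
        # Each entry has "title", "description", "snippet" and "link" keys.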
37 | """ 38 | # Define the parameters for the request 39 | params = { 40 | "q": query, 41 | "api-key": api_key, 42 | } 43 | 44 | # Make the request to the New York Times API 45 | async with aiohttp.ClientSession() as session: 46 | try: 47 | async with session.get(url, params=params) as res: 48 | res.raise_for_status() 49 | data = await res.json() 50 | nyt_news = [] 51 | for doc in data["response"]["docs"]: 52 | nyt_news.append( 53 | { 54 | "title": doc["headline"]["main"], 55 | "description": doc["abstract"], 56 | "snippet": doc["lead_paragraph"], 57 | "link": doc["web_url"], 58 | } 59 | ) 60 | return nyt_news 61 | 62 | # Handle exceptions 63 | except aiohttp.ServerTimeoutError: 64 | return [] 65 | 66 | 67 | nytimes_tool_list = [ 68 | { 69 | "type": "function", 70 | "function": { 71 | "name": "get_news_from_nytimes", 72 | "description": "Fetch news from New York Times based on a query", 73 | "parameters": { 74 | "type": "object", 75 | "properties": { 76 | "query": { 77 | "type": "string", 78 | "description": "The New York Times search query.", 79 | }, 80 | }, 81 | "required": ["query"], 82 | }, 83 | }, 84 | } 85 | ] 86 | 87 | available_functions = { 88 | "get_news_from_nytimes": get_news_from_nytimes, 89 | } 90 | -------------------------------------------------------------------------------- /plugins/_nhtsa_plugin/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/plugins/_nhtsa_plugin/__init__.py -------------------------------------------------------------------------------- /plugins/_nhtsa_plugin/nhtsa_vpic_base.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: nhtsa_vpic_base.py 5 | # Path: plugins/_nhtsa_plugin/nhtsa_vic_base.py 6 | 7 | """ 8 | This module defines the NHTSA vPic Vehicle Data plugin. 9 | """ 10 | 11 | from plugins._nhtsa_plugin.nhtsa_vpic_tools import ( 12 | nhtsa_vpic_tool_list, 13 | available_functions as nhtsa_vpic_functions 14 | ) 15 | from plugins.plugin_base import PluginBase 16 | 17 | 18 | class NHTSAVPICPlugin(PluginBase): 19 | """ 20 | This class defines the NHTSA vPic Vehicle Data plugin. 21 | """ 22 | async def initialize(self): 23 | """ 24 | Initialize the plugin. 25 | """ 26 | self.load_plugin_tools() 27 | 28 | def load_plugin_tools(self): 29 | """ 30 | Load tools and functions from accompanying scripts. 31 | """ 32 | # Load tools and functions from nhtsa_vpic_tools.py 33 | self.tools.extend(nhtsa_vpic_tool_list) 34 | self.available_functions.update(nhtsa_vpic_functions) 35 | -------------------------------------------------------------------------------- /plugins/_nhtsa_plugin/nhtsa_vpic_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: nhtsa_vpic_tools.py 5 | # Path: plugins/_nhtsa_plugin/nhtsa_vpic_tools.py 6 | 7 | """ 8 | This module defines the NHTSA vPIC VIN tools. 
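Example (illustrative; the VIN below is a made-up placeholder):

    results = get_vehicle_details_by_vin_synchronous("1FTEW1E53JFC12345")
    # "results" is the raw "Results" list returned by the vPIC decodevin endpoint.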
9 | """ 10 | 11 | import requests 12 | 13 | def get_vehicle_details_by_vin_synchronous(vin): 14 | """Retrieve vehicle details by VIN.""" 15 | endpoint = f"https://vpic.nhtsa.dot.gov/api/vehicles/decodevin/{vin}?format=json" 16 | response = requests.get(endpoint, timeout=5) 17 | data = response.json() 18 | return data['Results'] 19 | 20 | 21 | async def get_vehicle_details_by_vin_asynchronous(vin): 22 | """Retrieve vehicle details by VIN.""" 23 | endpoint = f"https://vpic.nhtsa.dot.gov/api/vehicles/decodevin/{vin}?format=json" 24 | response = requests.get(endpoint, timeout=5) 25 | data = response.json() 26 | return data['Results'] 27 | 28 | 29 | nhtsa_vpic_tool_list = [ 30 | { 31 | "type": "function", 32 | "function": { 33 | "name": "get_vehicle_details_by_vin_synchronous", 34 | "description": "This function allows you to retrieve details of a vehicle by VIN from the NHTSA vPic API synchronously.", 35 | "parameters": { 36 | "type": "object", 37 | "properties": { 38 | "vin": { 39 | "type": "string", 40 | "description": "Vehicle Identification Number", 41 | }, 42 | }, 43 | "required": ["vin"], 44 | }, 45 | }, 46 | }, 47 | { 48 | "type": "function", 49 | "function": { 50 | "name": "get_vehicle_details_by_vin_asynchronous", 51 | "description": "This function allows you to retrieve details of a vehicle by VIN from the NHTSA vPic API asynchronously.", 52 | "parameters": { 53 | "type": "object", 54 | "properties": { 55 | "vin": { 56 | "type": "string", 57 | "description": "Vehicle Identification Number", 58 | }, 59 | }, 60 | "required": ["vin"], 61 | }, 62 | }, 63 | }, 64 | ] 65 | 66 | 67 | available_functions = { 68 | "get_vehicle_details_by_vin_synchronous": get_vehicle_details_by_vin_synchronous, 69 | "get_vehicle_details_by_vin_asynchronous": get_vehicle_details_by_vin_asynchronous, 70 | } 71 | -------------------------------------------------------------------------------- /plugins/_system_commands/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/plugins/_system_commands/__init__.py -------------------------------------------------------------------------------- /plugins/_system_commands/system_commands_base.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: system_commands_base.py 5 | # Path: plugins/_system_commands/system_commands_base.py 6 | 7 | """ 8 | This file contains the System Commands plugin class. 9 | """ 10 | 11 | from plugins._system_commands.system_commands_tools import ( 12 | system_commands_tool_list, 13 | available_functions as system_commands_functions 14 | ) 15 | from plugins.plugin_base import PluginBase 16 | 17 | 18 | class SystemCommandsPlugin(PluginBase): 19 | """ 20 | This class defines the System Commands plugin. 21 | """ 22 | async def initialize(self): 23 | """ 24 | Initialize the plugin. 25 | """ 26 | self.load_plugin_tools() 27 | 28 | def load_plugin_tools(self): 29 | """ 30 | Load tools and functions from accompanying scripts. 
31 | """ 32 | # Load tools and functions from system_commands_tools.py 33 | self.tools.extend(system_commands_tool_list) 34 | self.available_functions.update(system_commands_functions) 35 | -------------------------------------------------------------------------------- /plugins/_system_commands/system_commands_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: system_commands_tools.py 5 | # Path: plugins/_system_commands/system_commands_tools.py 6 | 7 | """ 8 | This file contains the System Commands function definitions and tools. 9 | """ 10 | 11 | import sys 12 | import platform 13 | import subprocess 14 | import json 15 | 16 | 17 | async def get_system_information(): 18 | """ 19 | Gets system information. 20 | """ 21 | # Get system architecture 22 | is_64bits = sys.maxsize > 2**32 23 | arch = "64bit" if is_64bits else "32bit" 24 | 25 | # Get system distribution (works on Linux only) 26 | if platform.system() == "Linux": 27 | try: 28 | distro_info = " ".join(platform.freedesktop_os_release().values()) 29 | except (OSError, AttributeError): 30 | distro_info = None 31 | else: 32 | distro_info = None 33 | 34 | # Get Windows version (works on Windows only) 35 | if platform.system() == "Windows": 36 | win_ver = platform.win32_ver() 37 | win_version = f"{win_ver[0]} {win_ver[1]} {win_ver[3]}" 38 | else: 39 | win_version = None 40 | 41 | # Get macOS version (works on macOS only) 42 | if platform.system() == "Darwin": 43 | mac_ver = platform.mac_ver() 44 | mac_version = f"{mac_ver[0]} {mac_ver[2]}" 45 | else: 46 | mac_version = None 47 | 48 | # Build the system information dictionary 49 | system_info = { 50 | "architecture": arch, 51 | "distribution": distro_info, 52 | "windows_version": win_version, 53 | "mac_version": mac_version, 54 | "system": platform.system(), 55 | "node": platform.node(), 56 | "release": platform.release(), 57 | "version": platform.version(), 58 | "machine": platform.machine(), 59 | "processor": platform.processor(), 60 | } 61 | 62 | return json.dumps(system_info) 63 | 64 | 65 | async def run_system_command(command): 66 | """ 67 | Runs a system command and returns the output. 68 | """ 69 | try: 70 | result = subprocess.run( 71 | command, 72 | shell=True, 73 | check=True, 74 | stdout=subprocess.PIPE, 75 | stderr=subprocess.PIPE, 76 | text=True 77 | ) 78 | return json.dumps({ 79 | "stdout": result.stdout.strip(), 80 | "stderr": result.stderr.strip(), 81 | "returncode": result.returncode 82 | }) 83 | except subprocess.CalledProcessError as e: 84 | return json.dumps({ 85 | "error": "Command execution failed", 86 | "details": str(e), 87 | "returncode": e.returncode 88 | }) 89 | 90 | 91 | async def read_python_script(file_path): 92 | """ 93 | Reads the content of a Python script. 
94 | """ 95 | try: 96 | with open(file_path, 'r', encoding='utf-8') as file: 97 | content = file.read() 98 | return json.dumps({ 99 | "content": content 100 | }) 101 | except PermissionError as e: 102 | return json.dumps({ 103 | "error": "Failed to write the Python script", 104 | "details": str(e) 105 | }) 106 | except FileNotFoundError as e: 107 | return json.dumps({ 108 | "error": "File not found", 109 | "details": str(e) 110 | }) 111 | except IsADirectoryError as e: 112 | return json.dumps({ 113 | "error": "The path is a directory", 114 | "details": str(e) 115 | }) 116 | except OSError as e: 117 | return json.dumps({ 118 | "error": "OS error", 119 | "details": str(e) 120 | }) 121 | 122 | 123 | async def write_python_script(file_path, content): 124 | """ 125 | Writes content to a Python script. 126 | """ 127 | try: 128 | with open(file_path, 'w', encoding='utf-8') as file: 129 | file.write(content) 130 | return json.dumps({ 131 | "message": "Python script written successfully" 132 | }) 133 | except PermissionError as e: 134 | return json.dumps({ 135 | "error": "Failed to write the Python script", 136 | "details": str(e) 137 | }) 138 | except FileNotFoundError as e: 139 | return json.dumps({ 140 | "error": "File not found", 141 | "details": str(e) 142 | }) 143 | except IsADirectoryError as e: 144 | return json.dumps({ 145 | "error": "The path is a directory", 146 | "details": str(e) 147 | }) 148 | except OSError as e: 149 | return json.dumps({ 150 | "error": "OS error", 151 | "details": str(e) 152 | }) 153 | 154 | 155 | async def amend_python_script(file_path, content): 156 | """ 157 | Amends a Python script by appending content to it. 158 | """ 159 | try: 160 | with open(file_path, 'a', encoding='utf-8') as file: 161 | file.write(content) 162 | return json.dumps({ 163 | "message": "Python script amended successfully" 164 | }) 165 | except IOError as e: 166 | return json.dumps({ 167 | "error": "Failed to amend the Python script", 168 | "details": str(e) 169 | }) 170 | 171 | 172 | async def execute_python_script(file_path): 173 | """ 174 | Executes a Python script and returns the output. 
175 | """ 176 | try: 177 | result = subprocess.run( 178 | ['python', file_path], 179 | stdout=subprocess.PIPE, 180 | stderr=subprocess.PIPE, 181 | text=True, 182 | check=True 183 | ) 184 | return json.dumps({ 185 | "stdout": result.stdout.strip(), 186 | "stderr": result.stderr.strip(), 187 | "returncode": result.returncode 188 | }) 189 | except subprocess.CalledProcessError as e: 190 | return json.dumps({ 191 | "error": "Python script execution failed", 192 | "details": str(e), 193 | "returncode": e.returncode 194 | }) 195 | 196 | 197 | system_commands_tool_list = [ 198 | { 199 | "type": "function", 200 | "function": { 201 | "name": "get_system_information", 202 | "description": "This function allows you to gather information about the local machine.", 203 | }, 204 | }, 205 | { 206 | "type": "function", 207 | "function": { 208 | "name": "run_system_command", 209 | "description": "This function allows you to run a system command.", 210 | "parameters": { 211 | "type": "object", 212 | "properties": { 213 | "command": { 214 | "type": "string", 215 | "description": "The command to run.", 216 | }, 217 | }, 218 | "required": ["command"], 219 | }, 220 | }, 221 | }, 222 | { 223 | "type": "function", 224 | "function": { 225 | "name": "read_python_script", 226 | "description": "This function allows you to read a Python script.", 227 | "parameters": { 228 | "type": "object", 229 | "properties": { 230 | "file_path": { 231 | "type": "string", 232 | "description": "path to the Python script.", 233 | }, 234 | }, 235 | "required": ["file_path"], 236 | }, 237 | }, 238 | }, 239 | { 240 | "type": "function", 241 | "function": { 242 | "name": "write_python_script", 243 | "description": "This function allows you to write a Python script.", 244 | "parameters": { 245 | "type": "object", 246 | "properties": { 247 | "file_path": { 248 | "type": "string", 249 | "description": "path to the Python script.", 250 | }, 251 | "content": { 252 | "type": "string", 253 | "description": "Content to write to the script.", 254 | }, 255 | }, 256 | "required": ["file_path", "content"], 257 | }, 258 | }, 259 | }, 260 | { 261 | "type": "function", 262 | "function": { 263 | "name": "amend_python_script", 264 | "description": "This function allows you to amend a Python script.", 265 | "parameters": { 266 | "type": "object", 267 | "properties": { 268 | "file_path": { 269 | "type": "string", 270 | "description": "path to the Python script.", 271 | }, 272 | "content": { 273 | "type": "string", 274 | "description": "The content to append to the script.", 275 | }, 276 | }, 277 | "required": ["file_path", "content"], 278 | }, 279 | }, 280 | }, 281 | { 282 | "type": "function", 283 | "function": { 284 | "name": "execute_python_script", 285 | "description": "This function allows you to execute a Python script.", 286 | "parameters": { 287 | "type": "object", 288 | "properties": { 289 | "file_path": { 290 | "type": "string", 291 | "description": "path to the Python script.", 292 | }, 293 | }, 294 | "required": ["file_path"], 295 | }, 296 | }, 297 | }, 298 | ] 299 | 300 | available_functions = { 301 | "get_system_information": get_system_information, 302 | "run_system_command": run_system_command, 303 | "read_python_script": read_python_script, 304 | "write_python_script": write_python_script, 305 | "amend_python_script": amend_python_script, 306 | "execute_python_script": execute_python_script, 307 | } 308 | -------------------------------------------------------------------------------- /plugins/plugin_base.py: 
-------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: plugin_base.py 5 | # Path: plugins/_plugin_name/plugin_name_base.py 6 | 7 | """ 8 | Base class for the plugins. 9 | """ 10 | 11 | 12 | class PluginBase: 13 | """ 14 | Plugin Base Class. 15 | """ 16 | def __init__(self, **kwargs): 17 | self.tools = [] 18 | self.available_functions = {} 19 | self.__dict__.update(kwargs) 20 | 21 | async def initialize(self): 22 | """ 23 | Initialize the plugin. 24 | """ 25 | raise NotImplementedError 26 | 27 | def load_plugin_tools(self): 28 | """ 29 | Load tools and functions from accompanying scripts. 30 | """ 31 | raise NotImplementedError 32 | 33 | def get_tools(self): 34 | """ 35 | Get the tools. 36 | """ 37 | return self.tools 38 | 39 | def get_available_functions(self): 40 | """ 41 | Get the available functions. 42 | """ 43 | return self.available_functions 44 | -------------------------------------------------------------------------------- /plugins/plugins_enabled.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: plugin_enabled.py 5 | # Path: plugins/plugin_enabled.py 6 | 7 | """ 8 | Enable plugins. 9 | """ 10 | 11 | import os 12 | import importlib.util 13 | import inspect 14 | from rich.console import Console 15 | from plugins.plugin_base import PluginBase 16 | 17 | console = Console() 18 | 19 | 20 | async def enable_plugins(available_functions, tools): 21 | """ 22 | Enable plugins. 23 | """ 24 | plugins_folder = "plugins" 25 | 26 | for root, dirs, files in os.walk(plugins_folder): 27 | for file in files: 28 | if file.endswith(".py") and not file.startswith("_"): 29 | file_path = os.path.join(root, file) 30 | 31 | spec = importlib.util.spec_from_file_location( 32 | file[:-3], file_path 33 | ) 34 | module = importlib.util.module_from_spec(spec) 35 | try: 36 | spec.loader.exec_module(module) 37 | except Exception as e: 38 | continue 39 | 40 | for _, cls in inspect.getmembers(module, inspect.isclass): 41 | if issubclass(cls, PluginBase) and cls is not PluginBase: 42 | 43 | env_var_name = "ENABLE_%s" % cls.__name__.upper() 44 | plugin_enabled = os.getenv(env_var_name, "false").lower() == "true" 45 | 46 | if plugin_enabled and cls.__name__ not in available_functions: 47 | 48 | plugin = cls() 49 | await plugin.initialize() 50 | plugin_tools = plugin.get_tools() 51 | available_functions.update( 52 | plugin.get_available_functions() 53 | ) 54 | tools.extend(plugin_tools) 55 | else: 56 | console.print( 57 | f"Plugin {cls.__name__} is not enabled. Set {env_var_name} to true to enable it." 
58 | ) 59 | 60 | return available_functions, tools 61 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohttp>=3.9.5 2 | appdirs>=1.4.4 3 | BeautifulSoup4>=4.12.3 4 | elevenlabs==0.2.27 5 | fsspec>=2024.3.1 6 | funcy>=2.0 7 | google-auth-oauthlib>=1.2.0 8 | google-api-python-client>=2.126.0 9 | google-cloud-aiplatform>=1.47.0 10 | google-generativeai>=0.5.1 11 | openai>=1.20.0 12 | pygame>=2.5.2 13 | pyttsx3>=2.90 14 | python-dotenv>=1.0.1 15 | pytz>=2024.1 16 | rich>=13.7.1 17 | spacy>=3.7.4 18 | tiktoken>=0.6.0 19 | tzlocal>=5.2 -------------------------------------------------------------------------------- /static/J5.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/static/J5.webp -------------------------------------------------------------------------------- /static/U1.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/static/U1.webp -------------------------------------------------------------------------------- /static/chat-style.css: -------------------------------------------------------------------------------- 1 | @import url("https://fonts.googleapis.com/css2?family=Roboto:wght@400;700&display=swap"); 2 | 3 | body { 4 | font-family: "Roboto", sans-serif; 5 | display: flex; 6 | align-items: center; 7 | justify-content: center; 8 | height: 95vh; 9 | background-color: #f0f0f0; 10 | background-image: linear-gradient(120deg, #84fab0 0%, #8fd3f4 100%); 11 | } 12 | 13 | h1 { 14 | display: flex; 15 | justify-content: center; 16 | margin: 0px; 17 | padding: 0px; 18 | font-size: 3rem; 19 | font-weight: 700; 20 | color: #333; 21 | text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.5); 22 | background-color: rgba(0, 0, 139, 0.371); 23 | border-radius: 7px 7px 0px 0px; 24 | } 25 | 26 | h2 { 27 | display: flex; 28 | font-size: 1.5em; 29 | margin-bottom: 0px; 30 | margin-top: 0px; 31 | justify-content: center; 32 | color: #666; 33 | text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); 34 | border: solid 1px #666; 35 | border-radius: 7px; 36 | } 37 | 38 | form { 39 | display: flex; 40 | flex-direction: column; 41 | justify-content: center; 42 | padding-left: 5px; 43 | padding-right: 5px; 44 | background-color: #17224440; 45 | border-radius: 10px; 46 | width: 100%; /* Add this line */ 47 | } 48 | 49 | .chat-button { 50 | display: inline-block; 51 | background-color: #ff0000; 52 | color: white; 53 | padding: 10px 20px; 54 | border: none; 55 | border-radius: 4px; 56 | cursor: pointer; 57 | text-decoration: none; 58 | margin-top: 10px; 59 | transition: all 0.3s; 60 | } 61 | 62 | .chat-button:hover { 63 | background-color: #490000; 64 | transform: translateY(-2px); 65 | box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); 66 | } 67 | 68 | .chat-container { 69 | width: 80%; 70 | margin: 0 auto; 71 | display: flex; 72 | flex-direction: column; 73 | justify-content: space-between; 74 | height: 90vh; 75 | padding: 0px 20px 20px 20px; 76 | box-sizing: border-box; 77 | border-radius: 10px; 78 | box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); 79 | } 80 | 81 | .chat-window { 82 | height: calc(100vh - 120px); 83 | display: flex; 84 | flex-direction: column; 85 | gap: 10px; 86 | flex-grow: 1; 87 | overflow-y: auto; 88 | 
padding: 20px; 89 | background-color: #ffffff; 90 | border-radius: 0px 0px 7px 7px; 91 | box-shadow: 0 1px 3px rgba(0, 0, 0, 0.5); 92 | margin-bottom: 10px; 93 | } 94 | 95 | .user-message, 96 | .ai-message { 97 | display: flex; 98 | align-items: center; 99 | padding: 10px 15px; 100 | border-radius: 13px; 101 | margin-bottom: 5px; 102 | font-size: 1.5rem; 103 | line-height: 1.4; 104 | width: fit-content; 105 | word-wrap: break-word; 106 | animation: fadeIn 0.5s; 107 | } 108 | 109 | @keyframes fadeIn { 110 | from { 111 | opacity: 0; 112 | transform: translateY(20px); 113 | } 114 | 115 | to { 116 | opacity: 1; 117 | transform: translateY(0); 118 | } 119 | } 120 | 121 | .user-message { 122 | background-color: #007bff; 123 | color: #ffffff; 124 | } 125 | 126 | .ai-message { 127 | background-color: #8d8d8d; 128 | color: #333333; 129 | } 130 | 131 | #chat-form { 132 | display: flex; 133 | align-items: center; 134 | justify-content: space-between; 135 | } 136 | 137 | #user-input { 138 | flex-grow: 1; 139 | padding: 10px; 140 | border: 1px solid #ccc; 141 | border-radius: 10px; 142 | outline: none; 143 | margin: 5px auto; 144 | margin-bottom: 10px; 145 | transition: border-color 0.3s; 146 | font-size: 1.5rem; 147 | width: calc(100% - 20px); 148 | } 149 | 150 | #user-input:focus { 151 | border-color: #007bff; 152 | } 153 | 154 | input[type="submit"] { 155 | background-color: #007bff; 156 | color: #ffffff; 157 | padding: 20px 40px; 158 | border: none; 159 | border-radius: 20px; 160 | cursor: pointer; 161 | font-size: 1.5rem; 162 | transition: all 0.3s; 163 | } 164 | 165 | input[type="submit"]:hover { 166 | background-color: #0056b3; 167 | transform: translateY(-2px); 168 | box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); 169 | } 170 | 171 | .tabBar { 172 | display: flex; 173 | justify-content: center; 174 | margin-bottom: 10px; 175 | margin-top: 10px; 176 | gap: 50px; 177 | } 178 | 179 | #clear-chat-btn { 180 | background-color: #dc3545; 181 | color: #ffffff; 182 | padding: 20px 40px; 183 | border: none; 184 | border-radius: 20px; 185 | cursor: pointer; 186 | font-size: 1.5rem; 187 | transition: all 0.3s; 188 | align-self: center; 189 | } 190 | 191 | #clear-chat-btn:hover { 192 | background-color: #c82333; 193 | transform: translateY(-2px); 194 | box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); 195 | } 196 | 197 | .aiavatar { 198 | width: 40px; 199 | height: 40px; 200 | border-radius: 50%; 201 | margin-right: 10px; 202 | margin-top: 5px; 203 | object-fit: cover; 204 | } 205 | 206 | .useravatar { 207 | width: 40px; 208 | height: 40px; 209 | border-radius: 50%; 210 | margin-left: 10px; 211 | margin-top: 5px; 212 | object-fit: cover; 213 | } 214 | 215 | .message-wrapper { 216 | display: flex; 217 | justify-content: flex-end; 218 | } 219 | 220 | .message-wrapper.ai { 221 | justify-content: flex-start; 222 | } 223 | 224 | @media screen and (max-width: 768px) { 225 | .chat-container { 226 | width: 100%; 227 | height: 95vh; 228 | border-radius: 7px; 229 | } 230 | 231 | .chat-window { 232 | height: 95vh; 233 | } 234 | } 235 | -------------------------------------------------------------------------------- /static/chat.js: -------------------------------------------------------------------------------- 1 | $(document).ready(function () { 2 | function clearChatHistory() { 3 | $("#chat-window").empty(); 4 | } 5 | 6 | function scrollToBottom() { 7 | const chatWindow = $("#chat-window")[0]; 8 | chatWindow.scrollTop = chatWindow.scrollHeight; 9 | } 10 | 11 | let memory = []; 12 | 13 | function showTypingAnimation() { 14 | const 
typingDots = '...'; 15 | $("#chat-window").append(`
${typingDots}
`); 16 | scrollToBottom(); 17 | } 18 | 19 | function removeTypingAnimation() { 20 | $("#typing-animation").remove(); 21 | } 22 | 23 | // Preload images 24 | function preloadImages() { 25 | const imagesToPreload = ["/static/U1.webp", "/static/J5.webp"]; 26 | imagesToPreload.forEach(imageSrc => { 27 | const img = new Image(); 28 | img.src = imageSrc; 29 | }); 30 | } 31 | 32 | preloadImages(); 33 | 34 | $("#input-form").submit(async function (event) { 35 | event.preventDefault(); 36 | const userText = $("#user-input").val().trim(); 37 | if (!userText) return; 38 | 39 | $("#chat-window").append(`
${userText}
`); 40 | $("#user-input").val(""); 41 | 42 | showTypingAnimation(); 43 | 44 | try { 45 | const response = await $.ajax({ 46 | url: "/chat", // Make sure this endpoint is correct 47 | type: "POST", 48 | contentType: "application/json", 49 | data: JSON.stringify({ user_input: userText, memory }), 50 | }); 51 | 52 | removeTypingAnimation(); 53 | $("#chat-window").append(`
${response.response}
`); 54 | memory = response.memory; 55 | scrollToBottom(); 56 | } catch (error) { 57 | console.error("Error: Unable to get a response from the assistant.", error); 58 | $("#chat-window").append(`
Error: Unable to get a response from the assistant.
`); 59 | } 60 | }); 61 | 62 | $("#clear-chat-btn").click(function () { 63 | clearChatHistory(); 64 | }); 65 | }); -------------------------------------------------------------------------------- /templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | GPT_ALL 7 | 8 | 9 | 10 | 11 |
12 |

GPT_ALL

13 |
14 |
15 | 21 |
22 | 23 | 24 |
25 |
26 |
27 | 28 | 29 | 34 | 35 | -------------------------------------------------------------------------------- /uploads/test_pic.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/uploads/test_pic.jpg -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eloquent-Algorithmics/GPT_ALL/2ad8d7df177274104bf70a302b240a2f49b283da/utils/__init__.py -------------------------------------------------------------------------------- /utils/core_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: core_tools.py 5 | # Path: utils/core_tools.py 6 | 7 | """ 8 | 9 | Core Tools 10 | =============== 11 | This module contains the core system tools that use the local machine only. 12 | 13 | 14 | Functions 15 | --------- 16 | display_help(tools) 17 | Display the available tools. 18 | get_current_date_time() 19 | Get the current EST date and time. 20 | 21 | """ 22 | from datetime import datetime 23 | import tzlocal 24 | import pytz 25 | from rich.console import Console 26 | 27 | console = Console() 28 | 29 | 30 | def display_help(tools): 31 | """ 32 | Display the available enabled tools and or functions. 33 | 34 | Args: 35 | tools (list): A list of tools. 36 | # Add more information to the docstring. 37 | 38 | """ 39 | console.print("\n[bold]Available Tools:[/bold]\n", style="bold blue") 40 | for tool in tools: 41 | if isinstance(tool, dict) and "function" in tool: 42 | function_info = tool["function"] 43 | name = function_info.get("name", "Unnamed") 44 | description = function_info.get( 45 | "description", "No description available." 46 | ) 47 | console.print(f"[bold]{name}[/bold]: {description}") 48 | else: 49 | console.print(f"[red]Invalid tool format: {tool}[/red]") 50 | 51 | 52 | async def get_current_date_time() -> str: 53 | """ 54 | Get the current EST date and time. 55 | 56 | Returns: 57 | str: The current UTC date and time. 58 | """ 59 | local_timezone = tzlocal.get_localzone() 60 | now = datetime.now(local_timezone) 61 | now_est = now.astimezone(pytz.timezone("US/Eastern")) 62 | return now_est.strftime( 63 | "The current date and time is %B %d, %Y, %I:%M %p EST." 64 | ) 65 | -------------------------------------------------------------------------------- /utils/openai_dalle_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: openai_dalle_tools.py 5 | # Path: utils/openai_dalle_tools.py 6 | 7 | """ 8 | This file contains the OpenAi Dall-e tools for the AI Assistant. 9 | """ 10 | 11 | from openai import OpenAI, AsyncOpenAI 12 | from config import ( 13 | OPENAI_API_KEY, 14 | OPENAI_ORG_ID 15 | ) 16 | 17 | api_key = OPENAI_API_KEY 18 | openai_org_id = OPENAI_ORG_ID 19 | 20 | # Create an OpenAI client instance using keyword arguments 21 | client = OpenAI(api_key=api_key, organization=openai_org_id, timeout=60) 22 | 23 | # Create an AsyncOpenAI client instance using keyword arguments 24 | client_async = AsyncOpenAI(api_key=api_key, organization=openai_org_id, timeout=60) 25 | 26 | 27 | async def generate_an_image_with_dalle3(**kwargs) -> str: 28 | """ 29 | Generate an image with DALL-E 3. 
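    Accepts the keyword arguments prompt, n, size, quality, style and response_format
    (see the defaults below) and returns the URL of the first generated image.

    Example (illustrative; the prompt is a placeholder):

        image_url = await generate_an_image_with_dalle3(prompt="a watercolor lighthouse at dawn", size="1024x1024", quality="hd", style="natural")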
30 | """ 31 | prompt = kwargs.get("prompt", "") 32 | n = kwargs.get("n", 1) 33 | size = kwargs.get("size", "1024x1024") 34 | quality = kwargs.get("quality", "hd") 35 | style = kwargs.get("style", "natural") 36 | response_format = kwargs.get("response_format", "url") 37 | 38 | response = await client_async.images.generate( 39 | model="dall-e-3", # The model identifier 40 | prompt=prompt, # Prompt required for image generation 4000 characters max 41 | n=n, # Must be between 1 and 10 42 | size=size, # 1024x1024, 1792x1024, or 1024x1792 dall-e-3 model 43 | quality=quality, # hd or standard 44 | style=style, # natural or vivid 45 | response_format=response_format, # b64_json or url 46 | ) 47 | await client_async.close() 48 | return response.data[0].url 49 | -------------------------------------------------------------------------------- /utils/openai_model_tools.py: -------------------------------------------------------------------------------- 1 | 2 | # !/usr/bin/env python 3 | # coding: utf-8 4 | # Filename: openai_model_tools.py 5 | # Path: utils/openai_model_tools.py 6 | 7 | """ 8 | Core tools. 9 | 10 | This file contains the core tools for the AI Assistant. 11 | """ 12 | import requests 13 | import base64 14 | import mimetypes 15 | from openai import OpenAI, AsyncOpenAI 16 | from rich.console import Console 17 | from pathlib import Path 18 | from plugins._gmail_plugin.drive_tools import ( 19 | available_functions, 20 | ) 21 | from config import ( 22 | OPENAI_API_KEY, 23 | OPENAI_ORG_ID, 24 | ) 25 | console = Console() 26 | 27 | api_key = OPENAI_API_KEY 28 | openai_org_id = OPENAI_ORG_ID 29 | 30 | # Create an OpenAI client instance using keyword arguments 31 | gpt4_client = OpenAI(api_key=api_key, organization=openai_org_id, timeout=10) 32 | 33 | # Create an AsyncOpenAI client instance using keyword arguments 34 | gpt4_client_async = AsyncOpenAI(api_key=api_key, organization=openai_org_id, timeout=10) 35 | 36 | 37 | def ask_chat_gpt_4_0314_synchronous(**kwargs) -> str: 38 | """ 39 | Ask ChatGPT a question and return the response. 40 | 41 | Args: 42 | kwargs (dict): The keyword arguments to pass to the function. 43 | Returns: 44 | str: The response from ChatGPT. 45 | """ 46 | 47 | question = kwargs.get("question", "") 48 | text = kwargs.get("text", "") 49 | 50 | messages = [ 51 | { 52 | "role": "system", 53 | "content": "You are a specialized AI language model designed to act as an expert tool within a larger conversational system. Your role is to provide detailed and expert-level responses to queries directed to you by the controller AI. You should focus on delivering precise information and insights based on your specialized knowledge and capabilities. Your responses should be concise, relevant, and strictly within the scope of the expertise you represent. You are not responsible for maintaining the overall conversation with the end user, but rather for supporting the controller AI by processing and responding to specific requests for information or analysis. 
Adhere to the constraints provided by the controller, such as token limits and context relevance, and ensure that your contributions are well-reasoned and can be seamlessly integrated into the broader conversation managed by the controller AI.", 54 | }, 55 | {"role": "user", "content": question}, 56 | {"role": "assistant", "content": text}, 57 | ] 58 | 59 | response = gpt4_client.chat.completions.create( 60 | model="gpt-4-0314", 61 | messages=messages, 62 | temperature=0, 63 | max_tokens=2048, 64 | top_p=0.3, 65 | frequency_penalty=0, 66 | presence_penalty=0, 67 | ) 68 | 69 | if ( 70 | response.choices 71 | and response.choices[0].message 72 | and response.choices[0].message.content 73 | ): 74 | return response.choices[0].message.content 75 | else: 76 | return "An error occurred or no content was returned." 77 | 78 | 79 | async def ask_chat_gpt_4_0314_asynchronous(**kwargs) -> str: 80 | """ 81 | Ask ChatGPT a question and return the response. 82 | 83 | Args: 84 | kwargs (dict): The keyword arguments to pass to the function. 85 | Returns: 86 | str: The response from ChatGPT. 87 | """ 88 | 89 | question = kwargs.get("question", "") 90 | text = kwargs.get("text", "") 91 | 92 | messages = [ 93 | { 94 | "role": "system", 95 | "content": "You are a specialized AI language model designed to act as an expert tool within a larger conversational system. Your role is to provide detailed and expert-level responses to queries directed to you by the controller AI. You should focus on delivering precise information and insights based on your specialized knowledge and capabilities. Your responses should be concise, relevant, and strictly within the scope of the expertise you represent. You are not responsible for maintaining the overall conversation with the end user, but rather for supporting the controller AI by processing and responding to specific requests for information or analysis. Adhere to the constraints provided by the controller, such as token limits and context relevance, and ensure that your contributions are well-reasoned and can be seamlessly integrated into the broader conversation managed by the controller AI.", 96 | }, 97 | {"role": "user", "content": question}, 98 | {"role": "assistant", "content": text}, 99 | ] 100 | 101 | response = await gpt4_client_async.chat.completions.create( 102 | model="gpt-4-0314", 103 | messages=messages, 104 | temperature=0.2, 105 | max_tokens=2048, 106 | top_p=0.5, 107 | frequency_penalty=0, 108 | presence_penalty=0, 109 | ) 110 | 111 | if ( 112 | response.choices 113 | and response.choices[0].message 114 | and response.choices[0].message.content 115 | ): 116 | return response.choices[0].message.content 117 | else: 118 | return "An error occurred or no content was returned." 119 | 120 | 121 | def ask_chat_gpt_4_0613_synchronous(**kwargs) -> str: 122 | """ 123 | Ask ChatGPT a question and return the response. 124 | 125 | Args: 126 | kwargs (dict): The keyword arguments to pass to the function. 127 | Returns: 128 | str: The response from ChatGPT. 129 | """ 130 | 131 | question = kwargs.get("question", "") 132 | text = kwargs.get("text", "") 133 | 134 | messages = [ 135 | { 136 | "role": "system", 137 | "content": "You are a specialized AI language model designed to act as an expert tool within a larger conversational system. Your role is to provide detailed and expert-level responses to queries directed to you by the controller AI. You should focus on delivering precise information and insights based on your specialized knowledge and capabilities. 
Your responses should be concise, relevant, and strictly within the scope of the expertise you represent. You are not responsible for maintaining the overall conversation with the end user, but rather for supporting the controller AI by processing and responding to specific requests for information or analysis. Adhere to the constraints provided by the controller, such as token limits and context relevance, and ensure that your contributions are well-reasoned and can be seamlessly integrated into the broader conversation managed by the controller AI.", 138 | }, 139 | {"role": "user", "content": question}, 140 | {"role": "assistant", "content": text}, 141 | ] 142 | 143 | response = gpt4_client.chat.completions.create( 144 | model="gpt-4-0613", 145 | messages=messages, 146 | temperature=0.2, 147 | max_tokens=2048, 148 | top_p=0.5, 149 | frequency_penalty=0, 150 | presence_penalty=0, 151 | ) 152 | 153 | # Check if the response has the expected structure and content 154 | if ( 155 | response.choices 156 | and response.choices[0].message 157 | and response.choices[0].message.content 158 | ): 159 | return response.choices[0].message.content 160 | else: 161 | # Handle the case where the expected content is not available 162 | return "An error occurred or no content was returned." 163 | 164 | 165 | async def ask_chat_gpt_4_0613_asynchronous(**kwargs) -> str: 166 | """ 167 | Ask ChatGPT a question and return the response. 168 | 169 | Args: 170 | kwargs (dict): The keyword arguments to pass to the function. 171 | Returns: 172 | str: The response from ChatGPT. 173 | """ 174 | 175 | question = kwargs.get("question", "") 176 | text = kwargs.get("text", "") 177 | 178 | messages = [ 179 | { 180 | "role": "system", 181 | "content": "You are a specialized AI language model designed to act as an expert tool within a larger conversational system. Your role is to provide detailed and expert-level responses to queries directed to you by the controller AI. You should focus on delivering precise information and insights based on your specialized knowledge and capabilities. Your responses should be concise, relevant, and strictly within the scope of the expertise you represent. You are not responsible for maintaining the overall conversation with the end user, but rather for supporting the controller AI by processing and responding to specific requests for information or analysis. Adhere to the constraints provided by the controller, such as token limits and context relevance, and ensure that your contributions are well-reasoned and can be seamlessly integrated into the broader conversation managed by the controller AI.", 182 | }, 183 | {"role": "user", "content": question}, 184 | {"role": "assistant", "content": text}, 185 | ] 186 | 187 | response = await gpt4_client_async.chat.completions.create( 188 | model="gpt-4-0613", 189 | messages=messages, 190 | temperature=0.2, 191 | max_tokens=2048, 192 | top_p=0.5, 193 | frequency_penalty=0, 194 | presence_penalty=0, 195 | ) 196 | 197 | if ( 198 | response.choices 199 | and response.choices[0].message 200 | and response.choices[0].message.content 201 | ): 202 | return response.choices[0].message.content 203 | else: 204 | return "An error occurred or no content was returned."
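# Illustrative sketch: how a controller that has registered these helpers in an
# "available_functions" mapping (as web_app.py does) might dispatch a tool call
# returned by the model. The function name, argument names and the example JSON
# payload below are illustrative assumptions.
async def example_dispatch_expert_tool(available_functions, tool_name, arguments_json):
    """Decode the model-supplied JSON arguments and invoke the matching helper."""
    import inspect
    import json

    func = available_functions[tool_name]
    kwargs = json.loads(arguments_json)  # e.g. '{"question": "...", "text": "..."}'
    if inspect.iscoroutinefunction(func):
        return await func(**kwargs)  # async helpers, e.g. ask_chat_gpt_4_0613_asynchronous
    return func(**kwargs)  # sync helpers, e.g. ask_chat_gpt_4_0613_synchronous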
205 | 206 | 207 | # Function to encode the image 208 | def encode_image(image_path): 209 | mime_type, _ = mimetypes.guess_type(image_path) 210 | if mime_type is None: 211 | raise ValueError("Could not determine the MIME type of the image.") 212 | 213 | with open(image_path, "rb") as image_file: 214 | encoded_string = base64.b64encode(image_file.read()).decode('utf-8') 215 | return f"data:{mime_type};base64,{encoded_string}" 216 | 217 | 218 | def ask_chat_gpt_4_32k_0314_synchronous(**kwargs) -> str: 219 | """ 220 | Ask ChatGPT a question and return the response. 221 | 222 | Args: 223 | kwargs (dict): The keyword arguments to pass to the function. 224 | Returns: 225 | str: The response from ChatGPT. 226 | """ 227 | 228 | question = kwargs.get("question", "") 229 | text = kwargs.get("text", "") 230 | 231 | messages = [ 232 | { 233 | "role": "system", 234 | "content": "You are a specialized AI language model designed to act as an expert tool within a larger conversational system. Your role is to provide detailed and expert-level responses to queries directed to you by the controller AI. You should focus on delivering precise information and insights based on your specialized knowledge and capabilities. Your responses should be concise, relevant, and strictly within the scope of the expertise you represent. You are not responsible for maintaining the overall conversation with the end user, but rather for supporting the controller AI by processing and responding to specific requests for information or analysis. Adhere to the constraints provided by the controller, such as token limits and context relevance, and ensure that your contributions are well-reasoned and can be seamlessly integrated into the broader conversation managed by the controller AI.", 235 | }, 236 | {"role": "user", "content": question}, 237 | {"role": "assistant", "content": text}, 238 | ] 239 | 240 | response = gpt4_client.chat.completions.create( 241 | model="gpt-4-32k-0314", 242 | messages=messages, 243 | temperature=0, 244 | max_tokens=2048, 245 | top_p=0.3, 246 | frequency_penalty=0, 247 | presence_penalty=0, 248 | ) 249 | 250 | if ( 251 | response.choices 252 | and response.choices[0].message 253 | and response.choices[0].message.content 254 | ): 255 | return response.choices[0].message.content 256 | else: 257 | return "An error occurred or no content was returned." 258 | 259 | 260 | async def ask_chat_gpt_4_32k_0314_asynchronous(**kwargs) -> str: 261 | """ 262 | Ask ChatGPT a question and return the response. 263 | 264 | Args: 265 | kwargs (dict): The keyword arguments to pass to the function. 266 | Returns: 267 | str: The response from ChatGPT. 268 | """ 269 | 270 | question = kwargs.get("question", "") 271 | text = kwargs.get("text", "") 272 | 273 | messages = [ 274 | { 275 | "role": "system", 276 | "content": "You are a specialized AI language model designed to act as an expert tool within a larger conversational system. Your role is to provide detailed and expert-level responses to queries directed to you by the controller AI. You should focus on delivering precise information and insights based on your specialized knowledge and capabilities. Your responses should be concise, relevant, and strictly within the scope of the expertise you represent. You are not responsible for maintaining the overall conversation with the end user, but rather for supporting the controller AI by processing and responding to specific requests for information or analysis. 
Adhere to the constraints provided by the controller, such as token limits and context relevance, and ensure that your contributions are well-reasoned and can be seamlessly integrated into the broader conversation managed by the controller AI.", 277 | }, 278 | {"role": "user", "content": question}, 279 | {"role": "assistant", "content": text}, 280 | ] 281 | 282 | response = await gpt4_client_async.chat.completions.create( 283 | model="gpt-4-32k-0314", 284 | messages=messages, 285 | temperature=0.2, 286 | max_tokens=2048, 287 | top_p=0.5, 288 | frequency_penalty=0, 289 | presence_penalty=0, 290 | ) 291 | 292 | if ( 293 | response.choices 294 | and response.choices[0].message 295 | and response.choices[0].message.content 296 | ): 297 | return response.choices[0].message.content 298 | else: 299 | return "An error occurred or no content was returned." 300 | 301 | 302 | # Function to send the image to the vision model 303 | async def ask_gpt_4_vision(image_name, drive_service=None): 304 | # Check if the image exists in the local uploads folder 305 | local_image_path = Path("uploads") / image_name 306 | if local_image_path.is_file(): 307 | base64_image = encode_image(local_image_path) 308 | else: 309 | # If not found locally, search in Google Drive (if drive_service is provided) 310 | if drive_service: 311 | files_info = await available_functions["list_files"](drive_service, "MyDrive/GPT_ALL/uploads") 312 | file_id = next((f['id'] for f in files_info if f['name'] == image_name), None) 313 | if file_id: 314 | # Download the file from Google Drive 315 | local_image_path = await available_functions["download_file"](drive_service, file_id, "uploads/") 316 | base64_image = encode_image(local_image_path) 317 | else: 318 | return "Image not found in local uploads folder or Google Drive." 319 | else: 320 | return "Image not found in local uploads folder." 
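# At this point "base64_image" holds a complete data URL ("data:<mime>;base64,...")
# produced by encode_image above, so it is passed directly as the "url" value of the
# image_url content block in the request payload below.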
321 | 322 | # Send the request to the vision model 323 | headers = { 324 | "Content-Type": "application/json", 325 | "Authorization": f"Bearer {OPENAI_API_KEY}" 326 | } 327 | 328 | payload = { 329 | "model": "gpt-4-vision-preview", 330 | "messages": [ 331 | { 332 | "role": "user", 333 | "content": [ 334 | { 335 | "type": "text", 336 | "text": "question" 337 | }, 338 | { 339 | "type": "image_url", 340 | "image_url": { 341 | "url": base64_image 342 | } 343 | } 344 | ] 345 | } 346 | ], 347 | "max_tokens": 600 348 | } 349 | 350 | response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload, timeout=60) 351 | return response.json() 352 | -------------------------------------------------------------------------------- /web_app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import asyncio 4 | from quart import Quart, request, jsonify, send_file, send_from_directory 5 | from quart_cors import cors 6 | from hypercorn.config import Config 7 | from hypercorn.asyncio import serve 8 | from config import MAIN_SYSTEM_PROMPT 9 | from app import run_conversation, enable_plugins 10 | from utils.openai_model_tools import ( 11 | ask_chat_gpt_4_0314_synchronous, 12 | ask_chat_gpt_4_0314_asynchronous, 13 | ask_chat_gpt_4_32k_0314_synchronous, 14 | ask_chat_gpt_4_32k_0314_asynchronous, 15 | ask_chat_gpt_4_0613_synchronous, 16 | ask_chat_gpt_4_0613_asynchronous, 17 | ask_gpt_4_vision, 18 | ) 19 | from utils.openai_dalle_tools import generate_an_image_with_dalle3 20 | from utils.core_tools import get_current_date_time 21 | 22 | app = Quart(__name__) 23 | app = cors(app, allow_origin="*") 24 | 25 | 26 | @app.route("/") 27 | async def index(): 28 | return await send_file("templates/index.html") 29 | 30 | 31 | @app.route("/static/<path:path>") 32 | async def send_static(path): 33 | return await send_from_directory("static", path) 34 | 35 | 36 | def format_response_text(response_text): 37 | response_text = response_text.replace("[View Image]", "View Image") 38 | 39 | # Turn any DALL-E image URL (up to the next whitespace) into a clickable link 40 | url_pattern = r"(https://oaidalleapiprodscus\.blob\.core\.windows\.net/private/org-[^\s]+)" 41 | response_text = re.sub(url_pattern, r'<a href="\1">View Image</a>', response_text) 42 | 43 | lines = response_text.split("\n") 44 | lines = [line.strip() for line in lines if line.strip()] 45 | 46 | response_text = "<br>
".join(lines) 47 | 48 | return response_text 49 | 50 | 51 | available_functions = { 52 | "get_current_date_time": get_current_date_time, 53 | "ask_chat_gpt_4_0314_synchronous": ask_chat_gpt_4_0314_synchronous, 54 | "ask_chat_gpt_4_0314_asynchronous": ask_chat_gpt_4_0314_asynchronous, 55 | "ask_chat_gpt_4_32k_0314_synchronous": ask_chat_gpt_4_32k_0314_synchronous, 56 | "ask_chat_gpt_4_32k_0314_asynchronous": ask_chat_gpt_4_32k_0314_asynchronous, 57 | "ask_chat_gpt_4_0613_synchronous": ask_chat_gpt_4_0613_synchronous, 58 | "ask_chat_gpt_4_0613_asynchronous": ask_chat_gpt_4_0613_asynchronous, 59 | "generate_an_image_with_dalle3": generate_an_image_with_dalle3, 60 | "ask_gpt_4_vision": ask_gpt_4_vision, 61 | } 62 | 63 | tools = [ 64 | { 65 | "type": "function", 66 | "function": { 67 | "name": "get_current_date_time", 68 | "description": "Get the current date and time from the local machine.", 69 | }, 70 | }, 71 | { 72 | "type": "function", 73 | "function": { 74 | "name": "ask_chat_gpt_4_0314_synchronous", 75 | "description": "Ask a more experienced colleague for assistance.", 76 | "parameters": { 77 | "type": "object", 78 | "properties": { 79 | "temperature": { 80 | "type": "integer", 81 | "description": "The temperature associated with request: 0 for factual, 2 for creative.", 82 | }, 83 | "question": { 84 | "type": "string", 85 | "description": "What are you, the ai assistant, requesting to be done with the text you are providing?", 86 | }, 87 | "text": { 88 | "type": "string", 89 | "description": "The text to be analyzed", 90 | }, 91 | }, 92 | "required": ["question", "text"], 93 | }, 94 | }, 95 | }, 96 | { 97 | "type": "function", 98 | "function": { 99 | "name": "ask_chat_gpt_4_0314_asynchronous", 100 | "description": "Ask a more experienced colleague for assistance.", 101 | "parameters": { 102 | "type": "object", 103 | "properties": { 104 | "temperature": { 105 | "type": "integer", 106 | "description": "The temperature associated with request: 0 for factual, 2 for creative.", 107 | }, 108 | "question": { 109 | "type": "string", 110 | "description": "What are you, the ai assistant, requesting to be done with the text you are providing?", 111 | }, 112 | "text": { 113 | "type": "string", 114 | "description": "The text to be analyzed", 115 | }, 116 | }, 117 | "required": ["question", "text"], 118 | }, 119 | }, 120 | }, 121 | { 122 | "type": "function", 123 | "function": { 124 | "name": "ask_chat_gpt_4_32k_0314_synchronous", 125 | "description": "Ask a more experienced colleague for assistance.", 126 | "parameters": { 127 | "type": "object", 128 | "properties": { 129 | "temperature": { 130 | "type": "integer", 131 | "description": "The temperature associated with request: 0 for factual, 2 for creative.", 132 | }, 133 | "question": { 134 | "type": "string", 135 | "description": "What are you, the ai assistant, requesting to be done with the text you are providing?", 136 | }, 137 | "text": { 138 | "type": "string", 139 | "description": "The text to be analyzed", 140 | }, 141 | }, 142 | "required": ["question", "text"], 143 | }, 144 | }, 145 | }, 146 | { 147 | "type": "function", 148 | "function": { 149 | "name": "ask_chat_gpt_4_32k_0314_asynchronous", 150 | "description": "Ask a more experienced colleague for assistance.", 151 | "parameters": { 152 | "type": "object", 153 | "properties": { 154 | "temperature": { 155 | "type": "integer", 156 | "description": "The temperature associated with request: 0 for factual, 2 for creative.", 157 | }, 158 | "question": { 159 | "type": "string", 160 | 
"description": "What are you, the ai assistant, requesting to be done with the text you are providing?", 161 | }, 162 | "text": { 163 | "type": "string", 164 | "description": "The text to be analyzed", 165 | }, 166 | }, 167 | "required": ["question", "text"], 168 | }, 169 | }, 170 | }, 171 | { 172 | "type": "function", 173 | "function": { 174 | "name": "ask_chat_gpt_4_0613_synchronous", 175 | "description": "Ask a more experienced colleague for assistance.", 176 | "parameters": { 177 | "type": "object", 178 | "properties": { 179 | "temperature": { 180 | "type": "integer", 181 | "description": "The temperature associated with request: 0 for factual, 2 for creative.", 182 | }, 183 | "question": { 184 | "type": "string", 185 | "description": "What are you, the ai assistant, requesting to be done with the text you are providing?", 186 | }, 187 | "text": { 188 | "type": "string", 189 | "description": "The text to be analyzed", 190 | }, 191 | "tools": { 192 | "type": "string", 193 | "description": "The tools to use for the request.", 194 | }, 195 | "tool_choice": { 196 | "type": "string", 197 | "description": "The tool choice to use for the request.", 198 | }, 199 | }, 200 | "required": ["question", "text"], 201 | }, 202 | }, 203 | }, 204 | { 205 | "type": "function", 206 | "function": { 207 | "name": "ask_chat_gpt_4_0613_asynchronous", 208 | "description": "Ask a more experienced colleague for assistance.", 209 | "parameters": { 210 | "type": "object", 211 | "properties": { 212 | "temperature": { 213 | "type": "integer", 214 | "description": "The temperature associated with request: 0 for factual, 2 for creative.", 215 | }, 216 | "question": { 217 | "type": "string", 218 | "description": "What are you, the ai assistant, requesting to be done with the text you are providing?", 219 | }, 220 | "text": { 221 | "type": "string", 222 | "description": "The text to be analyzed", 223 | }, 224 | "tools": { 225 | "type": "string", 226 | "description": "The tools to use for the request.", 227 | }, 228 | "tool_choice": { 229 | "type": "string", 230 | "description": "The tool choice to use for the request.", 231 | }, 232 | }, 233 | "required": ["question", "text"], 234 | }, 235 | }, 236 | }, 237 | { 238 | "type": "function", 239 | "function": { 240 | "name": "ask_gpt_4_vision", 241 | "description": "Ask GPT-4 Vision a question about a specific image file located in the 'uploads' folder.", 242 | "parameters": { 243 | "type": "object", 244 | "properties": { 245 | "image_name": { 246 | "type": "string", 247 | "description": "The name of the image file in the 'uploads' folder.", 248 | }, 249 | }, 250 | "required": ["image_name"], 251 | }, 252 | }, 253 | }, 254 | { 255 | "type": "function", 256 | "function": { 257 | "name": "generate_an_image_with_dalle3", 258 | "description": "Generate an image with DALL-E 3.", 259 | "parameters": { 260 | "type": "object", 261 | "properties": { 262 | "prompt": { 263 | "type": "string", 264 | "description": "The prompt to use for image generation.", 265 | }, 266 | "n": { 267 | "type": "integer", 268 | "description": "The number of images to generate.", 269 | }, 270 | "size": { 271 | "type": "string", 272 | "description": "The image size to generate.", 273 | }, 274 | "quality": { 275 | "type": "string", 276 | "description": "The image quality to generate.", 277 | }, 278 | "style": { 279 | "type": "string", 280 | "description": "The image style to generate. 
natural or vivid", 281 | }, 282 | "response_format": { 283 | "type": "string", 284 | "description": "The response format to use for image generation b64_json or url.", 285 | }, 286 | }, 287 | "required": ["prompt"], 288 | }, 289 | }, 290 | }, 291 | ] 292 | 293 | 294 | @app.route("/chat", methods=["POST"]) 295 | async def chat(): 296 | data = await request.json 297 | user_input = data.get("user_input") 298 | if not user_input: 299 | return jsonify({"error": "User input is required"}), 400 300 | 301 | memory = data.get("memory", []) 302 | mem_size = data.get("mem_size", 200) 303 | 304 | base_functions, plugin_tools = await enable_plugins({}, []) 305 | all_functions = {**available_functions, **base_functions} 306 | 307 | final_response, memory = await run_conversation( 308 | messages=[ 309 | {"role": "system", "content": f"{MAIN_SYSTEM_PROMPT}"}, 310 | {"role": "assistant", "content": "Understood. As we continue, feel free to direct any requests or tasks you'd like assistance with. Whether it's querying information, managing schedules, processing data, or utilizing any of the tools and functionalities I have available."}, 311 | {"role": "user", "content": f"{user_input}"}, 312 | ], 313 | tools=tools + plugin_tools, 314 | available_functions=all_functions, 315 | original_user_input=user_input, 316 | mem_size=mem_size, 317 | memory=memory, 318 | ) 319 | 320 | response_message = final_response.choices[0].message 321 | response_text = response_message.content if response_message.content is not None else "I'm not sure how to help with that." 322 | 323 | response_text = format_response_text(response_text) 324 | 325 | return jsonify({"response": response_text, "memory": memory}) 326 | 327 | 328 | if __name__ == "__main__": 329 | config = Config() 330 | port = int(os.environ.get("PORT", 8080)) 331 | config.bind = [f"0.0.0.0:{port}"] 332 | asyncio.run(serve(app, config)) 333 | --------------------------------------------------------------------------------