├── .env.example
├── .gitignore
├── LICENSE
├── README.md
├── app
│   ├── __init__.py
│   ├── app.py
│   ├── static
│   │   ├── dashboard.js
│   │   └── styles.css
│   └── templates
│       ├── dashboard.html
│       └── index.html
├── integrations
│   ├── __init__.py
│   └── email
│       ├── __init__.py
│       ├── fetcher.py
│       └── gmail.py
├── main.py
├── poetry.lock
├── public
│   ├── client_id_secret.png
│   ├── dashboard.png
│   ├── gmail_api_circled.png
│   ├── google_api_library.png
│   ├── google_api_oauth.png
│   ├── google_cloud_api.png
│   ├── google_credentials_oauth.png
│   ├── google_oauth_json_download.png
│   ├── google_uris.png
│   └── test_user.png
├── pyproject.toml
├── tasks
│   ├── __init__.py
│   ├── agents.py
│   ├── execution.py
│   ├── processor.py
│   └── storage.py
├── tests
│   ├── babyagi.py
│   ├── colorlogs.py
│   ├── embedding.py
│   ├── entity_add.py
│   ├── graph_agent.py
│   ├── ollama_raw.py
│   └── ollama_streaming.py
└── utils
    ├── __init__.py
    ├── custom_log_formatter.py
    └── ollama.py

/.env.example:
--------------------------------------------------------------------------------
GOOGLE_CLIENT_ID="YOUR_GOOGLE_CLIENT_ID"
GOOGLE_CLIENT_SECRET="YOUR_GOOGLE_CLIENT_SECRET"

GOOGLE_REDIRECT_URI=http://localhost:8080/oauth2callback
GOOGLE_LOGIN_URI=http://localhost:8080/authorize

FLASK_SECRET_KEY="YOUR_FLASK_SECRET_KEY"

NEXUSDB_API_KEY="YOUR_NEXUSDB_API_KEY"

MAX_THREADS=4
INITIAL_EMAILS=1
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
poetry.lock

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# VS Code
.vscode/

# Other
other/
client_secret.json

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Non-Compete Open License

Copyright (c) 2024 Astra Analytics, Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to
the following conditions:

**Non-Compete Restriction**
You may not modify, change, or replace the database system (NexusDB) that the Software
uses. The Software is designed to work with NexusDB, and any alteration to the
database system is prohibited.

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Task Agent Starter Kit with NexusDB

![Task Dashboard](public/dashboard.png)

## Table of Contents

- [Task Agent Starter Kit with NexusDB](#task-agent-starter-kit-with-nexusdb)
  - [Table of Contents](#table-of-contents)
  - [Setup](#setup)
    - [Connecting to Gmail](#connecting-to-gmail)
      - [Step 1: Create a Project and Enable the Gmail API](#step-1-create-a-project-and-enable-the-gmail-api)
      - [Step 2: Configure OAuth Consent Screen](#step-2-configure-oauth-consent-screen)
      - [Step 3: Create Credentials](#step-3-create-credentials)
    - [Configure The AI Models](#configure-the-ai-models)
      - [Step 1: Install Ollama](#step-1-install-ollama)
    - [Environment Variables](#environment-variables)
      - [NexusDB API Key](#nexusdb-api-key)
      - [Max Threads](#max-threads)
      - [Initial Emails](#initial-emails)
  - [Installation](#installation)
  - [Running the app](#running-the-app)
    - [Using Poetry Shell](#using-poetry-shell)
    - [Using Poetry Run](#using-poetry-run)
  - [Contributing](#contributing)
  - [License](#license)
    - [Non-Compete Open License](#non-compete-open-license)
    - [Permissions](#permissions)
    - [Conditions](#conditions)
    - [Summary](#summary)
  - [NexusDB](#nexusdb)
    - [Earn 30% Commission on referrals](#earn-30-commission-on-referrals)
    - [Partner with us](#partner-with-us)

## Setup

To run this application, a few items must be set up separately:

- Gmail: For the app to connect to your Gmail account securely, you should create your own API credentials. We'll walk through the steps to create an application in Google Cloud and enable the Gmail API.
- AI Models: This application uses [Llama 3](https://ai.meta.com/blog/meta-llama-3/) for the AI agents and [mxbai-embed-large](https://www.mixedbread.ai/docs//mxbai-embed-large-v1#model-description) for embeddings. Neither model is included as part of the application; both must be downloaded separately. As with Gmail, the setup steps are below.

### Connecting to Gmail

#### Step 1: Create a Project and Enable the Gmail API

1. Navigate to the [Google Cloud Console](https://console.cloud.google.com/).
2. Create a new project if you don't already have one.
3. Enable the Gmail API: in the navigation menu, select “APIs & Services” -> “Library”.

It might not look like this; you may have to search for it in the search bar at the top.

![Google Cloud API](public/google_cloud_api.png)

![API Library](public/google_api_library.png)

4. Search for “Gmail API” and enable it for your project.

![Gmail API circled](public/gmail_api_circled.png)

#### Step 2: Configure OAuth Consent Screen

1. In the Google Cloud Console, go to “OAuth consent screen” under “APIs & Services”.

![Oauth](public/google_api_oauth.png)

2. Set the User Type to “External” and click “Create”.
3. Fill in the required fields, such as App name, User support email, and Developer contact information.
4. Save and continue until you finish the setup.

_Note: be sure to add yourself as a test user._
![test user](public/test_user.png)

#### Step 3: Create Credentials

1. In the Google Cloud Console, under “Credentials” (still within “APIs & Services”), click “Create Credentials” and choose “OAuth Client ID”.

![OAuth Client ID](public/google_credentials_oauth.png)

2. Select “Web application” as the Application type and give it a name.
3. Add `http://localhost` and `http://localhost:8080` as authorized JavaScript origins.

_Note: Google only allows localhost for testing, which is why main.py sets this as the host instead of Flask's default 127.0.0.1. If you want to use a different port, be sure to make the change here as well as in main.py._

4. Add `http://localhost:8080/oauth2callback` as an authorized redirect URI.

Your setup should look like this:

![Google URIs](public/google_uris.png)

5. After creating the credentials, click the download button to get your credentials:

![Download JSON](public/google_oauth_json_download.png)

![Your Client ID and Client Secret](public/client_id_secret.png)

6. Copy and paste your Google Client ID and Client Secret into the `.env` file at the root of your project.
7. **If you don't rename the file from `.env.example` to `.env`, you will get an error! Don't miss this step!**

### Configure The AI Models

#### Step 1: Install Ollama

1. Go to the [Ollama website](https://ollama.com/) and download the application.
2. After installing, run the following commands in your terminal:

```bash
ollama pull llama3
ollama pull mxbai-embed-large
```

More on each model here:

- [Llama 3](https://ai.meta.com/blog/meta-llama-3/)
- [mxbai-embed-large](https://www.mixedbread.ai/docs//mxbai-embed-large-v1#model-description)

3. Optionally, you can test the installation by running:

```bash
ollama run llama3
```

4. If you want to run more than one agent in parallel (recommended), you'll have to configure the Ollama server to do so. Make sure the Ollama application is not running, then start the server yourself in a terminal window using the command:

```bash
OLLAMA_NUM_PARALLEL=4 ollama serve
```

This starts the Ollama server with four parallel processes. You can choose any number you'd like, but it's recommended to set this number to double that of the MAX_THREADS environment variable, which we'll discuss in more detail later.

### Environment Variables

**Make sure you have renamed the file from `.env.example` to `.env`, or you will get an error!**

#### NexusDB API Key

This app runs on NexusDB, so if you don't have an API key yet, go to [nexusdb.io](https://www.nexusdb.io) and sign up for an account. After signing up, you will be able to get your API key from the dashboard and paste it into `.env`.

#### Max Threads

The MAX_THREADS variable determines the number of emails that can be processed simultaneously. To allow the task agent and graph creation agents to run concurrently for each email, you should set the Ollama server to run twice this number of parallel processes.
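For example, with the default `MAX_THREADS=4` from `.env.example`, the doubling rule above works out to:

```bash
# MAX_THREADS=4 in .env, two agents per email -> 8 parallel Ollama slots
OLLAMA_NUM_PARALLEL=8 ollama serve
```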
#### Initial Emails

This variable sets the number of emails already in the inbox that the application should add to the queue before waiting for new ones to come in.

## Installation

1. If you don't have Poetry installed, do that first:

   - **Install Poetry**:
     Poetry provides an easy way to install itself. Run the following command:

```bash
curl -sSL https://install.python-poetry.org | python3 -
```

   Alternatively, you can follow the instructions in the [Poetry documentation](https://python-poetry.org/docs/).

2. Clone the repository.

3. Install dependencies.

   Navigate to the project directory and install the project dependencies:

```bash
poetry install
```

## Running the app

To run the project, activate the Poetry shell or use Poetry's `run` command.

### Using Poetry Shell

Activate the Poetry shell:

```bash
poetry shell
```

Then, run the project in development mode:

```bash
python main.py
```

### Using Poetry Run

Alternatively, you can use the `poetry run` command to execute scripts without activating the shell:

```bash
poetry run python main.py
```

## Contributing

We welcome contributions! Please follow these steps to contribute to the project:

1. Fork the repository.
2. Create a new branch (`git checkout -b feature-branch`).
3. Make your changes and commit them (`git commit -m 'Add some feature'`).
4. Push to the branch (`git push origin feature-branch`).
5. Open a pull request.

## License

### Non-Compete Open License

**Non-Compete Restriction**: This software is designed to work exclusively with NexusDB. You are not permitted to modify, change, or replace the database system used by the software.

### Permissions

You are granted the following rights, free of charge:

- **Use**: You can use the software for any purpose.
- **Copy**: You can make copies of the software.
- **Modify**: You can modify the software as long as the database system remains NexusDB.
- **Merge**: You can merge the software with other projects.
- **Publish**: You can publish the software.
- **Distribute**: You can distribute the software.
- **Sublicense**: You can sublicense the software.

### Conditions

- Any distribution of the software must include the original copyright notice and this permission notice.
- The software is provided "as-is" without any warranty, express or implied. The authors are not liable for any damages or claims arising from the use of the software.

### Summary

This license grants you broad rights to use, modify, and distribute the software, with the specific condition that the underlying database system must remain NexusDB. This ensures the software's core functionality remains intact and aligned with the intended database system.

For the full license text, please see the LICENSE file included with this project.

## NexusDB

We on the NexusDB team are building the world's first `data web`, connecting public and private information in a way that's secure, fast, flexible, and highly queryable. It's also 20x faster to set up and 50% cheaper for comparable use cases than leading competitors!

Whether you need to store tables, graphs, embeddings, JSON objects, or blobs, NexusDB is the solution. See the [documentation](https://docs.nexusdb.io) for more details.

### Earn 30% Commission on referrals

If you like NexusDB and want to spread the love, you can get paid to do so through our [referral program](https://www.nexusdb.io/affiliates)! See the website for terms and additional details.

### Partner with us

We're growing fast and are actively seeking design partners and investors! If you enjoyed this demo, we'd love to work with you and tell you about our plans. Reach out to CEO Will Humble at [w@astraanalytics.co](mailto:w@astraanalytics.co) for more info.
--------------------------------------------------------------------------------
/app/__init__.py:
--------------------------------------------------------------------------------
import logging
import os

from flask import Flask

from utils.custom_log_formatter import ThreadNameColoredFormatter

from .app import main

# Configure colorlog with the custom formatter
formatter = ThreadNameColoredFormatter(
    "%(log_color)s[%(threadName)s] - %(message)s",
)

handler = logging.StreamHandler()
handler.setFormatter(formatter)

# Suppress logging from specific third-party libraries
logging.getLogger("google_auth_httplib2").setLevel(logging.WARNING)
logging.getLogger("googleapiclient.discovery").setLevel(logging.WARNING)
logging.getLogger("googleapiclient.discovery_cache").setLevel(logging.WARNING)
logging.getLogger("httpcore.http11").setLevel(logging.WARNING)
logging.getLogger("urllib3.connectionpool").setLevel(logging.WARNING)
logging.getLogger("httpcore.connection").setLevel(logging.WARNING)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.INFO)


def create_app():
    app = Flask(__name__, template_folder="./templates", static_folder="./static")
    app.secret_key = os.getenv("FLASK_SECRET_KEY")

    app.register_blueprint(main)

    return app
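A quick way to sanity-check this factory wiring is Flask's built-in test client. This is a hypothetical smoke test, not part of the repo; it assumes the variables from `.env` have been loaded into the environment:

```python
# Hypothetical smoke test for create_app(); not part of the repo.
from app import create_app


def test_index_renders():
    app = create_app()
    client = app.test_client()
    # "/" is served by the blueprint's index() view, so a 200 response means
    # the blueprint was registered and the template folder resolves.
    assert client.get("/").status_code == 200
```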
Blueprint("main", __name__) 26 | 27 | 28 | @main.route("/") 29 | def index(): 30 | return render_template("index.html", google_client_id=CLIENT_ID) 31 | 32 | 33 | @main.route("/dashboard", methods=["GET", "POST"]) 34 | def dashboard(): 35 | tasks = tasks_storage.get_tasks(condition="actionStatus = 'Active'") 36 | agent_tasks = [task for task in tasks.values() if task["agent"] == "AI"] 37 | human_tasks = [task for task in tasks.values() if task["agent"] != "AI"] 38 | 39 | if "credentials" not in session: 40 | return redirect(url_for("main.index")) 41 | 42 | # Start the background processes for email fetching and task processing 43 | if not hasattr(g, "email_fetcher_thread"): 44 | start_processing() 45 | email_fetcher_thread = threading.Thread( 46 | target=email_fetcher, 47 | args=(session["credentials"],), 48 | daemon=True, 49 | name="email_fetcher", 50 | ) 51 | 52 | email_fetcher_thread.start() 53 | g.email_fetcher_thread = email_fetcher_thread 54 | 55 | return render_template( 56 | "dashboard.html", agent_tasks=agent_tasks, human_tasks=human_tasks 57 | ) 58 | 59 | 60 | @main.route("/tasks") 61 | def get_tasks(): 62 | tasks = tasks_storage.get_tasks(condition="actionStatus = 'Active'") 63 | agent_tasks = [task for task in tasks.values() if task["agent"] == "AI"] 64 | human_tasks = [task for task in tasks.values() if task["agent"] != "AI"] 65 | return jsonify(agent_tasks=agent_tasks, human_tasks=human_tasks) 66 | 67 | 68 | @main.route("/authorize", methods=["GET", "POST"]) 69 | def authorize(): 70 | flow = google_auth_oauthlib.flow.Flow.from_client_config( 71 | { 72 | "web": { 73 | "client_id": CLIENT_ID, 74 | "client_secret": CLIENT_SECRET, 75 | "redirect_uris": [REDIRECT_URI], 76 | "auth_uri": "https://accounts.google.com/o/oauth2/auth", 77 | "token_uri": "https://oauth2.googleapis.com/token", 78 | } 79 | }, 80 | scopes=SCOPES, 81 | ) 82 | flow.redirect_uri = url_for("main.oauth2callback", _external=True) 83 | authorization_url, state = flow.authorization_url( 84 | access_type="offline", include_granted_scopes="true" 85 | ) 86 | session["state"] = state 87 | return redirect(authorization_url) 88 | 89 | 90 | @main.route("/oauth2callback", methods=["GET", "POST"]) 91 | def oauth2callback(): 92 | state = session["state"] 93 | flow = google_auth_oauthlib.flow.Flow.from_client_config( 94 | { 95 | "web": { 96 | "client_id": CLIENT_ID, 97 | "client_secret": CLIENT_SECRET, 98 | "redirect_uris": [REDIRECT_URI], 99 | "auth_uri": "https://accounts.google.com/o/oauth2/auth", 100 | "token_uri": "https://oauth2.googleapis.com/token", 101 | } 102 | }, 103 | scopes=SCOPES, 104 | state=state, 105 | ) 106 | flow.redirect_uri = url_for("main.oauth2callback", _external=True) 107 | authorization_response = flask.request.url 108 | flow.fetch_token(authorization_response=authorization_response) 109 | 110 | credentials = flow.credentials 111 | session["credentials"] = credentials_to_dict(credentials) 112 | 113 | return redirect(url_for("main.dashboard")) 114 | 115 | 116 | @main.route("/revoke") 117 | def revoke(): 118 | if "credentials" not in session: 119 | return 'You need to authorize before testing the code to revoke credentials.' 
--------------------------------------------------------------------------------
/app/static/dashboard.js:
--------------------------------------------------------------------------------
document.addEventListener("DOMContentLoaded", function () {
  function fetchTasks() {
    console.log("Fetching tasks...");
    fetch("/tasks")
      .then((response) => {
        console.log("Response received:", response);
        return response.json();
      })
      .then((data) => {
        console.log("Data received:", data);
        updateTasks(data.agent_tasks, "agent-tasks");
        updateTasks(data.human_tasks, "human-tasks");
      })
      .catch((error) => console.error("Error fetching tasks:", error));
  }

  function updateTasks(tasks, elementId) {
    const tasksList = document.getElementById(elementId);
    tasksList.innerHTML = "";

    tasks.forEach((task) => {
      const taskItem = document.createElement("li");
      taskItem.textContent = task.name;

      tasksList.appendChild(taskItem);

      if (task.potentialAction) {
        const subTasksList = document.createElement("ul");
        task.potentialAction.forEach((subtask) => {
          const subtaskItem = document.createElement("li");
          subtaskItem.textContent = subtask;
          subTasksList.appendChild(subtaskItem);
        });
        tasksList.appendChild(subTasksList);
      }
    });
  }

  // Poll for tasks every 5 seconds, starting with one fetch on load
  setInterval(fetchTasks, 5000);
  fetchTasks();
});
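For reference, `updateTasks` consumes the two arrays that `/tasks` returns, built in `app.py` from `tasks_storage`. An illustrative payload (field values made up here; field names taken from the code) might look like:

```json
{
  "agent_tasks": [
    {
      "name": "Summarize the email thread",
      "agent": "AI",
      "actionStatus": "Active",
      "potentialAction": ["Draft a reply"]
    }
  ],
  "human_tasks": [
    {
      "name": "Approve the drafted reply",
      "agent": "Will",
      "actionStatus": "Active"
    }
  ]
}
```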
--------------------------------------------------------------------------------
/app/static/styles.css:
--------------------------------------------------------------------------------
/* Basic reset */
* {
  margin: 0;
  padding: 0;
  box-sizing: border-box;
}

/* Body styling */
body {
  font-family: Arial, sans-serif;
  background-color: #f4f4f9;
  color: #333;
  line-height: 1.6;
  padding: 20px;
}

/* Main title */
h1 {
  text-align: center;
  margin-bottom: 20px;
}

/* Section titles */
h2 {
  color: #555;
  margin-bottom: 10px;
}

/* Task lists */
ul {
  list-style-type: none;
  margin-bottom: 20px;
}

li {
  background: #fff;
  margin: 5px 0;
  padding: 10px;
  border-radius: 5px;
  box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
}

/* Subtask lists */
ul ul {
  margin-left: 20px;
}

ul ul li {
  background: #f9f9f9;
  margin: 3px 0;
  padding: 8px;
  border-radius: 3px;
}

/* Links */
a {
  color: #333;
  text-decoration: none;
}

a:hover {
  text-decoration: underline;
}
--------------------------------------------------------------------------------
/app/templates/dashboard.html:
--------------------------------------------------------------------------------
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <title>Dashboard</title>
    <link
      rel="stylesheet"
      href="{{ url_for('static', filename='styles.css') }}"
    />
  </head>
  <body>
    <h1>Task Dashboard</h1>
    <h2>Agent Tasks</h2>
    <ul id="agent-tasks">
      {% for task in agent_tasks %}
      <li>{{ task.name }}</li>
      {% endfor %}
    </ul>
    <h2>Human Tasks</h2>
    <ul id="human-tasks">
      {% for task in human_tasks %}
      <li>{{ task.name }}</li>
      {% endfor %}
    </ul>
    <script src="{{ url_for('static', filename='dashboard.js') }}"></script>
  </body>
</html>
--------------------------------------------------------------------------------
/app/templates/index.html:
--------------------------------------------------------------------------------
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <title>Connect to Gmail</title>
    <style>
      /* page styles */
    </style>
  </head>
  <body>
    <h1>Task Agent Starter Kit with NexusDB</h1>
    <div
      id="g_id_onload"
      data-client_id="{{ google_client_id }}"
      data-login_uri="http://localhost:8080/authorize"
    ></div>
    <div class="g_id_signin" data-type="standard"></div>
    <script src="https://accounts.google.com/gsi/client" async defer></script>
  </body>
</html>
--------------------------------------------------------------------------------
/integrations/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/integrations/__init__.py
--------------------------------------------------------------------------------
/integrations/email/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/integrations/email/__init__.py
--------------------------------------------------------------------------------
/integrations/email/fetcher.py:
--------------------------------------------------------------------------------
import logging
import time
from queue import Queue

from .gmail import fetch_latest_email, gmail_service

logger = logging.getLogger(__name__)

email_queue = Queue()
processed_email_ids = set()


def email_fetcher(credentials):
    logger.info("Starting email fetcher...")
    while True:
        service = gmail_service(credentials)
        if not service:
            logger.warning("Failed to create Gmail service, retrying in 10 seconds...")
            time.sleep(10)  # Retry after some time if service is not available
            continue

        email_data = fetch_latest_email(service)
        if email_data and email_data["Message-ID"] not in processed_email_ids:
            logger.info(f"New email found: {email_data['Message-ID']}")
            email_queue.put(email_data)
            processed_email_ids.add(email_data["Message-ID"])
        else:
            logger.debug("No new emails found.")
        time.sleep(10)  # Adjust the interval as needed
--------------------------------------------------------------------------------
/integrations/email/gmail.py:
--------------------------------------------------------------------------------
import base64
import os
import re

import google.oauth2.credentials
import googleapiclient.discovery
from flask import session

max_emails = int(os.getenv("INITIAL_EMAILS", 10))


def fetch_latest_email(service):
    # Fetch the latest message
    results = (
        service.users()
        .messages()
        .list(userId="me", labelIds=["INBOX"], maxResults=max_emails)
        .execute()
    )
    messages = results.get("messages", [])

    if not messages:
        return None

    msg = messages[0]
    txt = (
        service.users()
        .messages()
        .get(userId="me", id=msg["id"], format="full")
        .execute()
    )

    email_data = {
        "To": "",
        "From": "",
        "Subject": "",
        "Body": "",
        "Timestamp": "",
        "Message-ID": msg["id"],
    }

    # Parse headers for email details
    payload = txt["payload"]
    headers = payload["headers"]

    for header in headers:
        if header["name"] == "To":
            email_data["To"] = header["value"]
        elif header["name"] == "From":
            email_data["From"] = header["value"]
        elif header["name"] == "Subject":
            email_data["Subject"] = header["value"]
        elif header["name"] == "Date":
            email_data["Timestamp"] = header["value"]

    # Handle the body of the email; default to empty so a message with no
    # 'text/plain' part can't leave body_data unbound below
    body_data = ""
    if "parts" in payload:
        # Find the 'text/plain' part
        for part in payload["parts"]:
            if part["mimeType"] == "text/plain":
                body_data = part["body"]["data"]
                break
    else:
        # This is a simple email with only one part
        body_data = payload["body"].get("data", "")

    # Decode the email body
    decoded_body = base64.urlsafe_b64decode(body_data.encode("ASCII")).decode("utf-8")

    # Optionally, strip out the signature if it starts with '--'
    email_data["Body"] = re.split(r"\r?\n--\r?\n", decoded_body, 1)[0]

    return email_data


def gmail_service(credentials=None):
    if not credentials and "credentials" not in session:
        return None

    if not credentials:
        credentials = session["credentials"]

    credentials = google.oauth2.credentials.Credentials(**credentials)
    service = googleapiclient.discovery.build("gmail", "v1", credentials=credentials)
    return service
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import os

from app import create_app

# Allow OAuth over plain HTTP for testing on localhost
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"

app = create_app()

if __name__ == "__main__":
    print("Starting Flask on http://localhost:8080")
    app.run("localhost", 8080, debug=True)
--------------------------------------------------------------------------------
/poetry.lock:
--------------------------------------------------------------------------------
1 | # This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
2 |
3 | [[package]]
4 | name = "anyio"
5 | version = "4.4.0"
6 | description = "High level compatibility layer for multiple asynchronous event loop implementations"
7 | optional = false
8 | python-versions = ">=3.8"
9 | files = [
10 | {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
11 | {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
12 | ]
13 |
14 | [package.dependencies]
15 | idna = ">=2.8"
16 | sniffio = ">=1.1"
17 |
18 | [package.extras]
19 | doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
20 | test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
21 | trio = ["trio (>=0.23)"]
22 |
23 | [[package]]
24 | name = "bidict"
25 | version = "0.23.1"
26 | description = "The bidirectional mapping library for Python."
27 | optional = false 28 | python-versions = ">=3.8" 29 | files = [ 30 | {file = "bidict-0.23.1-py3-none-any.whl", hash = "sha256:5dae8d4d79b552a71cbabc7deb25dfe8ce710b17ff41711e13010ead2abfc3e5"}, 31 | {file = "bidict-0.23.1.tar.gz", hash = "sha256:03069d763bc387bbd20e7d49914e75fc4132a41937fa3405417e1a5a2d006d71"}, 32 | ] 33 | 34 | [[package]] 35 | name = "blinker" 36 | version = "1.8.2" 37 | description = "Fast, simple object-to-object and broadcast signaling" 38 | optional = false 39 | python-versions = ">=3.8" 40 | files = [ 41 | {file = "blinker-1.8.2-py3-none-any.whl", hash = "sha256:1779309f71bf239144b9399d06ae925637cf6634cf6bd131104184531bf67c01"}, 42 | {file = "blinker-1.8.2.tar.gz", hash = "sha256:8f77b09d3bf7c795e969e9486f39c2c5e9c39d4ee07424be2bc594ece9642d83"}, 43 | ] 44 | 45 | [[package]] 46 | name = "cachetools" 47 | version = "5.3.3" 48 | description = "Extensible memoizing collections and decorators" 49 | optional = false 50 | python-versions = ">=3.7" 51 | files = [ 52 | {file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"}, 53 | {file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"}, 54 | ] 55 | 56 | [[package]] 57 | name = "certifi" 58 | version = "2024.6.2" 59 | description = "Python package for providing Mozilla's CA Bundle." 60 | optional = false 61 | python-versions = ">=3.6" 62 | files = [ 63 | {file = "certifi-2024.6.2-py3-none-any.whl", hash = "sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56"}, 64 | {file = "certifi-2024.6.2.tar.gz", hash = "sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516"}, 65 | ] 66 | 67 | [[package]] 68 | name = "charset-normalizer" 69 | version = "3.3.2" 70 | description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
71 | optional = false 72 | python-versions = ">=3.7.0" 73 | files = [ 74 | {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, 75 | {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, 76 | {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, 77 | {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, 78 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, 79 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, 80 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, 81 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, 82 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, 83 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, 84 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, 85 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, 86 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, 87 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, 88 | {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, 89 | {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, 90 | {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, 91 | {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, 92 | {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, 93 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, 94 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, 95 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, 96 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, 97 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, 98 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, 99 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, 100 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, 101 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, 102 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, 103 | {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, 104 | {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, 105 | {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, 106 | {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, 107 | {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, 108 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, 109 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, 110 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, 111 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, 112 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, 113 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, 114 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, 115 | {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, 116 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, 117 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, 118 | {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, 119 | {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, 120 | {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, 121 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, 122 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, 123 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, 124 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, 125 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, 126 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, 127 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, 128 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, 129 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, 130 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, 131 | {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, 132 | {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, 133 | {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, 134 | {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, 135 | {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, 136 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, 137 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, 138 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, 139 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, 140 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, 141 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, 142 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, 143 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, 144 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, 145 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, 146 | {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, 147 | {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, 148 | {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, 149 | {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, 150 | {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, 151 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, 152 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, 153 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, 154 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, 155 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, 156 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, 157 | {file = 
"charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, 158 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, 159 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, 160 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, 161 | {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, 162 | {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, 163 | {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, 164 | ] 165 | 166 | [[package]] 167 | name = "click" 168 | version = "8.1.7" 169 | description = "Composable command line interface toolkit" 170 | optional = false 171 | python-versions = ">=3.7" 172 | files = [ 173 | {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, 174 | {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, 175 | ] 176 | 177 | [package.dependencies] 178 | colorama = {version = "*", markers = "platform_system == \"Windows\""} 179 | 180 | [[package]] 181 | name = "colorama" 182 | version = "0.4.6" 183 | description = "Cross-platform colored terminal text." 184 | optional = false 185 | python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" 186 | files = [ 187 | {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, 188 | {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, 189 | ] 190 | 191 | [[package]] 192 | name = "colorlog" 193 | version = "6.8.2" 194 | description = "Add colours to the output of Python's logging module." 195 | optional = false 196 | python-versions = ">=3.6" 197 | files = [ 198 | {file = "colorlog-6.8.2-py3-none-any.whl", hash = "sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33"}, 199 | {file = "colorlog-6.8.2.tar.gz", hash = "sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44"}, 200 | ] 201 | 202 | [package.dependencies] 203 | colorama = {version = "*", markers = "sys_platform == \"win32\""} 204 | 205 | [package.extras] 206 | development = ["black", "flake8", "mypy", "pytest", "types-colorama"] 207 | 208 | [[package]] 209 | name = "flask" 210 | version = "3.0.3" 211 | description = "A simple framework for building complex web applications." 
212 | optional = false 213 | python-versions = ">=3.8" 214 | files = [ 215 | {file = "flask-3.0.3-py3-none-any.whl", hash = "sha256:34e815dfaa43340d1d15a5c3a02b8476004037eb4840b34910c6e21679d288f3"}, 216 | {file = "flask-3.0.3.tar.gz", hash = "sha256:ceb27b0af3823ea2737928a4d99d125a06175b8512c445cbd9a9ce200ef76842"}, 217 | ] 218 | 219 | [package.dependencies] 220 | blinker = ">=1.6.2" 221 | click = ">=8.1.3" 222 | itsdangerous = ">=2.1.2" 223 | Jinja2 = ">=3.1.2" 224 | Werkzeug = ">=3.0.0" 225 | 226 | [package.extras] 227 | async = ["asgiref (>=3.2)"] 228 | dotenv = ["python-dotenv"] 229 | 230 | [[package]] 231 | name = "flask-socketio" 232 | version = "5.3.6" 233 | description = "Socket.IO integration for Flask applications" 234 | optional = false 235 | python-versions = ">=3.6" 236 | files = [ 237 | {file = "Flask-SocketIO-5.3.6.tar.gz", hash = "sha256:bb8f9f9123ef47632f5ce57a33514b0c0023ec3696b2384457f0fcaa5b70501c"}, 238 | {file = "Flask_SocketIO-5.3.6-py3-none-any.whl", hash = "sha256:9e62d2131842878ae6bfdd7067dfc3be397c1f2b117ab1dc74e6fe74aad7a579"}, 239 | ] 240 | 241 | [package.dependencies] 242 | Flask = ">=0.9" 243 | python-socketio = ">=5.0.2" 244 | 245 | [package.extras] 246 | docs = ["sphinx"] 247 | 248 | [[package]] 249 | name = "flask-sse" 250 | version = "1.0.0" 251 | description = "Server-Sent Events for Flask" 252 | optional = false 253 | python-versions = "*" 254 | files = [ 255 | {file = "Flask-SSE-1.0.0.tar.gz", hash = "sha256:4f84714c2549a45e4f17bfc5f68ee8a9f298b22740a6844404d1c74551f2090d"}, 256 | {file = "Flask_SSE-1.0.0-py2.py3-none-any.whl", hash = "sha256:f86d7ecff0607333755c444130c395e7a133fb7ae6cf76fbd29b1da36d34776b"}, 257 | ] 258 | 259 | [package.dependencies] 260 | flask = ">=0.9" 261 | redis = "*" 262 | six = "*" 263 | 264 | [[package]] 265 | name = "google-api-core" 266 | version = "2.19.0" 267 | description = "Google API client core library" 268 | optional = false 269 | python-versions = ">=3.7" 270 | files = [ 271 | {file = "google-api-core-2.19.0.tar.gz", hash = "sha256:cf1b7c2694047886d2af1128a03ae99e391108a08804f87cfd35970e49c9cd10"}, 272 | {file = "google_api_core-2.19.0-py3-none-any.whl", hash = "sha256:8661eec4078c35428fd3f69a2c7ee29e342896b70f01d1a1cbcb334372dd6251"}, 273 | ] 274 | 275 | [package.dependencies] 276 | google-auth = ">=2.14.1,<3.0.dev0" 277 | googleapis-common-protos = ">=1.56.2,<2.0.dev0" 278 | proto-plus = ">=1.22.3,<2.0.0dev" 279 | protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" 280 | requests = ">=2.18.0,<3.0.0.dev0" 281 | 282 | [package.extras] 283 | grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] 284 | grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] 285 | grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] 286 | 287 | [[package]] 288 | name = "google-api-python-client" 289 | version = "2.134.0" 290 | description = "Google API Client Library for Python" 291 | optional = false 292 | python-versions = ">=3.7" 293 | files = [ 294 | {file = "google-api-python-client-2.134.0.tar.gz", hash = "sha256:4a8f0bea651a212997cc83c0f271fc86f80ef93d1cee9d84de7dfaeef2a858b6"}, 295 | {file = "google_api_python_client-2.134.0-py2.py3-none-any.whl", hash = "sha256:ba05d60f6239990b7994f6328f17bb154c602d31860fb553016dc9f8ce886945"}, 296 | ] 297 | 298 | [package.dependencies] 299 | google-api-core = ">=1.31.5,<2.0.dev0 || 
>2.3.0,<3.0.0.dev0" 300 | google-auth = ">=1.32.0,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0.dev0" 301 | google-auth-httplib2 = ">=0.2.0,<1.0.0" 302 | httplib2 = ">=0.19.0,<1.dev0" 303 | uritemplate = ">=3.0.1,<5" 304 | 305 | [[package]] 306 | name = "google-auth" 307 | version = "2.30.0" 308 | description = "Google Authentication Library" 309 | optional = false 310 | python-versions = ">=3.7" 311 | files = [ 312 | {file = "google-auth-2.30.0.tar.gz", hash = "sha256:ab630a1320f6720909ad76a7dbdb6841cdf5c66b328d690027e4867bdfb16688"}, 313 | {file = "google_auth-2.30.0-py2.py3-none-any.whl", hash = "sha256:8df7da660f62757388b8a7f249df13549b3373f24388cb5d2f1dd91cc18180b5"}, 314 | ] 315 | 316 | [package.dependencies] 317 | cachetools = ">=2.0.0,<6.0" 318 | pyasn1-modules = ">=0.2.1" 319 | rsa = ">=3.1.4,<5" 320 | 321 | [package.extras] 322 | aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] 323 | enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] 324 | pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] 325 | reauth = ["pyu2f (>=0.1.5)"] 326 | requests = ["requests (>=2.20.0,<3.0.0.dev0)"] 327 | 328 | [[package]] 329 | name = "google-auth-httplib2" 330 | version = "0.2.0" 331 | description = "Google Authentication Library: httplib2 transport" 332 | optional = false 333 | python-versions = "*" 334 | files = [ 335 | {file = "google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05"}, 336 | {file = "google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"}, 337 | ] 338 | 339 | [package.dependencies] 340 | google-auth = "*" 341 | httplib2 = ">=0.19.0" 342 | 343 | [[package]] 344 | name = "google-auth-oauthlib" 345 | version = "1.2.0" 346 | description = "Google Authentication Library" 347 | optional = false 348 | python-versions = ">=3.6" 349 | files = [ 350 | {file = "google-auth-oauthlib-1.2.0.tar.gz", hash = "sha256:292d2d3783349f2b0734a0a0207b1e1e322ac193c2c09d8f7c613fb7cc501ea8"}, 351 | {file = "google_auth_oauthlib-1.2.0-py2.py3-none-any.whl", hash = "sha256:297c1ce4cb13a99b5834c74a1fe03252e1e499716718b190f56bcb9c4abc4faf"}, 352 | ] 353 | 354 | [package.dependencies] 355 | google-auth = ">=2.15.0" 356 | requests-oauthlib = ">=0.7.0" 357 | 358 | [package.extras] 359 | tool = ["click (>=6.0.0)"] 360 | 361 | [[package]] 362 | name = "googleapis-common-protos" 363 | version = "1.63.1" 364 | description = "Common protobufs used in Google APIs" 365 | optional = false 366 | python-versions = ">=3.7" 367 | files = [ 368 | {file = "googleapis-common-protos-1.63.1.tar.gz", hash = "sha256:c6442f7a0a6b2a80369457d79e6672bb7dcbaab88e0848302497e3ec80780a6a"}, 369 | {file = "googleapis_common_protos-1.63.1-py2.py3-none-any.whl", hash = "sha256:0e1c2cdfcbc354b76e4a211a35ea35d6926a835cba1377073c4861db904a1877"}, 370 | ] 371 | 372 | [package.dependencies] 373 | protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" 374 | 375 | [package.extras] 376 | grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] 377 | 378 | [[package]] 379 | name = "h11" 380 | version = "0.14.0" 381 | description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" 382 | optional = false 383 | python-versions = ">=3.7" 384 | files = [ 385 | {file = "h11-0.14.0-py3-none-any.whl", hash = 
"sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, 386 | {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, 387 | ] 388 | 389 | [[package]] 390 | name = "httpcore" 391 | version = "1.0.5" 392 | description = "A minimal low-level HTTP client." 393 | optional = false 394 | python-versions = ">=3.8" 395 | files = [ 396 | {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, 397 | {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, 398 | ] 399 | 400 | [package.dependencies] 401 | certifi = "*" 402 | h11 = ">=0.13,<0.15" 403 | 404 | [package.extras] 405 | asyncio = ["anyio (>=4.0,<5.0)"] 406 | http2 = ["h2 (>=3,<5)"] 407 | socks = ["socksio (==1.*)"] 408 | trio = ["trio (>=0.22.0,<0.26.0)"] 409 | 410 | [[package]] 411 | name = "httplib2" 412 | version = "0.22.0" 413 | description = "A comprehensive HTTP client library." 414 | optional = false 415 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 416 | files = [ 417 | {file = "httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc"}, 418 | {file = "httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"}, 419 | ] 420 | 421 | [package.dependencies] 422 | pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""} 423 | 424 | [[package]] 425 | name = "httpx" 426 | version = "0.27.0" 427 | description = "The next generation HTTP client." 428 | optional = false 429 | python-versions = ">=3.8" 430 | files = [ 431 | {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, 432 | {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, 433 | ] 434 | 435 | [package.dependencies] 436 | anyio = "*" 437 | certifi = "*" 438 | httpcore = "==1.*" 439 | idna = "*" 440 | sniffio = "*" 441 | 442 | [package.extras] 443 | brotli = ["brotli", "brotlicffi"] 444 | cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] 445 | http2 = ["h2 (>=3,<5)"] 446 | socks = ["socksio (==1.*)"] 447 | 448 | [[package]] 449 | name = "idna" 450 | version = "3.7" 451 | description = "Internationalized Domain Names in Applications (IDNA)" 452 | optional = false 453 | python-versions = ">=3.5" 454 | files = [ 455 | {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, 456 | {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, 457 | ] 458 | 459 | [[package]] 460 | name = "itsdangerous" 461 | version = "2.2.0" 462 | description = "Safely pass data to untrusted environments and back." 463 | optional = false 464 | python-versions = ">=3.8" 465 | files = [ 466 | {file = "itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef"}, 467 | {file = "itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173"}, 468 | ] 469 | 470 | [[package]] 471 | name = "jinja2" 472 | version = "3.1.4" 473 | description = "A very fast and expressive template engine." 
474 | optional = false 475 | python-versions = ">=3.7" 476 | files = [ 477 | {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, 478 | {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, 479 | ] 480 | 481 | [package.dependencies] 482 | MarkupSafe = ">=2.0" 483 | 484 | [package.extras] 485 | i18n = ["Babel (>=2.7)"] 486 | 487 | [[package]] 488 | name = "markupsafe" 489 | version = "2.1.5" 490 | description = "Safely add untrusted strings to HTML/XML markup." 491 | optional = false 492 | python-versions = ">=3.7" 493 | files = [ 494 | {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, 495 | {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, 496 | {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, 497 | {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, 498 | {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, 499 | {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, 500 | {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, 501 | {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, 502 | {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, 503 | {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, 504 | {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, 505 | {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, 506 | {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, 507 | {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, 508 | {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, 509 | {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, 510 | {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, 511 | {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, 512 | {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, 513 | {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, 514 | {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, 515 | {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, 516 | {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, 517 | {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, 518 | {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, 519 | {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, 520 | {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, 521 | {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, 522 | {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, 523 | {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, 524 | {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, 525 | {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, 526 | {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, 527 | {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, 528 | {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, 529 | {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, 530 | {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, 531 | {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, 532 | {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, 533 | {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, 534 | {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, 535 | {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, 536 | {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, 537 | {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, 538 | {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, 539 | {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, 540 | {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, 541 | {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, 542 | {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, 543 | {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, 544 | {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, 545 | {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, 546 | {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, 547 | {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, 548 | {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, 549 | {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, 550 | {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, 551 | {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, 552 | {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, 553 | {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, 554 | ] 555 | 556 | [[package]] 557 | name = "nexus-python" 558 | version = "0.3.1" 559 | description = "A simple interface for interacting with NexusDB." 
560 | optional = false 561 | python-versions = "<4.0.0,>=3.11.8" 562 | files = [ 563 | {file = "nexus_python-0.3.1-py3-none-any.whl", hash = "sha256:62f333387598bf353c5b82107b1b0adabe2d1dfb51592eac2d7fa36751655b3c"}, 564 | {file = "nexus_python-0.3.1.tar.gz", hash = "sha256:425ab1afdbbcdd67cbad67e0fa73b9275c4a289ea62b2a36e2eafee182a80c88"}, 565 | ] 566 | 567 | [package.dependencies] 568 | requests = ">=2.25.1,<3.0.0" 569 | tabulate = ">=0.9.0,<0.10.0" 570 | 571 | [[package]] 572 | name = "oauthlib" 573 | version = "3.2.2" 574 | description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" 575 | optional = false 576 | python-versions = ">=3.6" 577 | files = [ 578 | {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, 579 | {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, 580 | ] 581 | 582 | [package.extras] 583 | rsa = ["cryptography (>=3.0.0)"] 584 | signals = ["blinker (>=1.4.0)"] 585 | signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] 586 | 587 | [[package]] 588 | name = "ollama" 589 | version = "0.2.1" 590 | description = "The official Python client for Ollama." 591 | optional = false 592 | python-versions = "<4.0,>=3.8" 593 | files = [ 594 | {file = "ollama-0.2.1-py3-none-any.whl", hash = "sha256:b6e2414921c94f573a903d1069d682ba2fb2607070ea9e19ca4a7872f2a460ec"}, 595 | {file = "ollama-0.2.1.tar.gz", hash = "sha256:fa316baa9a81eac3beb4affb0a17deb3008fdd6ed05b123c26306cfbe4c349b6"}, 596 | ] 597 | 598 | [package.dependencies] 599 | httpx = ">=0.27.0,<0.28.0" 600 | 601 | [[package]] 602 | name = "proto-plus" 603 | version = "1.24.0" 604 | description = "Beautiful, Pythonic protocol buffers." 
605 | optional = false 606 | python-versions = ">=3.7" 607 | files = [ 608 | {file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"}, 609 | {file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"}, 610 | ] 611 | 612 | [package.dependencies] 613 | protobuf = ">=3.19.0,<6.0.0dev" 614 | 615 | [package.extras] 616 | testing = ["google-api-core (>=1.31.5)"] 617 | 618 | [[package]] 619 | name = "protobuf" 620 | version = "4.25.3" 621 | description = "" 622 | optional = false 623 | python-versions = ">=3.8" 624 | files = [ 625 | {file = "protobuf-4.25.3-cp310-abi3-win32.whl", hash = "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa"}, 626 | {file = "protobuf-4.25.3-cp310-abi3-win_amd64.whl", hash = "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8"}, 627 | {file = "protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c"}, 628 | {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019"}, 629 | {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d"}, 630 | {file = "protobuf-4.25.3-cp38-cp38-win32.whl", hash = "sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2"}, 631 | {file = "protobuf-4.25.3-cp38-cp38-win_amd64.whl", hash = "sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4"}, 632 | {file = "protobuf-4.25.3-cp39-cp39-win32.whl", hash = "sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4"}, 633 | {file = "protobuf-4.25.3-cp39-cp39-win_amd64.whl", hash = "sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c"}, 634 | {file = "protobuf-4.25.3-py3-none-any.whl", hash = "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9"}, 635 | {file = "protobuf-4.25.3.tar.gz", hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"}, 636 | ] 637 | 638 | [[package]] 639 | name = "pyasn1" 640 | version = "0.6.0" 641 | description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" 642 | optional = false 643 | python-versions = ">=3.8" 644 | files = [ 645 | {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, 646 | {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, 647 | ] 648 | 649 | [[package]] 650 | name = "pyasn1-modules" 651 | version = "0.4.0" 652 | description = "A collection of ASN.1-based protocols modules" 653 | optional = false 654 | python-versions = ">=3.8" 655 | files = [ 656 | {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, 657 | {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, 658 | ] 659 | 660 | [package.dependencies] 661 | pyasn1 = ">=0.4.6,<0.7.0" 662 | 663 | [[package]] 664 | name = "pyparsing" 665 | version = "3.1.2" 666 | description = "pyparsing module - Classes and methods to define and execute parsing grammars" 667 | optional = false 668 | python-versions = ">=3.6.8" 669 | files = [ 670 | {file = 
"pyparsing-3.1.2-py3-none-any.whl", hash = "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"}, 671 | {file = "pyparsing-3.1.2.tar.gz", hash = "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad"}, 672 | ] 673 | 674 | [package.extras] 675 | diagrams = ["jinja2", "railroad-diagrams"] 676 | 677 | [[package]] 678 | name = "python-dotenv" 679 | version = "1.0.1" 680 | description = "Read key-value pairs from a .env file and set them as environment variables" 681 | optional = false 682 | python-versions = ">=3.8" 683 | files = [ 684 | {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, 685 | {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, 686 | ] 687 | 688 | [package.extras] 689 | cli = ["click (>=5.0)"] 690 | 691 | [[package]] 692 | name = "python-engineio" 693 | version = "4.9.1" 694 | description = "Engine.IO server and client for Python" 695 | optional = false 696 | python-versions = ">=3.6" 697 | files = [ 698 | {file = "python_engineio-4.9.1-py3-none-any.whl", hash = "sha256:f995e702b21f6b9ebde4e2000cd2ad0112ba0e5116ec8d22fe3515e76ba9dddd"}, 699 | {file = "python_engineio-4.9.1.tar.gz", hash = "sha256:7631cf5563086076611e494c643b3fa93dd3a854634b5488be0bba0ef9b99709"}, 700 | ] 701 | 702 | [package.dependencies] 703 | simple-websocket = ">=0.10.0" 704 | 705 | [package.extras] 706 | asyncio-client = ["aiohttp (>=3.4)"] 707 | client = ["requests (>=2.21.0)", "websocket-client (>=0.54.0)"] 708 | docs = ["sphinx"] 709 | 710 | [[package]] 711 | name = "python-socketio" 712 | version = "5.11.3" 713 | description = "Socket.IO server and client for Python" 714 | optional = false 715 | python-versions = ">=3.8" 716 | files = [ 717 | {file = "python_socketio-5.11.3-py3-none-any.whl", hash = "sha256:2a923a831ff70664b7c502df093c423eb6aa93c1ce68b8319e840227a26d8b69"}, 718 | {file = "python_socketio-5.11.3.tar.gz", hash = "sha256:194af8cdbb7b0768c2e807ba76c7abc288eb5bb85559b7cddee51a6bc7a65737"}, 719 | ] 720 | 721 | [package.dependencies] 722 | bidict = ">=0.21.0" 723 | python-engineio = ">=4.8.0" 724 | 725 | [package.extras] 726 | asyncio-client = ["aiohttp (>=3.4)"] 727 | client = ["requests (>=2.21.0)", "websocket-client (>=0.54.0)"] 728 | docs = ["sphinx"] 729 | 730 | [[package]] 731 | name = "redis" 732 | version = "5.0.6" 733 | description = "Python client for Redis database and key-value store" 734 | optional = false 735 | python-versions = ">=3.7" 736 | files = [ 737 | {file = "redis-5.0.6-py3-none-any.whl", hash = "sha256:c0d6d990850c627bbf7be01c5c4cbaadf67b48593e913bb71c9819c30df37eee"}, 738 | {file = "redis-5.0.6.tar.gz", hash = "sha256:38473cd7c6389ad3e44a91f4c3eaf6bcb8a9f746007f29bf4fb20824ff0b2197"}, 739 | ] 740 | 741 | [package.extras] 742 | hiredis = ["hiredis (>=1.0.0)"] 743 | ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"] 744 | 745 | [[package]] 746 | name = "requests" 747 | version = "2.32.3" 748 | description = "Python HTTP for Humans." 
749 | optional = false 750 | python-versions = ">=3.8" 751 | files = [ 752 | {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, 753 | {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, 754 | ] 755 | 756 | [package.dependencies] 757 | certifi = ">=2017.4.17" 758 | charset-normalizer = ">=2,<4" 759 | idna = ">=2.5,<4" 760 | urllib3 = ">=1.21.1,<3" 761 | 762 | [package.extras] 763 | socks = ["PySocks (>=1.5.6,!=1.5.7)"] 764 | use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] 765 | 766 | [[package]] 767 | name = "requests-oauthlib" 768 | version = "2.0.0" 769 | description = "OAuthlib authentication support for Requests." 770 | optional = false 771 | python-versions = ">=3.4" 772 | files = [ 773 | {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"}, 774 | {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"}, 775 | ] 776 | 777 | [package.dependencies] 778 | oauthlib = ">=3.0.0" 779 | requests = ">=2.0.0" 780 | 781 | [package.extras] 782 | rsa = ["oauthlib[signedtoken] (>=3.0.0)"] 783 | 784 | [[package]] 785 | name = "rsa" 786 | version = "4.9" 787 | description = "Pure-Python RSA implementation" 788 | optional = false 789 | python-versions = ">=3.6,<4" 790 | files = [ 791 | {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, 792 | {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, 793 | ] 794 | 795 | [package.dependencies] 796 | pyasn1 = ">=0.1.3" 797 | 798 | [[package]] 799 | name = "simple-websocket" 800 | version = "1.0.0" 801 | description = "Simple WebSocket server and client for Python" 802 | optional = false 803 | python-versions = ">=3.6" 804 | files = [ 805 | {file = "simple-websocket-1.0.0.tar.gz", hash = "sha256:17d2c72f4a2bd85174a97e3e4c88b01c40c3f81b7b648b0cc3ce1305968928c8"}, 806 | {file = "simple_websocket-1.0.0-py3-none-any.whl", hash = "sha256:1d5bf585e415eaa2083e2bcf02a3ecf91f9712e7b3e6b9fa0b461ad04e0837bc"}, 807 | ] 808 | 809 | [package.dependencies] 810 | wsproto = "*" 811 | 812 | [package.extras] 813 | docs = ["sphinx"] 814 | 815 | [[package]] 816 | name = "six" 817 | version = "1.16.0" 818 | description = "Python 2 and 3 compatibility utilities" 819 | optional = false 820 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" 821 | files = [ 822 | {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, 823 | {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, 824 | ] 825 | 826 | [[package]] 827 | name = "sniffio" 828 | version = "1.3.1" 829 | description = "Sniff out which async library your code is running under" 830 | optional = false 831 | python-versions = ">=3.7" 832 | files = [ 833 | {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, 834 | {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, 835 | ] 836 | 837 | [[package]] 838 | name = "tabulate" 839 | version = "0.9.0" 840 | description = "Pretty-print tabular data" 841 | optional = false 842 | python-versions = ">=3.7" 843 | files = [ 844 | 
{file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, 845 | {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, 846 | ] 847 | 848 | [package.extras] 849 | widechars = ["wcwidth"] 850 | 851 | [[package]] 852 | name = "typeid-python" 853 | version = "0.3.1" 854 | description = "Python implementation of TypeIDs: type-safe, K-sortable, and globally unique identifiers inspired by Stripe IDs" 855 | optional = false 856 | python-versions = "<4,>=3.8" 857 | files = [ 858 | {file = "typeid_python-0.3.1-py3-none-any.whl", hash = "sha256:62a6747933b3323d65f0bf91c8e8c7768b0292eaf9c176fb0c934ff3a61acce5"}, 859 | {file = "typeid_python-0.3.1.tar.gz", hash = "sha256:f96a78c5dc6d8df1d058b72598bcc2c1c5bb8d8343f53f910e074dae01458417"}, 860 | ] 861 | 862 | [package.dependencies] 863 | uuid6 = ">=2023.5.2" 864 | 865 | [[package]] 866 | name = "uritemplate" 867 | version = "4.1.1" 868 | description = "Implementation of RFC 6570 URI Templates" 869 | optional = false 870 | python-versions = ">=3.6" 871 | files = [ 872 | {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"}, 873 | {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, 874 | ] 875 | 876 | [[package]] 877 | name = "urllib3" 878 | version = "2.2.2" 879 | description = "HTTP library with thread-safe connection pooling, file post, and more." 880 | optional = false 881 | python-versions = ">=3.8" 882 | files = [ 883 | {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, 884 | {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, 885 | ] 886 | 887 | [package.extras] 888 | brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] 889 | h2 = ["h2 (>=4,<5)"] 890 | socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] 891 | zstd = ["zstandard (>=0.18.0)"] 892 | 893 | [[package]] 894 | name = "uuid6" 895 | version = "2024.1.12" 896 | description = "New time-based UUID formats which are suited for use as a database key" 897 | optional = false 898 | python-versions = ">=3.8" 899 | files = [ 900 | {file = "uuid6-2024.1.12-py3-none-any.whl", hash = "sha256:8150093c8d05a331bc0535bc5ef6cf57ac6eceb2404fd319bc10caee2e02c065"}, 901 | {file = "uuid6-2024.1.12.tar.gz", hash = "sha256:ed0afb3a973057575f9883201baefe402787ca5e11e1d24e377190f0c43f1993"}, 902 | ] 903 | 904 | [[package]] 905 | name = "werkzeug" 906 | version = "3.0.3" 907 | description = "The comprehensive WSGI web application library." 
908 | optional = false 909 | python-versions = ">=3.8" 910 | files = [ 911 | {file = "werkzeug-3.0.3-py3-none-any.whl", hash = "sha256:fc9645dc43e03e4d630d23143a04a7f947a9a3b5727cd535fdfe155a17cc48c8"}, 912 | {file = "werkzeug-3.0.3.tar.gz", hash = "sha256:097e5bfda9f0aba8da6b8545146def481d06aa7d3266e7448e2cccf67dd8bd18"}, 913 | ] 914 | 915 | [package.dependencies] 916 | MarkupSafe = ">=2.1.1" 917 | 918 | [package.extras] 919 | watchdog = ["watchdog (>=2.3)"] 920 | 921 | [[package]] 922 | name = "wsproto" 923 | version = "1.2.0" 924 | description = "WebSockets state-machine based protocol implementation" 925 | optional = false 926 | python-versions = ">=3.7.0" 927 | files = [ 928 | {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, 929 | {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, 930 | ] 931 | 932 | [package.dependencies] 933 | h11 = ">=0.9.0,<1" 934 | 935 | [metadata] 936 | lock-version = "2.0" 937 | python-versions = "~3.11.8" 938 | content-hash = "b5ae9bc5a7e6895e3ec4129206c6d5baff7fa33f80a09c1c1a60d872b4034af0" 939 | -------------------------------------------------------------------------------- /public/client_id_secret.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/public/client_id_secret.png -------------------------------------------------------------------------------- /public/dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/public/dashboard.png -------------------------------------------------------------------------------- /public/gmail_api_circled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/public/gmail_api_circled.png -------------------------------------------------------------------------------- /public/google_api_library.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/public/google_api_library.png -------------------------------------------------------------------------------- /public/google_api_oauth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/public/google_api_oauth.png -------------------------------------------------------------------------------- /public/google_cloud_api.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/public/google_cloud_api.png -------------------------------------------------------------------------------- /public/google_credentials_oauth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/public/google_credentials_oauth.png 
-------------------------------------------------------------------------------- /public/google_oauth_json_download.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/public/google_oauth_json_download.png -------------------------------------------------------------------------------- /public/google_uris.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/public/google_uris.png -------------------------------------------------------------------------------- /public/test_user.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/public/test_user.png -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "tasker" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["Will Humble"] 6 | license = "MIT" 7 | readme = "README.md" 8 | 9 | [tool.poetry.dependencies] 10 | python = "~3.11.8" 11 | flask = "^3.0.3" 12 | google-auth = "^2.29.0" 13 | google-auth-oauthlib = "^1.2.0" 14 | google-auth-httplib2 = "^0.2.0" 15 | google-api-python-client = "^2.127.0" 16 | python-dotenv = "^1.0.1" 17 | nexus-python = "^0.3.1" 18 | ollama = "^0.2.0" 19 | colorlog = "^6.8.2" 20 | typeid-python = "^0.3.0" 21 | flask-sse = "^1.0.0" 22 | flask-socketio = "^5.3.6" 23 | 24 | 25 | [build-system] 26 | requires = ["poetry-core"] 27 | build-backend = "poetry.core.masonry.api" 28 | -------------------------------------------------------------------------------- /tasks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/tasks/__init__.py -------------------------------------------------------------------------------- /tasks/agents.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from typing import List 4 | 5 | from ollama import Message 6 | from typeid import TypeID 7 | 8 | from utils.ollama import ollama_chat, ollama_generate 9 | 10 | from .storage import SingleTaskListStorage 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | storage = SingleTaskListStorage() 15 | 16 | 17 | def objective_agent(to, from_email, subject, timestamp, body, attachments): 18 | prompt = f""" 19 | You are an AI assistant that processes emails. You have received an email with the following details: 20 | To: {to} 21 | From: {from_email} 22 | Subject: {subject} 23 | Timestamp: {timestamp} 24 | Body: {body} 25 | Attachments: {attachments} 26 | Your task is to determine if the email contains any actionable tasks for the recipient. An actionable task should be a specific request or instruction that requires the recipient to take some action. If there are actionable tasks, list each one as a separate item. If there are no actionable tasks, respond with "No tasks found." 27 | RETURN ONLY THIS STRING AND DO NOT INCLUDE ANY OTHER OUTPUT. 
28 | """ 29 | response_text = ollama_generate(model="llama3", prompt=prompt, stream=True) 30 | if response_text == "No tasks found." or not response_text: 31 | return {"tasks_found": False, "tasks": []} 32 | else: 33 | tasks = response_text.split("\n") 34 | task_list = [{"name": task.strip()} for task in tasks if task.strip()] 35 | return { 36 | "tasks_found": True, 37 | "tasks": task_list, 38 | } 39 | 40 | 41 | def task_creation_agent(task_name, previous_results): 42 | prompt = f"""You are a task creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: {task_name}. 43 | The result of the previous task(s) are as follows: {previous_results} 44 | If the sub-tasks are dependent, dependencies should be lower on the list (i.e., execution should be bottom-up). 45 | Be sure to specify if the sub-task can be completed by an AI assistant or requires human intervention by specifying agent = 'AI' or 'Human'. 46 | Return the sub-tasks as a structured list of dictionaries with the following format: 47 | [{{"task": str, "agent": str}}, {{"task": str, "agent": str}}, ...] 48 | SHARE ONLY THIS LIST - DO NOT INCLUDE ANYTHING ELSE IN THE RESPONSE. 49 | """ 50 | response_text = ollama_generate(model="llama3", prompt=prompt, stream=True) 51 | logger.debug(f"Task creation agent response: {response_text}") 52 | try: 53 | new_tasks_list = eval(response_text.strip()) 54 | except (SyntaxError, ValueError): 55 | logger.error(f"Failed to parse task creation agent response: {response_text}") 56 | new_tasks_list = None 57 | 58 | return new_tasks_list 59 | 60 | 61 | def entity_extraction_agent(text_input): 62 | prompt = [ 63 | Message( 64 | role="system", 65 | content="""You are an AI expert specializing in entity identification and list creation, with the goal of capturing relationships based on a given input or request. 66 | You are given input in various forms such as paragraph, email, text files, and more. 67 | Your task is to create a entities list based on the input. 68 | Only use organizations, people, and projects as entities and do not include concepts or products. 69 | Organization entities can have attributes: name, type, description, member, memberOf. 70 | Person entities can have attributes: name, type, description, memberOf, parent, sibling, spouse, children, colleague, relatedTo, worksFor. 71 | Project entities can have attributes: name, type, description, department, member, memberOf. 72 | Only add nodes that have a relationship with at least one other node. 73 | Make sure that the node type (people, org, event) matches the to_type or for_type when the entity is part of a relationship. 74 | Return the entities list as a valid JSON object. NEVER INCLUDE COMMENTS THEY ARE NOT VALID JSON. DO NOT INCLUDE ANYTHING ELSE IN THE RESPONSE.""", 75 | ), 76 | Message( 77 | role="user", 78 | content="Can you please help John Smith from IT get access to the system? He needs it as part of the IT Modernization effort.", 79 | ), 80 | Message( 81 | role="assistant", 82 | content="""{ 83 | "entities": [ 84 | { 85 | "name": "Modernization of the IT infrastructure", 86 | "type": "Project", 87 | "description": "A project to modernize the IT infrastructure of the company.", 88 | "department": "IT", 89 | }, 90 | { 91 | "name": "John Smith", 92 | "type": "Person", 93 | "description": "Employee in the IT department." 
94 | "memberOf": "IT", 95 | }, 96 | { 97 | "name": "IT", 98 | "type": "Organization", 99 | "description": "The IT department of the company.", 100 | "member": "John Smith", 101 | }, 102 | ] 103 | } 104 | """, 105 | ), 106 | Message(role="user", content=text_input), 107 | ] 108 | response_text = ollama_chat(model="llama3", messages=prompt, stream=True) 109 | return response_text 110 | 111 | 112 | def conditional_entity_addition(data): 113 | entities = data.get("entities", []) 114 | updated_entities = [] 115 | 116 | for entity in entities: 117 | entity_type = entity.get("type") 118 | entity_name = entity.get("name") 119 | 120 | if not entity_type or not entity_name: 121 | logger.warning(f"Entity is missing type or name: {entity}") 122 | continue 123 | 124 | # Look up existing entities by name 125 | condition_str = f"str_includes('name', '{entity_name}')" 126 | search_results = storage.lookup( 127 | entity_type, 128 | ["uuid", "name", "description"], 129 | condition=condition_str, 130 | ) 131 | search_results = json.loads(search_results) 132 | 133 | # Check if the entity exists 134 | if search_results["rows"]: 135 | combined_results = { 136 | result[0]: { 137 | "uuid": result[0], 138 | "name": result[1], 139 | "description": result[2], 140 | } 141 | for result in search_results["rows"] 142 | } 143 | combined_results_str = ", ".join( 144 | json.dumps(result) for result in combined_results.values() 145 | ) 146 | 147 | prompt: List[Message] = [ 148 | Message( 149 | role="system", 150 | content="You are a helpful assistant who's specialty is to decide if new input data matches data already in our database. Review the search results provided, compare against the input data, and if there's a match respond with the ID number of the match, and only the ID number. If there are no matches, respond with 'No Matches'. Your response is ALWAYS an ID number alone, or 'No Matches'. When reviewing whether a match existings in our search results to our new input, take into account that the name may not match perfectly (for example, one might have just a first name, or a nick name, while the other has a full name), in which case look at the additional information about the user to determine if there's a strong likelihood they are the same person. For companies, you should consider different names of the same company as the same, such as EA and Electronic Arts (make your best guess). If the likelihood is strong, respond with and only with the ID number. If likelihood is low, respond with 'No Matches'.", 151 | ), 152 | Message( 153 | role="user", 154 | content=f"Here are the search results: {combined_results_str}. 
157 | 158 | response_text = ollama_chat(model="llama3", messages=prompt, stream=True) 159 | if response_text.strip().lower() == "no matches":  # tolerate stray whitespace in the model's reply 160 | entity_id = str(TypeID(prefix=entity_type.lower())) 161 | logger.info(f"Creating new entity: {entity_name}, ID: {entity_id}") 162 | else: 163 | entity_id = response_text.strip() 164 | logger.info(f"Found existing entity: {entity_name}, ID: {entity_id}") 165 | 166 | else: 167 | entity_id = str(TypeID(prefix=entity_type.lower())) 168 | logger.info(f"Creating new entity: {entity_name}, ID: {entity_id}") 169 | 170 | entity["uuid"] = entity_id 171 | updated_entities.append(entity) 172 | 173 | # Replace references in entities with the appropriate UUIDs 174 | uuid_map = {entity["name"]: entity["uuid"] for entity in updated_entities} 175 | 176 | for entity in updated_entities: 177 | for key, value in entity.items(): 178 | if isinstance(value, str) and value in uuid_map: 179 | entity[key] = uuid_map[value] 180 | 181 | return {"entities": updated_entities}, 200
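# Hedged composition sketch (inputs illustrative; the real wiring lives in
# tasks/processor.py): the agents in this module chain roughly as
#
#   objectives = objective_agent(to, from_email, subject, ts, body, "")
#   subtasks   = task_creation_agent(objectives["tasks"][0]["name"], [])
#   entities   = entity_extraction_agent(body)
#
# with conditional_entity_addition() deduplicating entities before storage.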
182 | -------------------------------------------------------------------------------- /tasks/execution.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from utils.ollama import ollama_generate 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | 8 | def execution_agent(task_name: str, previous_results: list, context: list) -> str: 9 | try: 10 | prompt = f""" 11 | Perform the following task: {task_name}. 12 | Take into account these previously completed tasks and their results: {previous_results}. 13 | Additionally, consider these similar tasks and their contexts: {context}. 14 | If you can complete the task based on the context provided, execute it and respond with the result. 15 | If more context is needed, respond with "More context needed" - DO NOT SAY ANYTHING ELSE. 16 | Response: 17 | """ 18 | response_text = ollama_generate(model="llama3", prompt=prompt, stream=True) 19 | return response_text 20 | except Exception as e: 21 | logger.error(f"Error in execution_agent: {e}") 22 | raise 23 | -------------------------------------------------------------------------------- /tasks/processor.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | import re 5 | import threading 6 | import time 7 | 8 | import flask 9 | from dotenv import load_dotenv 10 | from flask import Flask 11 | 12 | 13 | from integrations.email.fetcher import email_queue 14 | from tasks.agents import ( 15 | conditional_entity_addition, 16 | entity_extraction_agent, 17 | objective_agent, 18 | task_creation_agent, 19 | ) 20 | from tasks.execution import execution_agent 21 | from tasks.storage import SingleTaskListStorage 22 | 23 | # Load environment variables from .env file 24 | load_dotenv() 25 | 26 | MAX_THREADS = int(os.getenv("MAX_THREADS", 4)) 27 | 28 | logger = logging.getLogger(__name__) 29 | 30 | # Initialize task storage 31 | tasks_storage = SingleTaskListStorage() 32 | 33 | # Standalone Flask app: background threads use app.app_context() below, since worker threads have no request context 34 | app = Flask(__name__) 35 | 36 | 37 | def sanitize_json_response(response): 38 | # Remove trailing commas before closing brackets or braces 39 | sanitized_response = re.sub(r",\s*([\]}])", r"\1", response) 40 | return sanitized_response 41 | 42 | 43 | def process_entity_extraction_and_addition(email_data): 44 | try: 45 | body = email_data["Body"] 46 | logger.debug("Calling entity_extraction_agent...") 47 | entity_extraction_response = entity_extraction_agent(body) 48 | logger.debug(f"Entity extraction response: {entity_extraction_response}") 49 | 50 | if entity_extraction_response: 51 | sanitized_response = sanitize_json_response(entity_extraction_response) 52 | entity_data = json.loads(sanitized_response) 53 | 54 | # Process the entire entity data in one call to conditional_entity_addition 55 | addition_response = conditional_entity_addition( 56 | {"entities": entity_data["entities"]} 57 | ) 58 | logger.info(f"Entity addition response: {addition_response}") 59 | 60 | else: 61 | logger.info("No entities extracted.") 62 | except json.JSONDecodeError as e: 63 | logger.error(f"JSON decode error: {e}") 64 | logger.error(f"Entity extraction response was: {entity_extraction_response}") 65 | except Exception as e: 66 | logger.error( 67 | f"Error processing entity extraction and addition: {e}", exc_info=True 68 | ) 69 | 70 | 71 | def process_email(email_data): 72 | try: 73 | email_id = email_data["Message-ID"] 74 | existing_tasks = tasks_storage.get_tasks(object=email_id) 75 | email_subject = email_data["Subject"] 76 | logger.info(f"Existing tasks for email '{email_subject}': {existing_tasks}") 77 | 78 | # Check if the task with identifier 0 is complete 79 | if any( 80 | task["identifier"] == 0 and task["actionStatus"] == "Complete" 81 | for task in existing_tasks.values() 82 | ): 83 | logger.info( 84 | f"Email with ID {email_id} has already been fully processed. Skipping."
85 | ) 86 | return 87 | 88 | # If no existing tasks, proceed with objective agent and primary task creation 89 | if not existing_tasks: 90 | to = email_data["To"] 91 | from_email = email_data["From"] 92 | subject = email_data["Subject"] 93 | timestamp = email_data["Timestamp"] 94 | body = email_data["Body"] 95 | attachments = "" # Assuming no attachments for simplicity 96 | 97 | logger.info(f"Starting entity extraction for email ID {email_id}") 98 | entity_extraction_processor(email_data) 99 | 100 | logger.debug("Calling objective_agent...") 101 | objective_response = objective_agent( 102 | to, from_email, subject, timestamp, body, attachments 103 | ) 104 | 105 | if not objective_response["tasks_found"]: 106 | logger.info("No tasks identified in the email.") 107 | return 108 | 109 | OBJECTIVE = objective_response["tasks"][0]["name"] 110 | logger.info(f"OBJECTIVE: {OBJECTIVE}") 111 | 112 | primary_task = { 113 | "uuid": tasks_storage.next_task_id(), 114 | "name": OBJECTIVE, 115 | "agent": "AI", 116 | "actionStatus": "Active", 117 | "identifier": 0, 118 | "object": email_id, 119 | } 120 | tasks_storage.append(primary_task) 121 | logger.debug(f"Primary task created: {primary_task}") 122 | 123 | # Add this new task to the existing_tasks dictionary 124 | existing_tasks[primary_task["uuid"]] = primary_task 125 | 126 | # Update the dashboard after processing 127 | with app.app_context(): 128 | update_dashboard() 129 | 130 | current_identifier = 0 131 | max_identifier = 0 132 | 133 | else: 134 | # Set current_identifier to the largest identifier that is not complete 135 | incomplete_tasks = [ 136 | task 137 | for task in existing_tasks.values() 138 | if task["actionStatus"] != "Complete" 139 | ] 140 | if incomplete_tasks: 141 | max_identifier = max(task["identifier"] for task in incomplete_tasks) 142 | current_identifier = max_identifier 143 | else: 144 | logger.info( 145 | f"All tasks for email ID {email_id} are complete. Skipping." 
146 | ) 147 | return 148 | 149 | tasks = existing_tasks 150 | 151 | while current_identifier >= 0: 152 | # Find the task with the current_identifier and agent as AI 153 | task = next( 154 | ( 155 | t 156 | for t in tasks.values() 157 | if t["identifier"] == current_identifier and t["agent"] == "AI" 158 | ), 159 | None, 160 | ) 161 | 162 | if not task: 163 | logger.info("No more AI tasks to process.") 164 | break 165 | 166 | logger.info( 167 | f"Processing task: {task['name']} with identifier {current_identifier}" 168 | ) 169 | 170 | previous_results = tasks_storage.get_previous_results(email_id) 171 | context = tasks_storage.get_context(task["name"], 5) 172 | result = execution_agent(task["name"], previous_results, context) 173 | 174 | if result == "More context needed": 175 | new_tasks = task_creation_agent(task["name"], previous_results) 176 | current_identifier, tasks = tasks_storage.add_subtasks( 177 | current_task_id=task["uuid"], 178 | current_task_name=task["name"], 179 | potential_actions=new_tasks, 180 | max_identifier=max_identifier, 181 | ) 182 | max_identifier = current_identifier 183 | logger.info(f"Created new sub-tasks: {new_tasks}") 184 | else: 185 | task["actionStatus"] = "Complete" 186 | tasks_storage.update_task_status( 187 | task["uuid"], task["name"], "Complete", result 188 | ) 189 | current_identifier -= 1 190 | 191 | time.sleep(1) 192 | 193 | # Update the dashboard after processing 194 | with app.app_context(): 195 | update_dashboard() 196 | 197 | except Exception as e: 198 | logger.error(f"Error processing email: {e}", exc_info=True) 199 | finally: 200 | email_queue.task_done() 201 | 202 | 203 | def entity_extraction_processor(email_data): 204 | entity_thread = threading.Thread( 205 | target=process_entity_extraction_and_addition, 206 | args=(email_data,), 207 | daemon=True, 208 | name=f"EntityExtraction-{email_data['Message-ID']}", 209 | ) 210 | entity_thread.start() 211 | 212 | 213 | def update_dashboard(): 214 | logger.info("Updating dashboard...") 215 | tasks = tasks_storage.get_tasks() 216 | agent_tasks = [task for task in tasks.values() if task["agent"] == "AI"] 217 | human_tasks = [task for task in tasks.values() if task["agent"] != "AI"] 218 | flask.g.agent_tasks = agent_tasks 219 | flask.g.human_tasks = human_tasks 220 | 221 | 222 | def email_processor(): 223 | active_threads = {} 224 | while True: 225 | if len(active_threads) < MAX_THREADS: 226 | email_data = email_queue.get() 227 | email_id = email_data["Subject"]  # note: deduplicates by Subject, not Message-ID 228 | if email_id not in active_threads: 229 | thread = threading.Thread( 230 | target=process_email, 231 | args=(email_data,), 232 | daemon=True, 233 | name=f"EmailProcessor-{email_id}", 234 | ) 235 | active_threads[email_id] = thread 236 | thread.start() 237 | time.sleep(0.05)  # brief pause so the loop doesn't busy-spin at MAX_THREADS, then clean up finished threads 238 | for email_id, thread in list(active_threads.items()): 239 | if not thread.is_alive(): 240 | del active_threads[email_id] 241 | 242 | 243 | def start_processing(): 244 | processor_thread = threading.Thread( 245 | target=email_processor, daemon=True, name="EmailProcessorThread" 246 | ) 247 | processor_thread.start()
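# Hypothetical usage sketch (wiring assumed, not shown in this file): the app
# entry point would call start_processing() once at startup, after the email
# fetcher begins filling email_queue:
#
#   from tasks.processor import start_processing
#   start_processing()  # returns immediately; work happens on daemon threads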
248 | -------------------------------------------------------------------------------- /tasks/storage.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import json 3 | import logging 4 | 5 | from typing import Dict, List 6 | 7 | from nexus_python.nexusdb import NexusDB 8 | from typeid import TypeID 9 | 10 | from utils.ollama import get_ollama_embedding 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | class SingleTaskListStorage(NexusDB): 16 | def __init__(self): 17 | super().__init__() 18 | 19 | def append(self, task: Dict): 20 | logger.debug(f"Appending task: {task}") 21 | if "uuid" not in task or not task["uuid"]: 22 | task_id = str(TypeID(prefix="action")) 23 | task["uuid"] = task_id 24 | task["actionStatus"] = "Active" 25 | if "potentialAction" not in task: 26 | task["potentialAction"] = None 27 | fields = list(task.keys()) 28 | values = [list(task.values())] 29 | self.insert("Action", fields, values) 30 | 31 | def next_task_id(self): 32 | return str(TypeID(prefix="action")) 33 | 34 | def get_tasks(self, object=None, condition=None): 35 | conditions = self.prepare_conditions(object, condition) 36 | return self.fetch_tasks(conditions) 37 | 38 | def prepare_conditions(self, object, condition): 39 | conditions = [condition] if condition else [] 40 | 41 | if object: 42 | conditions.extend(self.get_conditions_for_object(object)) 43 | 44 | return " , ".join(conditions) if conditions else "" 45 | 46 | def get_conditions_for_object(self, object): 47 | objective_ids = self.get_objective_ids(object) 48 | logger.debug(f"Objective IDs: {objective_ids}\n\n\n") 49 | if not objective_ids:  # covers both None and an empty list 50 | return [f"object = '{object}'"] 51 | 52 | try: 53 | related_uuids = self.get_related_uuids(objective_ids[0]) 54 | if related_uuids: 55 | uuid_list = ", ".join([f"'{uuid}'" for uuid in related_uuids]) 56 | return [f"is_in('uuid', [{uuid_list}])"] 57 | else: 58 | return [f"uuid = '{objective_ids[0]}'"] 59 | except Exception as e: 60 | logger.error(f"Error executing recursive query: {e}") 61 | return [] 62 | 63 | def get_objective_ids(self, object=None): 64 | if object: 65 | objective = self.lookup( 66 | "Action", condition=f"object = '{object}', identifier = 0" 67 | ) 68 | logger.debug(f"Objective: {objective}\n\n\n") 69 | else: 70 | objective = self.lookup("Action", condition="identifier = 0") 71 | logger.debug(f"Objective: {objective}") 72 | return [row[0] for row in json.loads(objective)["rows"]] 73 | 74 | def get_related_uuids(self, objective_id): 75 | result = self.recursive_query( 76 | relation_name="Graph", 77 | source_field="sourceId", 78 | target_field="targetId", 79 | starting_condition=f"targetId = '{objective_id}'", 80 | ) 81 | return [row[0] for row in json.loads(result)["rows"]] 82 | 
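# Sketch of the traversal above (Graph edge semantics assumed from the call
# sites): the objective row (identifier = 0) is the root, and each Graph edge
# links a subtask's sourceId to its parent's targetId, so recursive_query()
# walks the tree and returns every descendant task UUID for the email.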
83 | def fetch_tasks(self, condition_str): 84 | fields = [ 85 | "name", 86 | "uuid", 87 | "object", 88 | "identifier", 89 | "actionStatus", 90 | "agent", 91 | "potentialAction", 92 | ] 93 | if condition_str: 94 | tasks = self.lookup("Action", fields, condition=condition_str) 95 | else: 96 | tasks = self.lookup("Action", fields) 97 | 98 | return self.process_tasks(json.loads(tasks)) 99 | 100 | def process_tasks(self, tasks): 101 | 102 | logger.debug(f"Processing Tasks from lookup: {tasks}\n\n") 103 | 104 | # Create a dictionary to map UUIDs to task names 105 | uuid_to_name = {} 106 | 107 | # First pass: Collect UUIDs and their corresponding task names 108 | for task in tasks["rows"]: 109 | uuid = task[1] 110 | name = task[0] 111 | uuid_to_name[uuid] = name 112 | 113 | # Second pass: Construct task data with potentialAction names 114 | task_data = {} 115 | for task in tasks["rows"]: 116 | uuid = task[1] 117 | 118 | potential_actions = None 119 | if task[6] != "Null": 120 | try: 121 | potential_actions = task[6] 122 | 123 | action_names = [ 124 | uuid_to_name.get(action, action) for action in potential_actions 125 | ] 126 | 127 | except (TypeError, KeyError) as e: 128 | logger.error(f"Error parsing potentialAction for task {uuid}: {e}") 129 | action_names = potential_actions = task[6]  # fall back to the raw values so action_names is always bound 130 | 131 | task_data[uuid] = { 132 | "name": task[0], 133 | "uuid": uuid, 134 | "object": task[2], 135 | "identifier": task[3], 136 | "actionStatus": task[4], 137 | "agent": task[5], 138 | "potentialAction": action_names if potential_actions else None, 139 | } 140 | 141 | logger.debug(f"Tasks: {task_data}") 142 | return task_data 143 | 144 | def add_subtasks( 145 | self, 146 | current_task_id: str, 147 | current_task_name: str, 148 | potential_actions: List[Dict[str, str]] | None, 149 | max_identifier: int, 150 | ): 151 | current_identifier = max_identifier + 1 152 | subtasks = [] 153 | task_data = {} 154 | 155 | if not potential_actions: 156 | return current_identifier, task_data 157 | 158 | for action in potential_actions: 159 | current_identifier += 1 160 | task_id = self.next_task_id() 161 | self.upsert( 162 | "Action", 163 | ["uuid", "name", "actionStatus", "identifier", "object", "agent"], 164 | [ 165 | [ 166 | task_id, 167 | action["task"], 168 | "Active", 169 | current_identifier, 170 | current_task_id, 171 | action.get("agent", "Human"), 172 | ] 173 | ], 174 | ) 175 | 176 | subtasks.append(task_id) 177 | 178 | task_data[task_id] = { 179 | "name": action["task"], 180 | "uuid": task_id, 181 | "object": current_task_id, 182 | "identifier": current_identifier, 183 | "actionStatus": "Active", 184 | "agent": action.get("agent", "Human"), 185 | } 186 | 187 | self.update( 188 | "Action", 189 | ["uuid", "name", "potentialAction"], 190 | [[current_task_id, current_task_name, subtasks]], 191 | ) 192 | logger.debug( 193 | f"Updated potentialAction for task UUID '{current_task_id}' with: {subtasks}" 194 | ) 195 | 196 | return current_identifier, task_data 197 | 198 | def update_task_status( 199 | self, task_uuid: str, task_name: str, status: str, result: str 200 | ): 201 | raw_result = f'___"{result}"___'  # wrapping the result like this lets us store newlines, tabs, and other special characters in the database without breaking the query 202 | 203 | vector_embeddings = get_ollama_embedding(result) 204 | 205 | # Update method here so we don't overwrite any field that is not being updated 206 | self.update( 207 | "Action", 208 | ["uuid", "name", "actionStatus", "result"], 209 | [[task_uuid, task_name, status, raw_result]], 210 | ) 211 | 212 | # Need to do this part separately because Update will fail if the text field does not already exist 213 | self.upsert( 214 | "Action", 215 | text=raw_result, 216 | embeddings=vector_embeddings, 217 | references=[["Action", [task_uuid]]], 218 | ) 219 | logger.debug(f"Updated actionStatus for task UUID '{task_uuid}' to '{status}'") 220 | 221 | def get_previous_results(self, email_id: str): 222 | results = self.lookup("Action", ["result"], condition=f"object = '{email_id}'") 223 | results = json.loads(results) 224 | return [result[0] for result in results["rows"]] 225 | 226 | def get_context(self, query: str, top_results_num: int): 227 | query_embedding = get_ollama_embedding(query) 228 | results = self.vector_search( 229 | query_vector=query_embedding, number_of_results=top_results_num 230 | ) 231 | try: 232 | results = json.loads(results) 233 | except json.JSONDecodeError as e: 234 | logger.error(f"Failed to decode JSON results: {e}") 235 | return [] 236 | 237 | context_list = [] 238 | 239 | if "rows" not in results: 240 | logger.error("Malformed results: missing 'rows'") 241 | return context_list 242 | 243 | for row in results["rows"]: 244 | if len(row) > 1 and isinstance(row[1], str): 245 | context_text = row[1] 246 | context_list.append(context_text.strip('"')) 247 | return context_list
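# Hedged usage sketch (values illustrative): update_task_status() stores each
# result with an embedding, which get_context() later searches by similarity:
#
#   storage = SingleTaskListStorage()
#   context = storage.get_context("draft a reply about golf", top_results_num=5)
#   # -> up to 5 previous result strings, ranked by vector similarity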
row in results["rows"]: 244 | if len(row) > 1 and isinstance(row[1], str): 245 | context_text = row[1] 246 | context_list.append(context_text.strip('"')) 247 | return context_list 248 | -------------------------------------------------------------------------------- /tests/babyagi.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import re 4 | import time 5 | from collections import deque 6 | from typing import Dict, List 7 | 8 | import ollama 9 | from dotenv import load_dotenv 10 | 11 | from other.nexusdb import NexusDB 12 | 13 | # Email data 14 | email_data = [ 15 | { 16 | "Body": '
Hello,\r\n\r\nPlease write a haiku about golf. If you have any questions, please email me at john.doe@example.com.\r\n\r\nThank you,\r\nJohn Doe
\r\n\r\n', 17 | "From": "John Doe <john.doe@example.com>",  # address restored from the email body 18 | "Message-ID": "01j0r9h83tfjyrjj8trk7zxe6v", 19 | "Subject": "Haiku about golf", 20 | "Timestamp": "Tue, 14 May 2024 19:14:56 +0000", 21 | "To": "test@gmail.com", 22 | } 23 | ] 24 | 25 | 26 | # Task storage supporting only a single instance of BabyAGI 27 | class SingleTaskListStorage: 28 | def __init__(self): 29 | self.tasks = deque([]) 30 | self.task_id_counter = 0 31 | 32 | def append(self, task: Dict): 33 | self.tasks.append(task) 34 | 35 | def replace(self, tasks: List[Dict]): 36 | self.tasks = deque(tasks) 37 | 38 | def popleft(self): 39 | return self.tasks.popleft() 40 | 41 | def is_empty(self): 42 | return not self.tasks 43 | 44 | def next_task_id(self): 45 | self.task_id_counter += 1 46 | return self.task_id_counter 47 | 48 | def get_task_names(self): 49 | return [t["task_name"] for t in self.tasks] 50 | 51 | 52 | # Initialize tasks storage 53 | tasks_storage = SingleTaskListStorage() 54 | 55 | 56 | def get_ollama_embedding(text): 57 | text = text.replace("\n", " ") 58 | response = ollama.embeddings(model="mxbai-embed-large", prompt=text) 59 | return response["embedding"] 60 | 61 | 62 | def task_creation_agent( 63 | objective: str, result: Dict, task_description: str, task_list: List[str] 64 | ): 65 | prompt = f""" 66 | You are to use the result from an execution agent to create new tasks with the following objective: {objective}. 67 | The last completed task has the result: \n{result["data"]} 68 | This result was based on this task description: {task_description}.\n""" 69 | 70 | if task_list: 71 | prompt += f"These are incomplete tasks: {', '.join(task_list)}\n" 72 | prompt += "Based on the result, return a list of tasks to be completed in order to meet the objective. " 73 | if task_list: 74 | prompt += "These new tasks must not overlap with incomplete tasks. " 75 | 76 | prompt += """ 77 | Return one task per line in your response. The result must be a numbered list in the format: 78 | 79 | #. First task 80 | #. Second task 81 | 82 | The number of each entry must be followed by a period. If your list is empty, write "There are no tasks to add at this time."
83 | Unless your list is empty, do not include any headers before your numbered list or follow your numbered list with any other output.""" 84 | 85 | print(f"\n*****TASK CREATION AGENT PROMPT****\n{prompt}\n") 86 | response = ollama.generate( 87 | model="llama3", 88 | prompt=prompt, 89 | ) 90 | 91 | if isinstance(response, dict): 92 | if "response" in response: 93 | response_text = response["response"] 94 | print(f"\n****TASK CREATION AGENT RESPONSE****\n{response_text}\n") 95 | new_tasks = response_text.split("\n") 96 | new_tasks_list = [] 97 | for task_string in new_tasks: 98 | task_parts = task_string.strip().split(".", 1) 99 | if len(task_parts) == 2: 100 | task_id = "".join(s for s in task_parts[0] if s.isnumeric()) 101 | task_name = re.sub(r"[^\w\s_]+", "", task_parts[1]).strip() 102 | if task_name.strip() and task_id.isnumeric(): 103 | new_tasks_list.append(task_name) 104 | out = [{"task_name": task_name} for task_name in new_tasks_list] 105 | return out 106 | else: 107 | raise Exception("No 'response' found in the API response") 108 | else: 109 | raise Exception("Response is not a dictionary") 110 | 111 | 112 | def prioritization_agent():  # reads the module-level OBJECTIVE set in main() 113 | task_names = tasks_storage.get_task_names() 114 | bullet_string = "\n" 115 | 116 | prompt = f""" 117 | You are tasked with prioritizing the following tasks: {bullet_string + bullet_string.join(task_names)} 118 | Consider the ultimate objective of your team: {OBJECTIVE}. 119 | Tasks should be sorted from highest to lowest priority, where higher-priority tasks are those that act as pre-requisites or are more essential for meeting the objective. 120 | Do not remove any tasks. Return the ranked tasks as a numbered list in the format: 121 | 122 | #. First task 123 | #. Second task 124 | 125 | The entries must be consecutively numbered, starting with 1. The number of each entry must be followed by a period. 126 | Do not include any headers before your ranked list or follow your list with any other output.""" 127 | 128 | print(f"\n****TASK PRIORITIZATION AGENT PROMPT****\n{prompt}\n") 129 | response = ollama.generate( 130 | model="llama3", 131 | prompt=prompt, 132 | ) 133 | 134 | if isinstance(response, dict): 135 | if "response" in response: 136 | response_text = response["response"] 137 | new_tasks = response_text.strip().split("\n") 138 | else: 139 | raise Exception(f"Unexpected response structure: {response}") 140 | else: 141 | raise Exception("Response is not a dictionary") 142 | print(f"\n****TASK PRIORITIZATION AGENT RESPONSE****\n{response_text}\n") 143 | if not response_text: 144 | print( 145 | "Received empty response from prioritization agent. Keeping task list unchanged."
146 | ) 147 | return 148 | 149 | new_tasks_list = [] 150 | for task_string in new_tasks: 151 | task_parts = task_string.strip().split(".", 1) 152 | if len(task_parts) == 2: 153 | task_id = "".join(s for s in task_parts[0] if s.isnumeric()) 154 | task_name = re.sub(r"[^\w\s_]+", "", task_parts[1]).strip() 155 | if task_name.strip(): 156 | new_tasks_list.append({"task_id": task_id, "task_name": task_name}) 157 | 158 | return new_tasks_list 159 | 160 | 161 | def execution_agent(db, objective: str, task: str) -> str: 162 | context = context_agent(db, query=objective, top_results_num=5) 163 | prompt = f"You are an AI who performs one task based on the following objective: {objective}.\n" 164 | if context: 165 | prompt += "Take into account these previously completed tasks:" + "\n".join( 166 | context 167 | ) 168 | prompt += f"\nYour task: {task}\nResponse:" 169 | response = ollama.generate( 170 | model="llama3", 171 | prompt=prompt, 172 | stream=False, 173 | ) 174 | 175 | if isinstance(response, dict): 176 | if "response" in response: 177 | response_text = response["response"] 178 | # Return the completed task's text so main() can store and embed it as a string 179 | return response_text.strip() 180 | else: 181 | raise Exception(f"Unexpected response structure: {response}") 182 | else: 183 | raise Exception("Response is not a dictionary") 184 | 185 | 186 | def context_agent(db, query: str, top_results_num: int): 187 | query_embedding = get_ollama_embedding(query) 188 | results = db.vector_search( 189 | query_vector=query_embedding, number_of_results=top_results_num 190 | ) 191 | results = json.loads(results) 192 | print(f"\n\nContext search results:\n{results}\n\n") 193 | return [row[1].strip('"') for row in results.get("rows", [])] 194 | 195 | 196 | def store_results(db, task: Dict, result: str, result_id: str): 197 | vector = get_ollama_embedding(result) 198 | db.insert_with_vector( 199 | relation_name="tasks", 200 | task_id=result_id, 201 | text=result, 202 | embeddings=vector, 203 | metadata={"task": task["task_name"], "result": result}, 204 | ) 205 | 206 | 207 | def objective_agent(to, from_email, subject, timestamp, body, attachments): 208 | prompt = f""" 209 | You are an AI assistant that processes emails. You have received an email with the following details: 210 | To: {to}, From: {from_email}, Subject: {subject}, Timestamp: {timestamp}, Body: {body}, Attachments: {attachments}. 211 | Based on this information, determine whether the email contains a task for the recipient and, if so, return that task as a string. 212 | If you don't believe there are any tasks, return the string, "No tasks found." Do not include quotes. RETURN ONLY THIS STRING AND DO NOT INCLUDE ANY OTHER OUTPUT. 213 | """ 214 | 215 | print(prompt) 216 | response = ollama.generate( 217 | model="llama3", 218 | prompt=prompt, 219 | ) 220 | 221 | # Print the full response for debugging 222 | print(f"Full response: {response}") 223 | 224 | if isinstance(response, dict): 225 | if "response" in response: 226 | response_text = response["response"].strip() 227 | if response_text == "No tasks found."
or not response_text: 228 | return {"tasks_found": False, "tasks": []} 229 | else: 230 | new_tasks = response_text.split("\n") 231 | return { 232 | "tasks_found": True, 233 | "tasks": [{"task_name": task_name} for task_name in new_tasks], 234 | } 235 | else: 236 | raise Exception(f"Unexpected response structure: {response}") 237 | else: 238 | raise Exception("Response is not a dictionary") 239 | 240 | 241 | def main(): 242 | global OBJECTIVE  # make the objective set below visible to prioritization_agent 243 | load_dotenv()  # load environment variables from .env file 244 | 245 | # Extract email information 246 | email = email_data[0] 247 | to = email["To"] 248 | from_email = email["From"] 249 | subject = email["Subject"] 250 | timestamp = email["Timestamp"] 251 | body = email["Body"] 252 | attachments = ""  # Assuming no attachments for simplicity 253 | 254 | # Determine the objective dynamically 255 | objective_response = objective_agent( 256 | to, from_email, subject, timestamp, body, attachments 257 | ) 258 | 259 | if not objective_response["tasks_found"]: 260 | print("No tasks identified in the email. Exiting.") 261 | return 262 | 263 | # Set the first task as the objective 264 | OBJECTIVE = objective_response["tasks"][0]["task_name"] 265 | print("\033[96m\033[1m" + "\n*****OBJECTIVE*****\n" + "\033[0m\033[0m") 266 | print(OBJECTIVE) 267 | 268 | # Initialize NexusDB 269 | db = NexusDB() 270 | 271 | JOIN_EXISTING_OBJECTIVE = False 272 | 273 | # Add the initial task if starting new objective 274 | if not JOIN_EXISTING_OBJECTIVE: 275 | initial_task = { 276 | "task_id": tasks_storage.next_task_id(), 277 | "task_name": "Develop a task list.", 278 | } 279 | tasks_storage.append(initial_task) 280 | 281 | # Main loop 282 | loop = True 283 | while loop: 284 | if not tasks_storage.is_empty(): 285 | print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m") 286 | for t in tasks_storage.get_task_names(): 287 | print(" • " + str(t)) 288 | 289 | task = tasks_storage.popleft() 290 | print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m") 291 | print(str(task["task_name"])) 292 | 293 | result = execution_agent(db, OBJECTIVE, str(task["task_name"])) 294 | print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m") 295 | print(result) 296 | 297 | enriched_result = {"data": result} 298 | result_id = f"result_{task['task_id']}" 299 | 300 | store_results(db, task, result, result_id) 301 | 302 | new_tasks = task_creation_agent( 303 | OBJECTIVE, 304 | enriched_result, 305 | task["task_name"], 306 | tasks_storage.get_task_names(), 307 | ) 308 | 309 | print("Adding new tasks to task_storage") 310 | for new_task in new_tasks: 311 | new_task.update({"task_id": tasks_storage.next_task_id()}) 312 | print(str(new_task)) 313 | tasks_storage.append(new_task) 314 | 315 | if not JOIN_EXISTING_OBJECTIVE: 316 | prioritized_tasks = prioritization_agent() 317 | if prioritized_tasks: 318 | tasks_storage.replace(prioritized_tasks) 319 | 320 | time.sleep(5) 321 | else: 322 | print("Done.") 323 | loop = False 324 | 325 | 326 | if __name__ == "__main__": 327 | main() 328 | -------------------------------------------------------------------------------- /tests/colorlogs.py: -------------------------------------------------------------------------------- 1 | import colorlog 2 | from colorlog import ColoredFormatter 3 | 4 | # Define a mapping from thread names to colors 5 | THREAD_COLOR_MAPPING = { 6 | "EmailProcessor": "red", 7 | "email_fetcher": "green", 8 | "EntityExtraction": "blue", 9 | # Add other thread name to color mappings here 10 | } 11 | 12 | 13 | class
ThreadNameColoredFormatter(ColoredFormatter): 14 | def __init__(self, *args, **kwargs): 15 | super().__init__(*args, **kwargs) 16 | # Set the default log colors based on log levels 17 | self.default_log_colors = { 18 | "DEBUG": "cyan", 19 | "INFO": "white",  # Default color for INFO, will be overridden by thread color if available 20 | "WARNING": "yellow", 21 | "ERROR": "red", 22 | "CRITICAL": "red,bg_white", 23 | } 24 | 25 | def format(self, record): 26 | # Set the log color based on log level 27 | self.log_colors = self.default_log_colors.copy() 28 | 29 | # Override the log color for INFO based on thread name if applicable 30 | if record.levelname == "INFO": 31 | thread_name = ( 32 | record.threadName.split("-")[0] if record.threadName else "Thread" 33 | )  # Use the base name for mapping 34 | thread_log_color = THREAD_COLOR_MAPPING.get(thread_name, "white") 35 | self.log_colors["INFO"] = thread_log_color 36 | 37 | return super().format(record) 38 | 39 | 40 | # Configure the formatter 41 | formatter = ThreadNameColoredFormatter( 42 | "%(log_color)s%(asctime)s - %(name)s - %(levelname)s - [%(threadName)s] - %(message)s", 43 | datefmt="%Y-%m-%d %H:%M:%S", 44 | ) 45 | 46 | # Usage example in the main application setup 47 | if __name__ == "__main__": 48 | import logging 49 | import threading 50 | 51 | # Set up the handler and logger 52 | handler = logging.StreamHandler() 53 | handler.setFormatter(formatter) 54 | 55 | logger = logging.getLogger() 56 | logger.addHandler(handler) 57 | logger.setLevel(logging.DEBUG) 58 | 59 | # Define a test function to generate logs in different threads 60 | def log_messages(logger): 61 | logger.debug("This is a DEBUG message") 62 | logger.info("This is an INFO message") 63 | logger.warning("This is a WARNING message") 64 | logger.error("This is an ERROR message") 65 | logger.critical("This is a CRITICAL message") 66 | 67 | # Create and start threads 68 | threads = [] 69 | for thread_name in ["EmailProcessor", "email_fetcher", "EntityExtraction"]: 70 | thread_logger = logging.getLogger(thread_name) 71 | thread = threading.Thread( 72 | name=thread_name, target=log_messages, args=(thread_logger,) 73 | ) 74 | threads.append(thread) 75 | thread.start() 76 | 77 | # Log from the main thread 78 | log_messages(logger) 79 | 80 | # Wait for all threads to complete 81 | for thread in threads: 82 | thread.join() 83 | -------------------------------------------------------------------------------- /tests/embedding.py: -------------------------------------------------------------------------------- 1 | import ollama 2 | 3 | 4 | def get_embedding_length(): 5 | model = "mxbai-embed-large" 6 | prompt = "NexusDB is the best database." 7 | 8 | response = ollama.embeddings(model, prompt) 9 | print(f"Embedding length: {len(response['embedding'])}") 10 | 11 | 12 | def chat(): 13 | print("Chatting with the Llama AI...\n\n") 14 | model = "llama3" 15 | prompt = "Please say 'Hello' and nothing else."
16 | 17 | response = ollama.generate(model, prompt, stream=False) 18 | print(response) 19 | 20 | 21 | # Call the function 22 | chat() 23 | # get_embedding_length() 24 | -------------------------------------------------------------------------------- /tests/entity_add.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import List 3 | 4 | import ollama 5 | from ollama import Message 6 | 7 | 8 | def conditional_entity_addition(data): 9 | # Retrieve the entity type from the data 10 | entity_type = data.get("entity_type", None) 11 | # If no entity type is provided, return an error 12 | if not entity_type: 13 | return {"error": "Entity type is required."}, 400 14 | 15 | # Adjusted to access nested 'data' 16 | entity_data = data.get("data", {}) 17 | 18 | search_results = [ 19 | {"id": "1", "name": "John Smith"}, 20 | {"id": "2", "name": "Jane Doe"}, 21 | ] 22 | 23 | # Combine all search results 24 | combined_results = {result["id"]: result for result in search_results}.values() 25 | print(f"Combined results: {list(combined_results)}") 26 | combined_results_str = ", ".join(json.dumps(result) for result in combined_results) 27 | 28 | # Prepare the messages for the Ollama chat API 29 | prompt: List[Message] = [ 30 | Message( 31 | role="system", 32 | content="You are a helpful assistant whose specialty is to decide if new input data matches data already in our database. Review the search results provided, compare against the input data, and if there's a match respond with the ID number of the match, and only the ID number. If there are no matches, respond with 'No Matches'. Your response is ALWAYS an ID number alone, or 'No Matches'. When reviewing whether a match exists in our search results for the new input, take into account that the name may not match perfectly (for example, one might have just a first name, or a nickname, while the other has a full name), in which case look at the additional information about the user to determine if there's a strong likelihood they are the same person. For companies, you should consider different names of the same company as the same, such as EA and Electronic Arts (make your best guess). If the likelihood is strong, respond with and only with the ID number. If the likelihood is low, respond with 'No Matches'.", 33 | ), 34 | Message( 35 | role="user", 36 | content=f"Here are the search results: {combined_results_str}.
Does any entry match the input data: {entity_data}?", 37 | ), 38 | ] 39 | 40 | # Make a call to the Ollama chat API 41 | try: 42 | response = ollama.chat( 43 | model="llama3", 44 | messages=prompt, 45 | stream=True, 46 | ) 47 | 48 | if isinstance(response, dict) and "response" in response: 49 | response_text = response["response"] 50 | return response_text 51 | else: 52 | ai_response = "" 53 | for chunk in response: 54 | if isinstance(chunk, dict): 55 | if ( 56 | "message" in chunk 57 | and isinstance(chunk["message"], dict) 58 | and "content" in chunk["message"] 59 | ): 60 | print(chunk["message"]["content"], end="", flush=True) 61 | ai_response += chunk["message"]["content"] 62 | else: 63 | raise Exception("Invalid chunk structure") 64 | print(f"AI response: {ai_response}") 65 | 66 | # Process the AI's response 67 | if "no matches" in ai_response.lower(): 68 | # If no match found, add the new entity 69 | # entity_id = add_entity(entity_type, data) 70 | print("adding entity\n\n") 71 | entity_id = "123" 72 | return {"success": True, "entity_id": entity_id}, 200 73 | else: 74 | # If a match is found, return the match details 75 | match_id = ai_response 76 | return { 77 | "success": False, 78 | "message": "Match found", 79 | "match_id": match_id, 80 | }, 200 81 | 82 | except Exception as e: 83 | print(f"Error calling Ollama: {e}") 84 | return {"error": str(e)}, 500 85 | 86 | 87 | # Mocking the input data 88 | input_data = {"entity_type": "person", "data": {"name": "John Doe", "age": 30}} 89 | 90 | # Call the function and print the result 91 | result, status_code = conditional_entity_addition(input_data) 92 | print(result, status_code) 93 | -------------------------------------------------------------------------------- /tests/graph_agent.py: -------------------------------------------------------------------------------- 1 | import ollama 2 | 3 | 4 | def entity_extraction_agent(text_input): 5 | prompt = [ 6 | { 7 | "role": "system", 8 | "content": """You are an AI expert specializing in knowledge graph creation with the goal of capturing relationships based on a given input or request. 9 | You are given input in various forms such as paragraphs, emails, text files, and more. 10 | Your task is to create a knowledge graph based on the input. 11 | Only use organizations, people, and events as nodes and do not include concepts or products. 12 | Only add nodes that have a relationship with at least one other node. 13 | Make sure that the node type (people, org, event) matches the to_type or for_type when the entity is part of a relationship. 14 | Return the knowledge graph as a JSON object. DO NOT INCLUDE ANYTHING ELSE IN THE RESPONSE.""", 15 | }, 16 | { 17 | "role": "user", 18 | "content": "Can you please help John Smith from IT get access to the system?
He needs it as part of the IT Modernization effort.", 19 | }, 20 | { 21 | "role": "assistant", 22 | "content": '{"entities": [{"name": "Modernization of the IT infrastructure", "type": "Project", "description": "A project to modernize the IT infrastructure of the company.", "department": "IT"}, {"name": "John Smith", "type": "Person", "memberOf": "IT"}, {"name": "IT", "type": "Organization", "description": "The IT department of the company.", "member": "John Smith"}]}', 23 | }, 24 | {"role": "user", "content": text_input}, 25 | ] 26 | 27 | response = ollama.chat( 28 | model="llama3", 29 | messages=prompt, 30 | stream=True, 31 | ) 32 | 33 | if isinstance(response, dict) and "response" in response: 34 | response_text = response["response"] 35 | return response_text 36 | else: 37 | try: 38 | for chunk in response: 39 | if isinstance(chunk, dict): 40 | if ( 41 | "message" in chunk 42 | and isinstance(chunk["message"], dict) 43 | and "content" in chunk["message"] 44 | ): 45 | print(chunk["message"]["content"], end="", flush=True) 46 | else: 47 | raise Exception("Invalid chunk structure") 48 | return "done" 49 | except Exception as e: 50 | raise Exception(f"No 'response' found in the API response: {e}") 51 | 52 | 53 | response = entity_extraction_agent( 54 | "Adam from team A will be able to help answer any questions." 55 | ) 56 | print(response) 57 | -------------------------------------------------------------------------------- /tests/ollama_raw.py: -------------------------------------------------------------------------------- 1 | import ollama 2 | 3 | 4 | def entity_extraction_agent(text_input): 5 | response = ollama.generate( 6 | model="llama3", 7 | prompt=text_input, 8 | # raw=True, 9 | stream=True, 10 | ) 11 | 12 | if isinstance(response, dict) and "response" in response: 13 | response_text = response["response"] 14 | return response_text 15 | else: 16 | try: 17 | for chunk in response: 18 | if isinstance(chunk, dict): 19 | print(chunk["response"], end="", flush=True) 20 | return "done" 21 | except Exception as e: 22 | raise Exception(f"No 'response' found in the API response: {e}") 23 | 24 | 25 | response = entity_extraction_agent(".") 26 | print(response) 27 | -------------------------------------------------------------------------------- /tests/ollama_streaming.py: -------------------------------------------------------------------------------- 1 | import threading 2 | from typing import List 3 | 4 | from ollama import Message 5 | 6 | from utils.ollama import ollama_chat 7 | 8 | # Dummy Messages for Testing 9 | messages: List[Message] = [ 10 | Message(role="user", content="Hello, how are you?"), 11 | Message(role="user", content="What is the weather like today?"), 12 | Message(role="user", content="Tell me a joke."), 13 | Message(role="user", content="What's the capital of France?"), 14 | Message(role="user", content="What's the latest news?"), 15 | ] 16 | 17 | # Dummy model name 18 | model = "llama3" 19 | 20 | # Define the number of threads 21 | num_threads = len(messages) 22 | 23 | 24 | # Test function to run in threads 25 | def test_ollama_chat(thread_id: int, message: Message): 26 | print(f"Thread-{thread_id} starting with message: {message}") 27 | response = ollama_chat(model=model, messages=[message], stream=True) 28 | print(f"\nThread-{thread_id} received response:\n{response}\n") 29 | 30 | 31 | # Create and start threads 32 | threads = [] 33 | for i in range(num_threads): 34 | thread = threading.Thread(target=test_ollama_chat, args=(i, messages[i])) 35 | threads.append(thread)
36 | thread.start() 37 | 38 | # Wait for all threads to complete 39 | for thread in threads: 40 | thread.join() 41 | 42 | print("All threads completed.") 43 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Astra-Analytics/Task-Agent-Starter-Kit/011cccfd852920748f66d7e6dd80ca2aaf974f70/utils/__init__.py -------------------------------------------------------------------------------- /utils/custom_log_formatter.py: -------------------------------------------------------------------------------- 1 | from colorlog import ColoredFormatter 2 | 3 | # Define a mapping from thread names to colors 4 | THREAD_COLOR_MAPPING = { 5 | "EmailProcessor": "cyan", 6 | "email_fetcher": "purple", 7 | "EntityExtraction": "blue", 8 | # Add other thread name to color mappings here 9 | } 10 | 11 | 12 | class ThreadNameColoredFormatter(ColoredFormatter): 13 | def __init__(self, *args, **kwargs): 14 | super().__init__(*args, **kwargs) 15 | # Set the default log colors based on log levels 16 | self.default_log_colors = { 17 | "DEBUG": "light_white", 18 | "INFO": "white", # Default color for INFO, will be overridden by thread color if available 19 | "WARNING": "yellow", 20 | "ERROR": "red", 21 | "CRITICAL": "red,bg_white", 22 | } 23 | 24 | def format(self, record): 25 | # Set the log color based on log level 26 | self.log_colors = self.default_log_colors.copy() 27 | 28 | # Override the log color for INFO based on thread name if applicable 29 | if record.levelname == "INFO": 30 | thread_name = ( 31 | record.threadName.split("-")[0] if record.threadName else "Thread" 32 | ) # Use the base name for mapping 33 | thread_log_color = THREAD_COLOR_MAPPING.get(thread_name, "white") 34 | self.log_colors["INFO"] = thread_log_color 35 | 36 | return super().format(record) 37 | -------------------------------------------------------------------------------- /utils/ollama.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import threading 3 | from typing import Any, Dict, Iterator, List, Mapping, Union 4 | 5 | import ollama 6 | from ollama import Message 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | # Initialize a lock 11 | print_lock = threading.Lock() 12 | 13 | 14 | def get_ollama_embedding(text): 15 | text = text.replace("\n", " ") 16 | response = ollama.embeddings(model="mxbai-embed-large", prompt=text) 17 | return response["embedding"] 18 | 19 | 20 | def handle_response( 21 | response: Union[Dict[str, Any], Iterator[Mapping[str, Any]]], stream: bool = False 22 | ) -> str: 23 | if isinstance(response, dict) and "response" in response: 24 | return response["response"].strip() 25 | elif stream: 26 | ai_response = "" 27 | try: 28 | with print_lock: # Acquire the lock 29 | for chunk in response: 30 | if isinstance(chunk, Mapping) and "message" in chunk: 31 | message = chunk["message"] 32 | if isinstance(message, Mapping) and "content" in message: 33 | print(message["content"], end="", flush=True) 34 | ai_response += message["content"] 35 | elif isinstance(message, str): 36 | print(message, end="", flush=True) 37 | ai_response += message 38 | else: 39 | raise Exception("Invalid chunk structure") 40 | elif isinstance(chunk, Mapping) and "response" in chunk: 41 | print(chunk["response"], end="", flush=True) 42 | ai_response += chunk["response"] 43 | else: 44 | raise Exception("Invalid chunk structure") 45 
| return ai_response 46 | except Exception as e: 47 | raise Exception(f"No 'response' found in the API response: {e}") 48 | else: 49 | raise Exception(f"Unexpected response structure: {response}") 50 | 51 | 52 | def ollama_generate(model: str, prompt: str, stream: bool = False) -> str: 53 | response = ollama.generate(model=model, prompt=prompt, stream=stream) 54 | if isinstance(response, (dict, Iterator)): 55 | return handle_response(response, stream=stream) 56 | else: 57 | raise TypeError("Invalid response type") 58 | 59 | 60 | def ollama_chat(model: str, messages: List[Message], stream: bool = False) -> str: 61 | response = ollama.chat(model=model, messages=messages, stream=stream) 62 | if isinstance(response, (dict, Iterator)): 63 | return handle_response(response, stream=stream) 64 | else: 65 | raise TypeError("Invalid response type") 66 | --------------------------------------------------------------------------------