├── .flake8 ├── .github ├── CODEOWNERS ├── dependabot.yml └── workflows │ ├── black.yml │ └── flake8.yml ├── .gitignore ├── LICENSE ├── README.md ├── ai ├── ai_constants.py └── providers │ ├── __init__.py │ ├── anthropic.py │ ├── base_provider.py │ ├── openai.py │ └── vertexai.py ├── app.py ├── app_oauth.py ├── data └── .gitignore ├── listeners ├── __init__.py ├── actions │ ├── __init__.py │ └── set_user_selection.py ├── commands │ ├── __init__.py │ └── ask_command.py ├── events │ ├── __init__.py │ ├── app_home_opened.py │ ├── app_mentioned.py │ └── app_messaged.py ├── functions │ ├── __init__.py │ └── summary_function.py └── listener_utils │ ├── listener_constants.py │ └── parse_conversation.py ├── manifest.json ├── pyproject.toml ├── requirements.txt ├── slack.json ├── state_store ├── __init__.py ├── file_state_store.py ├── get_user_state.py ├── set_user_state.py ├── user_identity.py └── user_state_store.py └── tests └── __init__.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 125 3 | exclude = .gitignore,venv 4 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Code owners are the default owners for everything in 2 | # this repository. The owners listed below will be requested for 3 | # review when a pull request is opened. 4 | # To add code owner(s), uncomment the line below and 5 | # replace the @global-owner users with their GitHub username(s). 
6 | # * @global-owner1 @global-owner2 7 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "pip" 4 | directory: "/" 5 | schedule: 6 | interval: "monthly" 7 | labels: 8 | - "pip" 9 | - "dependencies" 10 | - package-ecosystem: "github-actions" 11 | directory: "/" 12 | schedule: 13 | interval: "monthly" 14 | -------------------------------------------------------------------------------- /.github/workflows/black.yml: -------------------------------------------------------------------------------- 1 | name: Formatting validation using black 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | timeout-minutes: 5 12 | strategy: 13 | matrix: 14 | python-version: ["3.13"] 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Set up Python ${{ matrix.python-version }} 19 | uses: actions/setup-python@v5 20 | with: 21 | python-version: ${{ matrix.python-version }} 22 | - name: Install dependencies 23 | run: | 24 | pip install -U pip 25 | pip install -r requirements.txt 26 | - name: Format with black 27 | run: | 28 | black . 
29 | if git status --porcelain | grep .; then git --no-pager diff; exit 1; fi 30 | -------------------------------------------------------------------------------- /.github/workflows/flake8.yml: -------------------------------------------------------------------------------- 1 | name: Linting validation using flake8 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | timeout-minutes: 5 12 | strategy: 13 | matrix: 14 | python-version: ["3.13"] 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Set up Python ${{ matrix.python-version }} 19 | uses: actions/setup-python@v5 20 | with: 21 | python-version: ${{ matrix.python-version }} 22 | - name: Install dependencies 23 | run: | 24 | pip install -U pip 25 | pip install -r requirements.txt 26 | - name: Lint with flake8 27 | run: | 28 | flake8 *.py && flake8 listeners/ 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # general things to ignore 2 | build/ 3 | dist/ 4 | docs/_sources/ 5 | docs/.doctrees 6 | .eggs/ 7 | *.egg-info/ 8 | *.egg 9 | *.py[cod] 10 | __pycache__/ 11 | *.so 12 | *~ 13 | 14 | # virtualenv 15 | env*/ 16 | venv/ 17 | .venv* 18 | .env* 19 | 20 | # codecov / coverage 21 | .coverage 22 | cov_* 23 | coverage.xml 24 | 25 | # due to using tox and pytest 26 | .tox 27 | .cache 28 | .pytest_cache/ 29 | .python-version 30 | pip 31 | .mypy_cache/ 32 | 33 | # misc 34 | tmp.txt 35 | .DS_Store 36 | logs/ 37 | *.db 38 | .pytype/ 39 | data/* 40 | .slack/apps.dev.json 41 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Slack Technologies, LLC 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this 
software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Slack AI Chatbot 2 | 3 | This Slack chatbot app template offers a customizable solution for integrating AI-powered conversations into your Slack workspace. 
Here's what the app can do out of the box: 4 | 5 | * Interact with the bot by mentioning it in conversations and threads 6 | * Send direct messages to the bot for private interactions 7 | * Use the `/ask-bolty` command to communicate with the bot in channels where it hasn't been added 8 | * Utilize a custom function for integration with Workflow Builder to summarize messages in conversations 9 | * Select your preferred API/model from the app home to customize the bot's responses 10 | * Bring Your Own Language Model [BYO LLM](#byo-llm) for customization 11 | * Custom FileStateStore creates a file in /data per user to store API/model preferences 12 | 13 | Inspired by [ChatGPT-in-Slack](https://github.com/seratch/ChatGPT-in-Slack/tree/main) 14 | 15 | Before getting started, make sure you have a development workspace where you have permissions to install apps. If you don’t have one setup, go ahead and [create one](https://slack.com/create). 16 | ## Installation 17 | 18 | #### Prerequisites 19 | * To use the OpenAI and Anthropic models, you must have an account with sufficient credits. 20 | * To use the Vertex models, you must have [a Google Cloud Provider project](https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstarts/quickstart-multimodal#expandable-1) with sufficient credits. 21 | 22 | #### Create a Slack App 23 | 1. Open [https://api.slack.com/apps/new](https://api.slack.com/apps/new) and choose "From an app manifest" 24 | 2. Choose the workspace you want to install the application to 25 | 3. Copy the contents of [manifest.json](./manifest.json) into the text box that says `*Paste your manifest code here*` (within the JSON tab) and click *Next* 26 | 4. Review the configuration and click *Create* 27 | 5. Click *Install to Workspace* and *Allow* on the screen that follows. You'll then be redirected to the App Configuration dashboard. 28 | 29 | #### Environment Variables 30 | Before you can run the app, you'll need to store some environment variables. 
Open your app's configuration page from this list, click **OAuth & Permissions** in the left hand menu, then copy the Bot User OAuth Token.
62 | 63 | Once your project and credentials are configured, export environment variables to select from Gemini models: 64 | 65 | ```zsh 66 | export VERTEX_AI_PROJECT_ID= 67 | export VERTEX_AI_LOCATION= 68 | ``` 69 | 70 | The project location can be located under the **Region** on the [Vertex AI](https://console.cloud.google.com/vertex-ai) dashboard, as well as more details about available Gemini models. 71 | 72 | ##### OpenAI Setup 73 | 74 | Unlock the OpenAI models from your OpenAI account dashboard by clicking [create a new secret key](https://platform.openai.com/api-keys), then export the key like so: 75 | 76 | ```zsh 77 | export OPENAI_API_KEY= 78 | ``` 79 | 80 | ### Setup Your Local Project 81 | ```zsh 82 | # Clone this project onto your machine 83 | git clone https://github.com/slack-samples/bolt-python-ai-chatbot.git 84 | 85 | # Change into this project directory 86 | cd bolt-python-ai-chatbot 87 | 88 | # Setup your python virtual environment 89 | python3 -m venv .venv 90 | source .venv/bin/activate 91 | 92 | # Install the dependencies 93 | pip install -r requirements.txt 94 | 95 | # Start your local server 96 | python3 app.py 97 | ``` 98 | 99 | #### Linting 100 | ```zsh 101 | # Run flake8 from root directory for linting 102 | flake8 *.py && flake8 listeners/ 103 | 104 | # Run black from root directory for code formatting 105 | black . 106 | ``` 107 | 108 | ## Project Structure 109 | 110 | ### `manifest.json` 111 | 112 | `manifest.json` is a configuration for Slack apps. With a manifest, you can create an app with a pre-defined configuration, or adjust the configuration of an existing app. 113 | 114 | 115 | ### `app.py` 116 | 117 | `app.py` is the entry point for the application and is the file you'll run to start the server. This project aims to keep this file as thin as possible, primarily using it as a way to route inbound requests. 118 | 119 | 120 | ### `/listeners` 121 | 122 | Every incoming request is routed to a "listener". 
Inside this directory, we group each listener based on the Slack Platform feature used, so `/listeners/commands` handles incoming [Slash Commands](https://api.slack.com/interactivity/slash-commands) requests, `/listeners/events` handles [Events](https://api.slack.com/apis/events-api) and so on. 123 | 124 | ### `/ai` 125 | 126 | * `ai_constants.py`: Defines constants used throughout the AI module. 127 | 128 | 129 | #### `ai/providers` 130 | This module contains classes for communicating with different API providers, such as [Anthropic](https://www.anthropic.com/), [OpenAI](https://openai.com/), and [Vertex AI](cloud.google.com/vertex-ai). To add your own LLM, create a new class for it using the `base_api.py` as an example, then update `ai/providers/__init__.py` to include and utilize your new class for API communication. 131 | 132 | * `__init__.py`: 133 | This file contains utility functions for handling responses from the provider APIs and retrieving available providers. 134 | 135 | ### `/state_store` 136 | 137 | * `user_identity.py`: This file defines the UserIdentity class for creating user objects. Each object represents a user with the user_id, provider, and model attributes. 138 | 139 | * `user_state_store.py`: This file defines the base class for FileStateStore. 140 | 141 | * `file_state_store.py`: This file defines the FileStateStore class which handles the logic for creating and managing files for each user. 142 | 143 | * `set_user_state.py`: This file creates a user object and uses a FileStateStore to save the user's selected provider to a JSON file. 144 | 145 | * `get_user_state.py`: This file retrieves a users selected provider from the JSON file created with `set_user_state.py`. 146 | 147 | ## App Distribution / OAuth 148 | 149 | Only implement OAuth if you plan to distribute your application across multiple workspaces. A separate `app_oauth.py` file can be found with relevant OAuth settings. 
150 | 151 | When using OAuth, Slack requires a public URL where it can send requests. In this template app, we've used [`ngrok`](https://ngrok.com/download). Checkout [this guide](https://ngrok.com/docs#getting-started-expose) for setting it up. 152 | 153 | Start `ngrok` to access the app on an external network and create a redirect URL for OAuth. 154 | 155 | ``` 156 | ngrok http 3000 157 | ``` 158 | 159 | This output should include a forwarding address for `http` and `https` (we'll use `https`). It should look something like the following: 160 | 161 | ``` 162 | Forwarding https://3cb89939.ngrok.io -> http://localhost:3000 163 | ``` 164 | 165 | Navigate to **OAuth & Permissions** in your app configuration and click **Add a Redirect URL**. The redirect URL should be set to your `ngrok` forwarding address with the `slack/oauth_redirect` path appended. For example: 166 | 167 | ``` 168 | https://3cb89939.ngrok.io/slack/oauth_redirect 169 | ``` 170 | -------------------------------------------------------------------------------- /ai/ai_constants.py: -------------------------------------------------------------------------------- 1 | # This file defines constant strings used as system messages for configuring the behavior of the AI assistant. 2 | # Used in `handle_response.py` and `dm_sent.py` 3 | 4 | DEFAULT_SYSTEM_CONTENT = """ 5 | You are a versatile AI assistant. 6 | Help users with writing, codiing, task management, advice, project management, and any other needs. 7 | Provide concise, relevant assistance tailored to each request. 8 | Note that context is sent in order of the most recent message last. 9 | Do not respond to messages in the context, as they have already been answered. 10 | Be professional and friendly. 11 | Don't ask for clarification unless absolutely necessary. 12 | Don't ask questions in your response. 13 | Don't use user names in your response. 14 | """ 15 | DM_SYSTEM_CONTENT = """ 16 | This is a private DM between you and user. 
17 | You are the user's helpful AI assistant. 18 | """ 19 | -------------------------------------------------------------------------------- /ai/providers/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | 3 | from state_store.get_user_state import get_user_state 4 | 5 | from ..ai_constants import DEFAULT_SYSTEM_CONTENT 6 | from .anthropic import AnthropicAPI 7 | from .openai import OpenAI_API 8 | from .vertexai import VertexAPI 9 | 10 | """ 11 | New AI providers must be added below. 12 | `get_available_providers()` 13 | This function retrieves available API models from different AI providers. 14 | It combines the available models into a single dictionary. 15 | `_get_provider()` 16 | This function returns an instance of the appropriate API provider based on the given provider name. 17 | `get_provider_response`() 18 | This function retrieves the user's selected API provider and model, 19 | sets the model, and generates a response. 20 | Note that context is an optional parameter because some functionalities, 21 | such as commands, do not allow access to conversation history if the bot 22 | isn't in the channel where the command is run. 
23 | """ 24 | 25 | 26 | def get_available_providers(): 27 | return { 28 | **AnthropicAPI().get_models(), 29 | **OpenAI_API().get_models(), 30 | **VertexAPI().get_models(), 31 | } 32 | 33 | 34 | def _get_provider(provider_name: str): 35 | if provider_name.lower() == "anthropic": 36 | return AnthropicAPI() 37 | elif provider_name.lower() == "openai": 38 | return OpenAI_API() 39 | elif provider_name.lower() == "vertexai": 40 | return VertexAPI() 41 | else: 42 | raise ValueError(f"Unknown provider: {provider_name}") 43 | 44 | 45 | def get_provider_response(user_id: str, prompt: str, context: Optional[List] = [], system_content=DEFAULT_SYSTEM_CONTENT): 46 | formatted_context = "\n".join([f"{msg['user']}: {msg['text']}" for msg in context]) 47 | full_prompt = f"Prompt: {prompt}\nContext: {formatted_context}" 48 | try: 49 | provider_name, model_name = get_user_state(user_id, False) 50 | provider = _get_provider(provider_name) 51 | provider.set_model(model_name) 52 | response = provider.generate_response(full_prompt, system_content) 53 | return response 54 | except Exception as e: 55 | raise e 56 | -------------------------------------------------------------------------------- /ai/providers/anthropic.py: -------------------------------------------------------------------------------- 1 | from .base_provider import BaseAPIProvider 2 | import anthropic 3 | import os 4 | import logging 5 | 6 | logging.basicConfig(level=logging.ERROR) 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class AnthropicAPI(BaseAPIProvider): 11 | MODELS = { 12 | "claude-3-5-sonnet-20240620": { 13 | "name": "Claude 3.5 Sonnet", 14 | "provider": "Anthropic", 15 | "max_tokens": 4096, # or 8192 with the header anthropic-beta: max-tokens-3-5-sonnet-2024-07-15 16 | }, 17 | "claude-3-sonnet-20240229": {"name": "Claude 3 Sonnet", "provider": "Anthropic", "max_tokens": 4096}, 18 | "claude-3-haiku-20240307": {"name": "Claude 3 Haiku", "provider": "Anthropic", "max_tokens": 4096}, 19 | 
"claude-3-opus-20240229": {"name": "Claude 3 Opus", "provider": "Anthropic", "max_tokens": 4096}, 20 | } 21 | 22 | def __init__(self): 23 | self.api_key = os.environ.get("ANTHROPIC_API_KEY") 24 | 25 | def set_model(self, model_name: str): 26 | if model_name not in self.MODELS.keys(): 27 | raise ValueError("Invalid model") 28 | self.current_model = model_name 29 | 30 | def get_models(self) -> dict: 31 | if self.api_key is not None: 32 | return self.MODELS 33 | else: 34 | return {} 35 | 36 | def generate_response(self, prompt: str, system_content: str) -> str: 37 | try: 38 | self.client = anthropic.Anthropic(api_key=self.api_key) 39 | response = self.client.messages.create( 40 | model=self.current_model, 41 | system=system_content, 42 | messages=[{"role": "user", "content": [{"type": "text", "text": prompt}]}], 43 | max_tokens=self.MODELS[self.current_model]["max_tokens"], 44 | ) 45 | return response.content[0].text 46 | except anthropic.APIConnectionError as e: 47 | logger.error(f"Server could not be reached: {e.__cause__}") 48 | raise e 49 | except anthropic.RateLimitError as e: 50 | logger.error(f"A 429 status code was received. {e}") 51 | raise e 52 | except anthropic.AuthenticationError as e: 53 | logger.error(f"There's an issue with your API key. {e}") 54 | raise e 55 | except anthropic.APIStatusError as e: 56 | logger.error(f"Another non-200-range status code was received: {e.status_code}") 57 | raise e 58 | -------------------------------------------------------------------------------- /ai/providers/base_provider.py: -------------------------------------------------------------------------------- 1 | # A base class for API providers, defining the interface and common properties for subclasses. 
2 | 3 | 4 | class BaseAPIProvider(object): 5 | def set_model(self, model_name: str): 6 | raise NotImplementedError("Subclass must implement set_model") 7 | 8 | def get_models(self) -> dict: 9 | raise NotImplementedError("Subclass must implement get_models") 10 | 11 | def generate_response(self, prompt: str, system_content: str) -> str: 12 | raise NotImplementedError("Subclass must implement generate_response") 13 | -------------------------------------------------------------------------------- /ai/providers/openai.py: -------------------------------------------------------------------------------- 1 | import openai 2 | from .base_provider import BaseAPIProvider 3 | import os 4 | import logging 5 | 6 | logging.basicConfig(level=logging.ERROR) 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class OpenAI_API(BaseAPIProvider): 11 | MODELS = { 12 | "gpt-4.1": {"name": "GPT-4.1", "provider": "OpenAI", "max_tokens": 10000}, 13 | "gpt-4.1-mini": {"name": "GPT-4.1 Mini", "provider": "OpenAI", "max_tokens": 10000}, 14 | "gpt-4.1-nano": {"name": "GPT-4.1 Nano", "provider": "OpenAI", "max_tokens": 10000}, 15 | "o4-mini": {"name": "o4-mini", "provider": "OpenAI", "max_tokens": 50000}, 16 | } 17 | 18 | def __init__(self): 19 | self.api_key = os.environ.get("OPENAI_API_KEY") 20 | 21 | def set_model(self, model_name: str): 22 | if model_name not in self.MODELS.keys(): 23 | raise ValueError("Invalid model") 24 | self.current_model = model_name 25 | 26 | def get_models(self) -> dict: 27 | if self.api_key is not None: 28 | return self.MODELS 29 | else: 30 | return {} 31 | 32 | def generate_response(self, prompt: str, system_content: str) -> str: 33 | try: 34 | self.client = openai.OpenAI(api_key=self.api_key) 35 | response = self.client.responses.create( 36 | model=self.current_model, 37 | input=[ 38 | {"role": "developer", "content": system_content}, 39 | {"role": "user", "content": prompt}, 40 | ], 41 | max_output_tokens=self.MODELS[self.current_model]["max_tokens"], 42 | ) 
43 | return response.output_text 44 | except openai.APIConnectionError as e: 45 | logger.error(f"Server could not be reached: {e.__cause__}") 46 | raise e 47 | except openai.RateLimitError as e: 48 | logger.error(f"A 429 status code was received. {e}") 49 | raise e 50 | except openai.AuthenticationError as e: 51 | logger.error(f"There's an issue with your API key. {e}") 52 | raise e 53 | except openai.APIStatusError as e: 54 | logger.error(f"Another non-200-range status code was received: {e.status_code}") 55 | raise e 56 | -------------------------------------------------------------------------------- /ai/providers/vertexai.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | import google.api_core.exceptions 5 | import vertexai.generative_models 6 | 7 | from .base_provider import BaseAPIProvider 8 | 9 | logging.basicConfig(level=logging.ERROR) 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | class VertexAPI(BaseAPIProvider): 14 | VERTEX_AI_PROVIDER = "VertexAI" 15 | MODELS = { 16 | "gemini-1.5-flash-001": { 17 | "name": "Gemini 1.5 Flash 001", 18 | "provider": VERTEX_AI_PROVIDER, 19 | "max_tokens": 8192, 20 | "system_instruction_supported": True, 21 | }, 22 | "gemini-1.5-flash-002": { 23 | "name": "Gemini 1.5 Flash 002", 24 | "provider": VERTEX_AI_PROVIDER, 25 | "max_tokens": 8192, 26 | "system_instruction_supported": True, 27 | }, 28 | "gemini-1.5-pro-002": { 29 | "name": "Gemini 1.5 Pro 002", 30 | "provider": VERTEX_AI_PROVIDER, 31 | "max_tokens": 8192, 32 | "system_instruction_supported": True, 33 | }, 34 | "gemini-1.5-pro-001": { 35 | "name": "Gemini 1.5 Pro 001", 36 | "provider": VERTEX_AI_PROVIDER, 37 | "max_tokens": 8192, 38 | "system_instruction_supported": True, 39 | }, 40 | "gemini-1.0-pro-002": { 41 | "name": "Gemini 1.0 Pro 002", 42 | "provider": VERTEX_AI_PROVIDER, 43 | "max_tokens": 8192, 44 | "system_instruction_supported": True, 45 | }, 46 | "gemini-1.0-pro-001": { 
47 | "name": "Gemini 1.0 Pro 001", 48 | "provider": VERTEX_AI_PROVIDER, 49 | "max_tokens": 8192, 50 | "system_instruction_supported": False, 51 | }, 52 | "gemini-flash-experimental": { 53 | "name": "Gemini Flash Experimental", 54 | "provider": VERTEX_AI_PROVIDER, 55 | "max_tokens": 8192, 56 | "system_instruction_supported": True, 57 | }, 58 | "gemini-pro-experimental": { 59 | "name": "Gemini Pro Experimental", 60 | "provider": VERTEX_AI_PROVIDER, 61 | "max_tokens": 8192, 62 | "system_instruction_supported": True, 63 | }, 64 | "gemini-experimental": { 65 | "name": "Gemini Experimental", 66 | "provider": VERTEX_AI_PROVIDER, 67 | "max_tokens": 8192, 68 | "system_instruction_supported": True, 69 | }, 70 | } 71 | 72 | def __init__(self): 73 | self.enabled = bool(os.environ.get("VERTEX_AI_PROJECT_ID", "")) 74 | if self.enabled: 75 | vertexai.init( 76 | project=os.environ.get("VERTEX_AI_PROJECT_ID"), 77 | location=os.environ.get("VERTEX_AI_LOCATION"), 78 | ) 79 | 80 | def set_model(self, model_name: str): 81 | if model_name not in self.MODELS.keys(): 82 | raise ValueError("Invalid model") 83 | self.current_model = model_name 84 | 85 | def get_models(self) -> dict: 86 | if self.enabled: 87 | return self.MODELS 88 | else: 89 | return {} 90 | 91 | def generate_response(self, prompt: str, system_content: str) -> str: 92 | system_instruction = None 93 | if self.MODELS[self.current_model]["system_instruction_supported"]: 94 | system_instruction = system_content 95 | else: 96 | prompt = system_content + "\n" + prompt 97 | 98 | try: 99 | self.client = vertexai.generative_models.GenerativeModel( 100 | model_name=self.current_model, 101 | generation_config={ 102 | "max_output_tokens": self.MODELS[self.current_model]["max_tokens"], 103 | }, 104 | system_instruction=system_instruction, 105 | ) 106 | response = self.client.generate_content( 107 | contents=prompt, 108 | ) 109 | return "".join(part.text for part in response.candidates[0].content.parts) 110 | 111 | except 
google.api_core.exceptions.Unauthorized as e: 112 | logger.error(f"Client is not Authorized. {e.reason}, {e.message}") 113 | raise e 114 | except google.api_core.exceptions.Forbidden as e: 115 | logger.error(f"Client Forbidden. {e.reason}, {e.message}") 116 | raise e 117 | except google.api_core.exceptions.TooManyRequests as e: 118 | logger.error(f"Too many requests. {e.reason}, {e.message}") 119 | raise e 120 | except google.api_core.exceptions.ClientError as e: 121 | logger.error(f"Client error: {e.reason}, {e.message}") 122 | raise e 123 | except google.api_core.exceptions.ServerError as e: 124 | logger.error(f"Server error: {e.reason}, {e.message}") 125 | raise e 126 | except google.api_core.exceptions.GoogleAPICallError as e: 127 | logger.error(f"Error: {e.reason}, {e.message}") 128 | raise e 129 | except google.api_core.exceptions.GoogleAPIError as e: 130 | logger.error(f"Unknown error. {e}") 131 | raise e 132 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | 4 | from slack_bolt import App 5 | from slack_bolt.adapter.socket_mode import SocketModeHandler 6 | 7 | from listeners import register_listeners 8 | 9 | # Initialization 10 | app = App(token=os.environ.get("SLACK_BOT_TOKEN")) 11 | logging.basicConfig(level=logging.DEBUG) 12 | 13 | # Register Listeners 14 | register_listeners(app) 15 | 16 | # Start Bolt app 17 | if __name__ == "__main__": 18 | SocketModeHandler(app, os.environ.get("SLACK_APP_TOKEN")).start() 19 | -------------------------------------------------------------------------------- /app_oauth.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from slack_bolt import App, BoltResponse 4 | from slack_bolt.oauth.callback_options import CallbackOptions, SuccessArgs, FailureArgs 5 | from slack_bolt.oauth.oauth_settings 
import OAuthSettings 6 | 7 | from slack_sdk.oauth.installation_store import FileInstallationStore 8 | from slack_sdk.oauth.state_store import FileOAuthStateStore 9 | 10 | from listeners import register_listeners 11 | 12 | logging.basicConfig(level=logging.DEBUG) 13 | 14 | 15 | # Callback to run on successful installation 16 | def success(args: SuccessArgs) -> BoltResponse: 17 | # Call default handler to return an HTTP response 18 | return args.default.success(args) 19 | # return BoltResponse(status=200, body="Installation successful!") 20 | 21 | 22 | # Callback to run on failed installation 23 | def failure(args: FailureArgs) -> BoltResponse: 24 | return args.default.failure(args) 25 | # return BoltResponse(status=args.suggested_status_code, body=args.reason) 26 | 27 | 28 | # Initialization 29 | app = App( 30 | signing_secret=os.environ.get("SLACK_SIGNING_SECRET"), 31 | installation_store=FileInstallationStore(), 32 | oauth_settings=OAuthSettings( 33 | client_id=os.environ.get("SLACK_CLIENT_ID"), 34 | client_secret=os.environ.get("SLACK_CLIENT_SECRET"), 35 | scopes=["channels:history", "chat:write", "commands"], 36 | user_scopes=[], 37 | redirect_uri=None, 38 | install_path="/slack/install", 39 | redirect_uri_path="/slack/oauth_redirect", 40 | state_store=FileOAuthStateStore(expiration_seconds=600), 41 | callback_options=CallbackOptions(success=success, failure=failure), 42 | ), 43 | ) 44 | 45 | # Register Listeners 46 | register_listeners(app) 47 | 48 | # Start Bolt app 49 | if __name__ == "__main__": 50 | app.start(3000) 51 | -------------------------------------------------------------------------------- /data/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore -------------------------------------------------------------------------------- /listeners/__init__.py: -------------------------------------------------------------------------------- 1 | from listeners import actions 2 | from listeners 
import commands 3 | from listeners import events 4 | from listeners import functions 5 | 6 | 7 | def register_listeners(app): 8 | actions.register(app) 9 | commands.register(app) 10 | events.register(app) 11 | functions.register(app) 12 | -------------------------------------------------------------------------------- /listeners/actions/__init__.py: -------------------------------------------------------------------------------- 1 | from slack_bolt import App 2 | from .set_user_selection import set_user_selection 3 | 4 | 5 | def register(app: App): 6 | app.action("pick_a_provider")(set_user_selection) 7 | -------------------------------------------------------------------------------- /listeners/actions/set_user_selection.py: -------------------------------------------------------------------------------- 1 | from logging import Logger 2 | from slack_bolt import Ack 3 | from state_store.set_user_state import set_user_state 4 | 5 | 6 | def set_user_selection(logger: Logger, ack: Ack, body: dict): 7 | try: 8 | ack() 9 | user_id = body["user"]["id"] 10 | value = body["actions"][0]["selected_option"]["value"] 11 | if value != "null": 12 | # parsing the selected option value from the options array in app_home_opened.py 13 | selected_provider, selected_model = value.split(" ")[-1], value.split(" ")[0] 14 | set_user_state(user_id, selected_provider, selected_model) 15 | else: 16 | raise ValueError("Please make a selection") 17 | except Exception as e: 18 | logger.error(e) 19 | -------------------------------------------------------------------------------- /listeners/commands/__init__.py: -------------------------------------------------------------------------------- 1 | from slack_bolt import App 2 | from .ask_command import ask_callback 3 | 4 | 5 | def register(app: App): 6 | app.command("/ask-bolty")(ask_callback) 7 | -------------------------------------------------------------------------------- /listeners/commands/ask_command.py: 
-------------------------------------------------------------------------------- 1 | from slack_bolt import Ack, Say, BoltContext 2 | from logging import Logger 3 | from ai.providers import get_provider_response 4 | from slack_sdk import WebClient 5 | 6 | """ 7 | Callback for handling the 'ask-bolty' command. It acknowledges the command, retrieves the user's ID and prompt, 8 | checks if the prompt is empty, and responds with either an error message or the provider's response. 9 | """ 10 | 11 | 12 | def ask_callback(client: WebClient, ack: Ack, command, say: Say, logger: Logger, context: BoltContext): 13 | try: 14 | ack() 15 | user_id = context["user_id"] 16 | channel_id = context["channel_id"] 17 | prompt = command["text"] 18 | 19 | if prompt == "": 20 | client.chat_postEphemeral( 21 | channel=channel_id, user=user_id, text="Looks like you didn't provide a prompt. Try again." 22 | ) 23 | else: 24 | client.chat_postEphemeral( 25 | channel=channel_id, 26 | user=user_id, 27 | blocks=[ 28 | { 29 | "type": "rich_text", 30 | "elements": [ 31 | { 32 | "type": "rich_text_quote", 33 | "elements": [{"type": "text", "text": prompt}], 34 | }, 35 | { 36 | "type": "rich_text_section", 37 | "elements": [{"type": "text", "text": get_provider_response(user_id, prompt)}], 38 | }, 39 | ], 40 | } 41 | ], 42 | ) 43 | except Exception as e: 44 | logger.error(e) 45 | client.chat_postEphemeral(channel=channel_id, user=user_id, text=f"Received an error from Bolty:\n{e}") 46 | -------------------------------------------------------------------------------- /listeners/events/__init__.py: -------------------------------------------------------------------------------- 1 | from slack_bolt import App 2 | from .app_home_opened import app_home_opened_callback 3 | from .app_mentioned import app_mentioned_callback 4 | from .app_messaged import app_messaged_callback 5 | 6 | 7 | def register(app: App): 8 | app.event("app_home_opened")(app_home_opened_callback) 9 | 
app.event("app_mention")(app_mentioned_callback) 10 | app.event("message")(app_messaged_callback) 11 | -------------------------------------------------------------------------------- /listeners/events/app_home_opened.py: -------------------------------------------------------------------------------- 1 | from logging import Logger 2 | from ai.providers import get_available_providers 3 | from slack_sdk import WebClient 4 | from state_store.get_user_state import get_user_state 5 | 6 | """ 7 | Callback for handling the 'app_home_opened' event. It checks if the event is for the 'home' tab, 8 | generates a list of model options for a dropdown menu, retrieves the user's state to set the initial option, 9 | and publishes a view to the user's home tab in Slack. 10 | """ 11 | 12 | 13 | def app_home_opened_callback(event: dict, logger: Logger, client: WebClient): 14 | if event["tab"] != "home": 15 | return 16 | 17 | # create a list of options for the dropdown menu each containing the model name and provider 18 | options = [ 19 | { 20 | "text": {"type": "plain_text", "text": f"{model_info['name']} ({model_info['provider']})", "emoji": True}, 21 | "value": f"{model_name} {model_info['provider'].lower()}", 22 | } 23 | for model_name, model_info in get_available_providers().items() 24 | ] 25 | 26 | # retrieve user's state to determine if they already have a selected model 27 | user_state = get_user_state(event["user"], True) 28 | initial_option = None 29 | 30 | if user_state: 31 | initial_model = get_user_state(event["user"], True)[1] 32 | # set the initial option to the user's previously selected model 33 | initial_option = list(filter(lambda x: x["value"].startswith(initial_model), options)) 34 | else: 35 | # add an empty option if the user has no previously selected model. 
36 | options.append( 37 | { 38 | "text": {"type": "plain_text", "text": "Select a provider", "emoji": True}, 39 | "value": "null", 40 | } 41 | ) 42 | 43 | try: 44 | client.views_publish( 45 | user_id=event["user"], 46 | view={ 47 | "type": "home", 48 | "blocks": [ 49 | { 50 | "type": "header", 51 | "text": {"type": "plain_text", "text": "Welcome to Bolty's Home Page!", "emoji": True}, 52 | }, 53 | {"type": "divider"}, 54 | { 55 | "type": "rich_text", 56 | "elements": [ 57 | { 58 | "type": "rich_text_section", 59 | "elements": [{"type": "text", "text": "Pick an option", "style": {"bold": True}}], 60 | } 61 | ], 62 | }, 63 | { 64 | "type": "actions", 65 | "elements": [ 66 | { 67 | "type": "static_select", 68 | "initial_option": initial_option[0] if initial_option else options[-1], 69 | "options": options, 70 | "action_id": "pick_a_provider", 71 | } 72 | ], 73 | }, 74 | ], 75 | }, 76 | ) 77 | except Exception as e: 78 | logger.error(e) 79 | -------------------------------------------------------------------------------- /listeners/events/app_mentioned.py: -------------------------------------------------------------------------------- 1 | from ai.providers import get_provider_response 2 | from logging import Logger 3 | from slack_sdk import WebClient 4 | from slack_bolt import Say 5 | from ..listener_utils.listener_constants import DEFAULT_LOADING_TEXT, MENTION_WITHOUT_TEXT 6 | from ..listener_utils.parse_conversation import parse_conversation 7 | 8 | """ 9 | Handles the event when the app is mentioned in a Slack channel, retrieves the conversation context, 10 | and generates an AI response if text is provided, otherwise sends a default response 11 | """ 12 | 13 | 14 | def app_mentioned_callback(client: WebClient, event: dict, logger: Logger, say: Say): 15 | try: 16 | channel_id = event.get("channel") 17 | thread_ts = event.get("thread_ts") 18 | user_id = event.get("user") 19 | text = event.get("text") 20 | 21 | if thread_ts: 22 | conversation = 
client.conversations_replies(channel=channel_id, ts=thread_ts, limit=10)["messages"] 23 | else: 24 | conversation = client.conversations_history(channel=channel_id, limit=10)["messages"] 25 | thread_ts = event["ts"] 26 | 27 | conversation_context = parse_conversation(conversation[:-1]) 28 | 29 | if text: 30 | waiting_message = say(text=DEFAULT_LOADING_TEXT, thread_ts=thread_ts) 31 | response = get_provider_response(user_id, text, conversation_context) 32 | client.chat_update(channel=channel_id, ts=waiting_message["ts"], text=response) 33 | else: 34 | response = MENTION_WITHOUT_TEXT 35 | client.chat_update(channel=channel_id, ts=waiting_message["ts"], text=response) 36 | 37 | except Exception as e: 38 | logger.error(e) 39 | client.chat_update(channel=channel_id, ts=waiting_message["ts"], text=f"Received an error from Bolty:\n{e}") 40 | -------------------------------------------------------------------------------- /listeners/events/app_messaged.py: -------------------------------------------------------------------------------- 1 | from ai.ai_constants import DM_SYSTEM_CONTENT 2 | from ai.providers import get_provider_response 3 | from logging import Logger 4 | from slack_bolt import Say 5 | from slack_sdk import WebClient 6 | from ..listener_utils.listener_constants import DEFAULT_LOADING_TEXT 7 | from ..listener_utils.parse_conversation import parse_conversation 8 | 9 | """ 10 | Handles the event when a direct message is sent to the bot, retrieves the conversation context, 11 | and generates an AI response. 12 | """ 13 | 14 | 15 | def app_messaged_callback(client: WebClient, event: dict, logger: Logger, say: Say): 16 | channel_id = event.get("channel") 17 | thread_ts = event.get("thread_ts") 18 | user_id = event.get("user") 19 | text = event.get("text") 20 | 21 | try: 22 | if event.get("channel_type") == "im": 23 | conversation_context = "" 24 | 25 | if thread_ts: # Retrieves context to continue the conversation in a thread. 
26 | conversation = client.conversations_replies(channel=channel_id, limit=10, ts=thread_ts)["messages"] 27 | conversation_context = parse_conversation(conversation[:-1]) 28 | 29 | waiting_message = say(text=DEFAULT_LOADING_TEXT, thread_ts=thread_ts) 30 | response = get_provider_response(user_id, text, conversation_context, DM_SYSTEM_CONTENT) 31 | client.chat_update(channel=channel_id, ts=waiting_message["ts"], text=response) 32 | except Exception as e: 33 | logger.error(e) 34 | client.chat_update(channel=channel_id, ts=waiting_message["ts"], text=f"Received an error from Bolty:\n{e}") 35 | -------------------------------------------------------------------------------- /listeners/functions/__init__.py: -------------------------------------------------------------------------------- 1 | from slack_bolt import App 2 | from .summary_function import handle_summary_function_callback 3 | 4 | 5 | def register(app: App): 6 | app.function("summary_function")(handle_summary_function_callback) 7 | -------------------------------------------------------------------------------- /listeners/functions/summary_function.py: -------------------------------------------------------------------------------- 1 | from ai.providers import get_provider_response 2 | from logging import Logger 3 | from slack_bolt import Complete, Fail, Ack 4 | from slack_sdk import WebClient 5 | from ..listener_utils.listener_constants import SUMMARIZE_CHANNEL_WORKFLOW 6 | from ..listener_utils.parse_conversation import parse_conversation 7 | 8 | """ 9 | Handles the event to summarize a Slack channel's conversation history. 10 | It retrieves the conversation history, parses it, generates a summary using an AI response, 11 | and completes the workflow with the summary or fails if an error occurs. 
12 | """ 13 | 14 | 15 | def handle_summary_function_callback( 16 | ack: Ack, inputs: dict, fail: Fail, logger: Logger, client: WebClient, complete: Complete 17 | ): 18 | ack() 19 | try: 20 | user_context = inputs["user_context"] 21 | channel_id = inputs["channel_id"] 22 | history = client.conversations_history(channel=channel_id, limit=10)["messages"] 23 | conversation = parse_conversation(history) 24 | 25 | summary = get_provider_response(user_context["id"], SUMMARIZE_CHANNEL_WORKFLOW, conversation) 26 | 27 | complete({"user_context": user_context, "response": summary}) 28 | except Exception as e: 29 | logger.exception(e) 30 | fail(e) 31 | -------------------------------------------------------------------------------- /listeners/listener_utils/listener_constants.py: -------------------------------------------------------------------------------- 1 | # This file defines constant messages used by the Slack bot for when a user mentions the bot without text, 2 | # when summarizing a channel's conversation history, and a default loading message. 3 | # Used in `app_mentioned_callback`, `dm_sent_callback`, and `handle_summary_function_callback`. 4 | 5 | MENTION_WITHOUT_TEXT = """ 6 | Hi there! You didn't provide a message with your mention. 7 | Mention me again in this thread so that I can help you out! 8 | """ 9 | SUMMARIZE_CHANNEL_WORKFLOW = """ 10 | A user has just joined this Slack channel. 11 | Please create a quick summary of the conversation in this channel to help them catch up. 12 | Don't use user IDs or names in your response. 13 | """ 14 | DEFAULT_LOADING_TEXT = "Thinking..." 
15 | -------------------------------------------------------------------------------- /listeners/listener_utils/parse_conversation.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, List 2 | from slack_sdk.web.slack_response import SlackResponse 3 | import logging 4 | 5 | logging.basicConfig(level=logging.ERROR) 6 | logger = logging.getLogger(__name__) 7 | 8 | """ 9 | Parses a conversation history, excluding messages from the bot, 10 | and formats it as a string with user IDs and their messages. 11 | Used in `app_mentioned_callback`, `dm_sent_callback`, 12 | and `handle_summary_function_callback`.""" 13 | 14 | 15 | def parse_conversation(conversation: SlackResponse) -> Optional[List[dict]]: 16 | parsed = [] 17 | try: 18 | for message in conversation: 19 | user = message["user"] 20 | text = message["text"] 21 | parsed.append({"user": user, "text": text}) 22 | return parsed 23 | except Exception as e: 24 | logger.error(e) 25 | return None 26 | -------------------------------------------------------------------------------- /manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "_metadata": { 3 | "major_version": 1, 4 | "minor_version": 1 5 | }, 6 | "display_information": { 7 | "name": "Bolty" 8 | }, 9 | "features": { 10 | "app_home": { 11 | "home_tab_enabled": true, 12 | "messages_tab_enabled": true, 13 | "messages_tab_read_only_enabled": false 14 | }, 15 | "bot_user": { 16 | "display_name": "Bolty", 17 | "always_online": true 18 | }, 19 | "slash_commands": [ 20 | { 21 | "command": "/ask-bolty", 22 | "description": "Interact with Bolty.", 23 | "should_escape": false 24 | } 25 | ] 26 | }, 27 | "oauth_config": { 28 | "scopes": { 29 | "bot": [ 30 | "app_mentions:read", 31 | "channels:history", 32 | "channels:read", 33 | "chat:write", 34 | "chat:write.public", 35 | "commands", 36 | "groups:history", 37 | "groups:read", 38 | "im:history", 39 | "im:read", 
40 | "im:write", 41 | "mpim:history", 42 | "mpim:read", 43 | "mpim:write", 44 | "users:read" 45 | ] 46 | } 47 | }, 48 | "functions": { 49 | "summary_function": { 50 | "title": "Bolty Custom Function", 51 | "description": "Interact with an AI Chatbot. Bolty must be a channel member.", 52 | "input_parameters": { 53 | "user_context": { 54 | "type": "slack#/types/user_context", 55 | "title": "User", 56 | "description": "Tag the user that will be notified when bot responds", 57 | "hint": "Tag user who ran the workflow", 58 | "name": "user_context", 59 | "is_required": true 60 | }, 61 | "channel_id": { 62 | "type": "slack#/types/channel_id", 63 | "title": "Channel", 64 | "description": "Channel that user joined", 65 | "hint": "Input channel that user joined", 66 | "name": "channel_id", 67 | "is_required": true 68 | } 69 | }, 70 | "output_parameters": { 71 | "user_context": { 72 | "type": "slack#/types/user_context", 73 | "title": "User", 74 | "description": "User that completed the workflow", 75 | "name": "user_context", 76 | "is_required": true 77 | }, 78 | "response": { 79 | "type": "string", 80 | "title": "Summary", 81 | "description": "AI-generated summary of recent messages in channel", 82 | "name": "response", 83 | "is_required": true 84 | } 85 | } 86 | } 87 | }, 88 | "settings": { 89 | "event_subscriptions": { 90 | "bot_events": [ 91 | "app_home_opened", 92 | "app_mention", 93 | "function_executed", 94 | "message.channels", 95 | "message.groups", 96 | "message.im", 97 | "message.mpim" 98 | ] 99 | }, 100 | "interactivity": { 101 | "is_enabled": true 102 | }, 103 | "org_deploy_enabled": true, 104 | "socket_mode_enabled": true, 105 | "token_rotation_enabled": false, 106 | "function_runtime": "remote" 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 125 3 | 4 | 
[tool.pytest.ini_options]
testpaths = ["tests"]
log_file = "logs/pytest.log"
log_file_level = "DEBUG"
log_format = "%(asctime)s %(levelname)s %(message)s"
log_date_format = "%Y-%m-%d %H:%M:%S"
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
slack-bolt==1.23.0
pytest
flake8==7.2.0
black==25.1.0
slack-cli-hooks==0.0.3
openai==1.82.1
anthropic==0.52.1
google-cloud-aiplatform==1.95.1
--------------------------------------------------------------------------------
/slack.json:
--------------------------------------------------------------------------------
{
    "hooks": {
        "get-hooks": "python3 -m slack_cli_hooks.hooks.get_hooks"
    }
}
--------------------------------------------------------------------------------
/state_store/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/slack-samples/bolt-python-ai-chatbot/b2be4dacea5b6204e0e10c3c8dceecda195844df/state_store/__init__.py
--------------------------------------------------------------------------------
/state_store/file_state_store.py:
--------------------------------------------------------------------------------
from .user_state_store import UserStateStore
from .user_identity import UserIdentity
import logging
from pathlib import Path
import json
import os


class FileStateStore(UserStateStore):
    """File-backed UserStateStore: one JSON file per user under base_dir."""

    def __init__(
        self,
        *,
        base_dir: str = "./data",
        logger: logging.Logger = logging.getLogger(__name__),
    ):
        self.base_dir = base_dir
        self.logger = logger

    def set_state(self, user_identity: UserIdentity):
        # The user_id doubles as both the returned state token and the filename.
        state = user_identity["user_id"]
        self._mkdir(self.base_dir)
        filepath = f"{self.base_dir}/{state}"

        with open(filepath, "w") as
file: 25 | data = json.dumps(user_identity) 26 | file.write(data) 27 | return state 28 | 29 | def unset_state(self, user_identity: UserIdentity): 30 | state = user_identity["user_id"] 31 | filepath = f"{self.base_dir}/{state}" 32 | try: 33 | os.remove(filepath) 34 | return state 35 | except FileNotFoundError as e: 36 | self.logger.warning(f"Failed to find data for {user_identity} - {e}") 37 | raise e 38 | 39 | @staticmethod 40 | def _mkdir(path): 41 | if isinstance(path, str): 42 | path = Path(path) 43 | path.mkdir(parents=True, exist_ok=True) 44 | -------------------------------------------------------------------------------- /state_store/get_user_state.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from state_store.user_identity import UserIdentity 4 | import logging 5 | 6 | logging.basicConfig(level=logging.ERROR) 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | def get_user_state(user_id: str, is_app_home: bool): 11 | filepath = f"./data/{user_id}" 12 | if not is_app_home and not os.path.exists(filepath): 13 | raise FileNotFoundError("No provider selection found. 
Please navigate to the App Home and make a selection.") 14 | try: 15 | if os.path.exists(filepath): 16 | with open(filepath, "r") as file: 17 | user_identity: UserIdentity = json.load(file) 18 | return user_identity["provider"], user_identity["model"] 19 | except Exception as e: 20 | logger.error(e) 21 | raise e 22 | -------------------------------------------------------------------------------- /state_store/set_user_state.py: -------------------------------------------------------------------------------- 1 | from .file_state_store import FileStateStore, UserIdentity 2 | 3 | 4 | def set_user_state(user_id: str, provider_name: str, model_name: str): 5 | try: 6 | user = UserIdentity(user_id=user_id, provider=provider_name, model=model_name) 7 | file_store = FileStateStore() 8 | file_store.set_state(user) 9 | except Exception as e: 10 | raise ValueError(f"Error instantiating API: {e}") 11 | -------------------------------------------------------------------------------- /state_store/user_identity.py: -------------------------------------------------------------------------------- 1 | from typing import TypedDict 2 | 3 | 4 | class UserIdentity(TypedDict): 5 | user_id: str 6 | provider: str 7 | model: str 8 | -------------------------------------------------------------------------------- /state_store/user_state_store.py: -------------------------------------------------------------------------------- 1 | from .user_identity import UserIdentity 2 | 3 | 4 | class UserStateStore: 5 | def set_state(user_identity: UserIdentity): 6 | raise NotImplementedError() 7 | 8 | def unset_state(state: str): 9 | raise NotImplementedError() 10 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022, Slack Technologies, LLC. All rights reserved. 2 | --------------------------------------------------------------------------------