├── src
│   ├── __init__.py
│   ├── main.py
│   ├── SimpleThrottle.py
│   ├── AsyncStreamingSlackCallbackHandler.py
│   ├── conversation_utils.py
│   ├── ConversationAI.py
│   └── slackbot.py
├── run_local.sh
├── run_on_modal.sh
├── requirements-dev.txt
├── requirements.txt
├── .env.example
├── railway.json
├── main.py
├── LICENSE
├── README.md
├── slackbot-manifest-chatterbot.json
├── slackbot-manifest-chatterbot-dev.json
└── .gitignore

/src/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------

/run_local.sh:
--------------------------------------------------------------------------------
# Run through the repo-root entry point so src/ is imported as a package
# (slackbot.py uses package-relative imports):
python3 ./main.py
--------------------------------------------------------------------------------

/run_on_modal.sh:
--------------------------------------------------------------------------------
modal run ./src/slackbot.py
--------------------------------------------------------------------------------

/requirements-dev.txt:
--------------------------------------------------------------------------------
-r requirements.txt

pipreqs
--------------------------------------------------------------------------------

/requirements.txt:
--------------------------------------------------------------------------------
langchain==0.0.119
openai==0.27.2
python-dotenv==1.0.0
slack_bolt==1.16.4
slack_sdk==3.20.2
--------------------------------------------------------------------------------

/.env.example:
--------------------------------------------------------------------------------
# You will need all of the following keys
OPENAI_API_KEY=
SLACK_APP_TOKEN=
SLACK_BOT_TOKEN=
SLACK_SIGNING_SECRET=
--------------------------------------------------------------------------------

/railway.json:
--------------------------------------------------------------------------------
{
    "$schema": "https://railway.app/railway.schema.json",
    "build": {
        "builder": "NIXPACKS"
    },
    "deploy": {
        "startCommand": "python main.py",
        "restartPolicyType": "ON_FAILURE",
        "restartPolicyMaxRetries": 10
    }
}
--------------------------------------------------------------------------------

/main.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3


import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

import asyncio
import os
from dotenv import load_dotenv
from pathlib import Path

# Get the folder this file is in:
this_file_folder = os.path.dirname(os.path.realpath(__file__))
load_dotenv(Path(this_file_folder) / ".env")

from src.slackbot import slack_bot

async def start():
    await slack_bot.start()

if __name__ == "__main__":
    asyncio.run(start())
--------------------------------------------------------------------------------

/src/main.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
import logging
logger = logging.getLogger(__name__)

import asyncio
import os
from dotenv import load_dotenv
from pathlib import Path

# Get the folder this file is in:
this_file_folder = os.path.dirname(os.path.realpath(__file__))
# Get the parent folder of this file's folder:
parent_folder = os.path.dirname(this_file_folder)

load_dotenv(Path(parent_folder) / ".env")

from slackbot import slack_bot

async def main():
    await slack_bot.start()

if __name__ == "__main__":
    asyncio.run(main())
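
# Note: slackbot.py uses package-relative imports (from .ConversationAI ...),
# so it must load as part of the src package; the repo-root main.py does that
# via `from src.slackbot import slack_bot`, which is why run_local.sh goes
# through the root entry point.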
--------------------------------------------------------------------------------

/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 Taylor Brown

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------

/src/SimpleThrottle.py:
--------------------------------------------------------------------------------
import time
import asyncio

class SimpleThrottle:
    def __init__(self, coro, delay):
        self.coro = coro
        self.delay = delay
        self.last_call = None
        self.update_task = None
        self.queue_count = 0

    async def _wrapper(self):
        if self.queue_count > 0:
            self.queue_count -= 1

        if self.last_call is not None:
            elapsed_time = time.time() - self.last_call
            if elapsed_time < self.delay:
                await asyncio.sleep(self.delay - elapsed_time)

        await self.coro()
        self.last_call = time.time()
        self.update_task = None

        if self.queue_count > 0:
            await self.call()

    async def call(self):
        if self.update_task is None:
            self.update_task = asyncio.ensure_future(self._wrapper())
        else:
            self.queue_count = min(self.queue_count + 1, 1)

    async def call_and_wait(self):
        if self.update_task is not None:
            await self.update_task
        await self.coro()
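
# Illustrative usage (hypothetical names; this mirrors how
# AsyncStreamingSlackCallbackHandler drives it):
#
#   throttle = SimpleThrottle(push_to_slack, delay=0.1)
#   for token in tokens:
#       current_message += token
#       await throttle.call()       # rapid calls coalesce into one pending run
#   await throttle.call_and_wait()  # flush the final state
#
# At most one queued run is kept (queue_count is capped at 1), so push_to_slack
# fires at most once per `delay` seconds no matter how often call() arrives.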
--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
# Slackbot that uses langchain under the hood

[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/GB1yZ7)

# Local development:

1. Create your Slackbot first, by following this tutorial:
https://slack.dev/bolt-python/tutorial/getting-started
(We really need to create a bot template, and will do that in the future)

2. Then, copy .env.example to .env and fill in the values.

3. Install the dependencies and run the bot:

```bash
pip install -r requirements-dev.txt

# To run it locally (no need to use ngrok or anything)
./run_local.sh

# Now you can talk to the bot on Slack
@chatterbot Tell me a joke.
```

# TODO:

- [x] Deploy the bot to Railway
- [x] Make the bot respond with markdown
- [ ] Use Redis for permanent memory
- [x] Use OpenAI streaming to generate text
- [x] Create a dev and staging bot that runs the bot with a different name in Slack
- [ ] Switch to use a different type of memory, like token buffer: https://langchain.readthedocs.io/en/latest/modules/memory/types/token_buffer.html
- [x] Get rid of the error that happens when a system message arrives and there isn't a "user" field.
- [x] Welcome people to the channel via something creative
- [ ] On a regular basis (daily?), have Chatterbot say something interesting
- [ ] Notify the user if GPT is being slow...
- [x] Embed usernames and profile information about the people chatting with Chatterbot, if it hasn't already seen them before
- [ ] Use Slack-Machine as the framework
- [x] Make Chatterbot not respond if it is not being spoken to directly
- [ ] ~Have a "Bot is typing..." message or some other indicator...~ (Also turns out this is impossible)
- [ ] ~Make the bot have a green dot if it's online or a grey dot if not~ (Turns out this is impossible in the new framework??)
--------------------------------------------------------------------------------

/slackbot-manifest-chatterbot.json:
--------------------------------------------------------------------------------
{
    "display_information": {
        "name": "Chatterbot",
        "description": "AI bot that uses GPT to be helpful",
        "background_color": "#365dd1"
    },
    "features": {
        "app_home": {
            "home_tab_enabled": false,
            "messages_tab_enabled": true,
            "messages_tab_read_only_enabled": false
        },
        "bot_user": {
            "display_name": "Chatterbot",
            "always_online": true
        }
    },
    "oauth_config": {
        "scopes": {
            "bot": [
                "app_mentions:read",
                "channels:read",
                "chat:write",
                "chat:write.public",
                "channels:history",
                "channels:join",
                "chat:write.customize",
                "files:read",
                "emoji:read",
                "files:write",
                "groups:read",
                "groups:history",
                "im:history",
                "im:read",
                "im:write",
                "links:read",
                "links:write",
                "metadata.message:read",
                "mpim:history",
                "mpim:read",
                "mpim:write",
                "reactions:read",
                "reactions:write",
                "users:read",
                "users.profile:read",
                "users:read.email",
                "users:write"
            ]
        }
    },
    "settings": {
        "event_subscriptions": {
            "bot_events": [
                "app_mention",
                "message.channels",
                "message.groups",
                "message.im",
                "message.mpim",
                "reaction_added",
                "reaction_removed"
            ]
        },
        "interactivity": {
            "is_enabled": true
        },
        "org_deploy_enabled": false,
        "socket_mode_enabled": true,
        "token_rotation_enabled": false
    }
}
--------------------------------------------------------------------------------

/slackbot-manifest-chatterbot-dev.json:
--------------------------------------------------------------------------------
{
    "display_information": {
        "name": "Chatterbot-Dev",
        "description": "Developer mode for AI bot that uses GPT to be helpful",
        "background_color": "#ff0000"
    },
    "features": {
        "app_home": {
            "home_tab_enabled": false,
            "messages_tab_enabled": true,
            "messages_tab_read_only_enabled": false
        },
        "bot_user": {
            "display_name": "Chatterbot-Dev",
            "always_online": true
        }
    },
    "oauth_config": {
        "scopes": {
            "bot": [
                "app_mentions:read",
                "channels:read",
                "chat:write",
                "chat:write.public",
                "channels:history",
                "channels:join",
                "chat:write.customize",
                "files:read",
                "emoji:read",
                "files:write",
                "groups:read",
                "groups:history",
                "im:history",
                "im:read",
                "im:write",
                "links:read",
                "links:write",
                "metadata.message:read",
                "mpim:history",
                "mpim:read",
                "mpim:write",
                "reactions:read",
                "reactions:write",
                "users:read",
                "users.profile:read",
                "users:read.email",
                "users:write"
            ]
        }
    },
    "settings": {
        "event_subscriptions": {
            "bot_events": [
                "app_mention",
                "message.channels",
                "message.groups",
                "message.im",
                "message.mpim",
                "reaction_added",
                "reaction_removed"
            ]
        },
        "interactivity": {
            "is_enabled": true
        },
        "org_deploy_enabled": false,
        "socket_mode_enabled": true,
        "token_rotation_enabled": false
    }
}
--------------------------------------------------------------------------------

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
--------------------------------------------------------------------------------

/src/AsyncStreamingSlackCallbackHandler.py:
--------------------------------------------------------------------------------
import logging
from typing import Any, Dict, List, Union

logger = logging.getLogger(__name__)

from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError

from .SimpleThrottle import SimpleThrottle


class AsyncStreamingSlackCallbackHandler(AsyncCallbackHandler):
    """Async callback handler for streaming to Slack. Only works with LLMs that support streaming."""

    def __init__(self, client: WebClient):
        self.client = client
        self.channel_id = None
        self.thread_ts = None
        self.update_delay = 0.1  # Set the desired delay in seconds
        self.update_throttle = SimpleThrottle(self._update_message_in_slack, self.update_delay)

    async def start_new_response(self, channel_id, thread_ts):
        self.current_message = ""
        self.message_ts = None
        self.channel_id = channel_id
        self.thread_ts = thread_ts

    async def _update_message_in_slack(self):
        try:
            await self.client.chat_update(
                channel=self.channel_id, ts=self.message_ts, text=self.current_message
            )
        except SlackApiError as e:
            print(f"Error updating message: {e}")

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""
        self.current_message += token
        await self.update_throttle.call()


    async def handle_llm_error(self, e: Exception) -> None:
        """Post error message to channel with provided channel_id and thread_ts."""
        try:
            logger.error(f"Got LLM Error. Will post about it: {e}")
            await self.client.chat_postMessage(text=str(e), channel=self.channel_id, thread_ts=self.thread_ts)
        except Exception as e:
            logger.error(f"Error posting exception message: {e}")

    async def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        try:
            if self.channel_id is None:
                raise Exception("channel_id is None")
            # Send an empty response and get the timestamp
            post_response = await self.client.chat_postMessage(text="...", channel=self.channel_id, thread_ts=self.thread_ts)
            self.message_ts: str = post_response["ts"]
        except Exception as e:
            await self.handle_llm_error(e)

    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        try:
            await self.update_throttle.call_and_wait()
            # Make sure it got the last one:
            await self.start_new_response(self.channel_id, self.thread_ts)
        except Exception as e:
            await self.handle_llm_error(e)

    async def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        await self.handle_llm_error(error)

    async def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Run when chain starts running."""

    async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running."""

    async def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when chain errors."""

    async def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> None:
        """Run when tool starts running."""

    async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run on agent action."""
        pass

    async def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Run when tool ends running."""

    async def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when tool errors."""

    async def on_text(self, text: str, **kwargs: Any) -> None:
        """Run on arbitrary text."""
        print("Got text!", text)

    async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Run on agent end."""
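
# How this handler is wired up (see ConversationAI.create_agent):
#   handler = AsyncStreamingSlackCallbackHandler(slack_client)
#   llm = ChatOpenAI(..., streaming=True,
#                    callback_manager=AsyncCallbackManager([handler]))
#   await handler.start_new_response(channel_id, thread_ts)
# After that, each on_llm_new_token() appends the token to current_message and
# lets the SimpleThrottle batch the chat_update calls to Slack.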
--------------------------------------------------------------------------------

/src/conversation_utils.py:
--------------------------------------------------------------------------------
import os
from langchain.agents import Tool
from langchain.memory import ConversationBufferMemory
from langchain import OpenAI, LLMChain
from langchain.chat_models.openai import ChatOpenAI
from langchain.agents import initialize_agent
from langchain.agents import load_tools
from langchain.utilities import GoogleSerperAPIWrapper
from langchain.utilities import SerpAPIWrapper
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
from langchain.prompts import PromptTemplate

from typing import Dict, List, Optional
from pydantic import BaseModel

class Generation(BaseModel):
    text: str
    generation_info: Optional[str] = None

class TokenUsage(BaseModel):
    completion_tokens: int
    prompt_tokens: int
    total_tokens: int

class LlmOutput(BaseModel):
    token_usage: TokenUsage
    model_name: str

class Output(BaseModel):
    generations: List[List[Generation]]
    llm_output: LlmOutput

# llm_babbage = OpenAI(temperature=0, model_name="babbage", request_timeout=30, max_retries=2)
# llm_curie = OpenAI(temperature=0, model_name="curie", request_timeout=30, max_retries=2)
# llm_davinci = OpenAI(temperature=0, model_name="davinci", request_timeout=30, max_retries=2)

# llm_gpt4 = OpenAI(temperature=0, model_name="gpt4", request_timeout=30, max_retries=2, verbose=True)
llm_gpt3_turbo = OpenAI(temperature=0, model_name="gpt-3.5-turbo", request_timeout=30, max_retries=2, verbose=True)

async def get_simple_response(input: str) -> Output:
    return await llm_gpt3_turbo.agenerate([input])
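
# Illustrative call (mirrors how the helpers below unpack the result):
#   response = await get_simple_response("Say hi")
#   text = response.generations[0][0].text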

# TODO: Use agents to do this call and parsing, and combine these two prompts:

async def is_asking_for_smart_mode(input: str):

    prompt = PromptTemplate(
        input_variables=["input"],
        template="""Determine whether the following input contains explicit requests like increased intelligence, extra thinking, gpt4, expensiveness, slowness, etc. If so, return "smart_mode: yes". If the input is not explicitly requesting increased intelligence, slowness, gpt4, your answer should be "smart_mode: no". ONLY write "smart_mode: yes" or "smart_mode: no".

Examples:
Hey Chatterbot, I am gonna need you to think real hard about this one! No need to be creative since I'm just gonna talk about code.
smart_mode: yes

Hey Chatterbot, let's brainstorm some funny song titles!
smart_mode: no

Help me code.
smart_mode: no

{input}
""")

    query = prompt.format(input=input)
    print("About to ask GPT 3 about: ", query)

    try:
        response: Output = await get_simple_response(query)
        response = response.generations[0][0].text
        response = response.split("smart_mode: ")[1]
        response = response.strip().lower()
        return response == "yes"
    except Exception as e:
        print("Error in is_asking_for_smart_mode", e)
        return False

# TODO: Combine with prompt above
async def get_recommended_temperature(input: str, default_temperature=0.3):

    prompt = PromptTemplate(
        input_variables=["input", "default_temperature"],
        template="""Please indicate the appropriate temperature for the LLM to respond to the following message, using a scale from 0.00 to 1.00. For tasks that require maximum precision, such as coding, please use a temperature of 0. For tasks that require more creativity, such as generating imaginative responses, use a temperature of 0.7-1.0. If an explicit temperature/creativity is requested, use that. (Remember to convert percentages to a range between 0 and 1.0) If the appropriate temperature is unclear, please use a default of {default_temperature}. Please note that the temperature should be selected based solely on the nature of the task, and should not be influenced by the complexity or sophistication of the message.

Examples:
Get as creative as possible for this one!
temperature: 1.00

Tell me a bedtime story about a dinosaur!
temperature: 0.80

Let's write some code. (Be really smart please)
temperature: 0.00

Temperature:88%
Model: Super duper smart!
temperature: 0.88

How are you doing today?
temperature: {default_temperature}

###

: {input}
""")

    query = prompt.format(default_temperature=default_temperature, input=input)
    print("About to ask GPT 3 about: ", query)

    try:
        response: Output = await get_simple_response(query)
        response = response.generations[0][0].text
        print("response: ", response)
        response = response.split("temperature: ")[1]
        response = response.strip().lower()
        # try to parse the response as a float:
        try:
            return float(response)
        except ValueError:
            return default_temperature
    except Exception as e:
        print("Error in get_recommended_temperature", e)
        return default_temperature

def get_conversational_agent(model_name="gpt-3.5-turbo"):
    search = GoogleSerperAPIWrapper()
    # TODO: File a PR to fix this return_messages=True thing
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    search = SerpAPIWrapper()
    tools = [
        Tool(
            name="dinosaurs",
            func=search.run,
            description="(Useful to learn about dinosaurs)."
        ),
        Tool(
            name="search",
            func=search.run,
            description="(ONLY if your confidence in your answer is below 0.2, use this tool to search for information)"
        ),
    ]
    llm = ChatOpenAI(temperature=0, model=model_name, verbose=True, request_timeout=30, max_retries=0)
    agent_chain = initialize_agent(tools, llm, agent="chat-conversational-react-description", verbose=True, memory=memory, request_timeout=30)
    return agent_chain
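
# Illustrative usage (this mirrors ConversationAI.create_agent):
#   smart_task = asyncio.create_task(is_asking_for_smart_mode(text))
#   temp_task = asyncio.create_task(get_recommended_temperature(text, 0.3))
#   smart, temp = await asyncio.gather(smart_task, temp_task)
#   model_name = "gpt-4" if smart else "gpt-3.5-turbo"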
--------------------------------------------------------------------------------

/src/ConversationAI.py:
--------------------------------------------------------------------------------
import asyncio
from langchain import ConversationChain
from langchain.agents import Agent, Tool, initialize_agent
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.openai import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import (ChatPromptTemplate, HumanMessagePromptTemplate,
                               MessagesPlaceholder,
                               SystemMessagePromptTemplate)
from langchain.utilities import GoogleSerperAPIWrapper, SerpAPIWrapper
from .conversation_utils import is_asking_for_smart_mode, get_recommended_temperature

from langchain.callbacks.base import AsyncCallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

from slack_sdk import WebClient
# How to do a search over docs with conversation:
# https://langchain.readthedocs.io/en/latest/modules/memory/examples/adding_memory_chain_multiple_inputs.html
# People talking about the parsing error: https://github.com/hwchase17/langchain/issues/1657

from .AsyncStreamingSlackCallbackHandler import AsyncStreamingSlackCallbackHandler


DEFAULT_MODEL = "gpt-3.5-turbo"
UPGRADE_MODEL = "gpt-4"
DEFAULT_TEMPERATURE = 0.3

class ConversationAI:
    def __init__(
        self, bot_name: str, slack_client: WebClient, existing_thread_history=None, model_name: str = None
    ):
        self.bot_name = bot_name
        self.existing_thread_history = existing_thread_history
        self.model_name = None
        self.agent = None
        self.model_temperature = None
        self.slack_client = slack_client
        self.lock = asyncio.Lock()

    async def create_agent(self, sender_user_info, initial_message):
        print(f"Creating new ConversationAI for {self.bot_name}")

        sender_profile = sender_user_info["profile"]

        # TODO: If we are picking up from where a previous thread left off, we shouldn't be looking at the initial message the same way, and should use the original message as the "initial message"

        # Call both async methods simultaneously
        smart_mode_task = asyncio.create_task(is_asking_for_smart_mode(initial_message))
        recommended_temperature_task = asyncio.create_task(get_recommended_temperature(initial_message, DEFAULT_TEMPERATURE))

        # Await the results
        requested_smart_mode, recommended_temperature = await asyncio.gather(smart_mode_task, recommended_temperature_task)

        if requested_smart_mode:
            self.model_name = UPGRADE_MODEL
        else:
            self.model_name = DEFAULT_MODEL

        if recommended_temperature is not None:
            recommended_temperature = max(0.0, recommended_temperature)
            recommended_temperature = min(1.0, recommended_temperature)
        else:
            recommended_temperature = DEFAULT_TEMPERATURE

        self.model_temperature = recommended_temperature

        print("Will use model: " + self.model_name)
        print(f"Will use temperature: {self.model_temperature}")

        model_facts = f"You are based on the OpenAI model {self.model_name}. Your 'creativity temperature' is set to {self.model_temperature}."

        # additional_kwargs={"name": "example_user"}
        prompt = ChatPromptTemplate.from_messages(
            [
                # TODO: We need a way to label who the humans are - does the HumanMessagePromptTemplate support this?
                # TODO: Extract this prompt out of this file
                SystemMessagePromptTemplate.from_template(
                    f"""The following is a Slack chat thread between users and you, a Slack bot named {self.bot_name}.
You are funny and smart, and you are here to help.
If you are not confident in your answer, you say so, because you know that is helpful.
You don't have realtime access to the internet, so if asked for information about a URL or site, you should first acknowledge that your knowledge is limited before responding with what you do know.
Since you are responding in Slack, you format your messages in Slack markdown, and you LOVE to use Slack emojis to convey emotion.
If the human appears to be talking to someone else, especially if they start their message by addressing someone else like "@not-the-bot-name", or they talk about you in the third person, you will ONLY respond with the emoji: ":speak_no_evil:"
Some facts about you:
{model_facts}
"""
                ),
                HumanMessagePromptTemplate.from_template(f"""Here is some information about me. Do not respond to this directly, but feel free to incorporate it into your responses:
I'm {sender_profile.get("real_name")}.
Since we're talking in Slack, you can @mention me like this: "<@{sender_user_info.get("id")}>"
My title is: {sender_profile.get("title")}
My current status: "{sender_profile.get("status_emoji")}{sender_profile.get("status_text")}"
Please try to "tone-match" me: If I use emojis, please use lots of emojis. If I appear business-like, please seem business-like in your responses. Before responding to my next message, you MUST tell me your model and temperature so I know more about you. Don't reference anything I just asked you directly."""),
                MessagesPlaceholder(variable_name="history"),
                HumanMessagePromptTemplate.from_template("{input}")
            ]
        )
        self.callbackHandler = AsyncStreamingSlackCallbackHandler(self.slack_client)

        llm = ChatOpenAI(model_name=self.model_name, temperature=self.model_temperature, request_timeout=60, max_retries=3, streaming=True, verbose=True, callback_manager=AsyncCallbackManager([self.callbackHandler]))
        # This buffer memory can be set to an arbitrary buffer
        memory = ConversationBufferMemory(return_messages=True)

        # existing_thread_history is an array of objects like this:
        # {
        #   "taylor": "Hello, how are you?",
        #   "bot": "I am fine, thank you. How are you?"
        #   "kevin": "@taylor, I'm talking to you now"
        #   "taylor": "@kevin, Oh cool!"
        # }
        # We should iterate through this and add each of these to the memory:
        # Unfortunately, we can't use the human's name because the memory doesn't seem to support that yet
        existing_thread_history = self.existing_thread_history
        if existing_thread_history is not None:
            for message in existing_thread_history:
                sender_name = list(message.keys())[0]  # get the first key which is the name (assuming only one key per dictionary)
                message_content = list(message.values())[0]  # get the first value which is the message content
                if sender_name == "bot":
                    memory.chat_memory.add_ai_message(message_content)
                else:
                    memory.chat_memory.add_user_message(message_content)

        self.memory = memory
        self.agent = ConversationChain(
            memory=memory, prompt=prompt, llm=llm, verbose=True
        )
        return self.agent

    async def get_or_create_agent(self, sender_user_info, message):
        if self.agent is None:
            self.agent = await self.create_agent(sender_user_info, message)
        return self.agent

    async def respond(self, sender_user_info, channel_id: str, thread_ts: str, message_being_responded_to_ts: str, message: str):
        async with self.lock:
            agent = await self.get_or_create_agent(sender_user_info, message)
            # TODO: This is messy and needs to be refactored...
            print("Starting response...")
            await self.callbackHandler.start_new_response(channel_id, thread_ts)
            # Now that we have a handler set up, just telling it to predict is sufficient to get it to start streaming the message response...
            response = await self.agent.apredict(input=message)
            return response

def get_conversational_agent(model_name="gpt-3.5-turbo"):
    search = GoogleSerperAPIWrapper()
    # TODO: File a PR to fix this return_messages=True thing
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    search = SerpAPIWrapper()
    tools = [
        Tool(
            name="search",
            func=search.run,
            description="(ONLY if your confidence in your answer is below 0.2, use this tool to search for information)"
        ),
    ]
    llm = ChatOpenAI(temperature=0, model=model_name, verbose=True, request_timeout=30, max_retries=0)
    agent_chain = initialize_agent(tools, llm, agent="chat-conversational-react-description", verbose=True, memory=memory, request_timeout=30)
    return agent_chain




# class CustomConversationAgent(Agent):
#     def __init__(self, llm_chain, allowed_tools=None):
#         super().__init__(llm_chain, allowed_tools)

#     def parse_output(self, output):
#         lines = output.strip().split("\n")
#         is_talking_to_ai = "no"
#         switch_to_smarter_mode = "no"

#         for line in lines:
#             if line.startswith("Is_talking_to_AI:"):
#                 is_talking_to_ai = line.split(":")[1].strip()
#             elif line.startswith("Switch_to_smarter_mode:"):
#                 switch_to_smarter_mode = line.split(":")[1].strip()

#         return is_talking_to_ai, switch_to_smarter_mode
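
# Typical flow (driven by SlackBot in slackbot.py):
#   ai = ConversationAI(bot_user_name, slack_client, processed_history)
#   response = await ai.respond(sender_user_info, channel_id, thread_ts, message_ts, text)
# respond() lazily builds the agent on first use, so the model and temperature
# are chosen from the first message the bot handles in a thread.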
--------------------------------------------------------------------------------

/src/slackbot.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Make this a python module:


# TODO: How is logging normally controlled?
import logging

logger = logging.getLogger(__name__)

import os
import re
from typing import List

from langchain import OpenAI
from slack_bolt.adapter.socket_mode.async_handler import AsyncSocketModeHandler
from slack_bolt.async_app import AsyncApp
from slack_sdk.errors import SlackApiError

from .ConversationAI import ConversationAI

OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
SLACK_BOT_TOKEN = os.environ.get("SLACK_BOT_TOKEN")
SLACK_APP_TOKEN = os.environ.get('SLACK_APP_TOKEN')
SLACK_SIGNING_SECRET = os.environ.get("SLACK_SIGNING_SECRET")


class SlackBot:
    def __init__(self, slack_app: AsyncApp):
        self.threads_bot_is_participating_in = {}
        self.app = slack_app
        self.client = self.app.client
        self.id_to_name_cache = {}
        self.user_id_to_info_cache = {}

    async def start(self):
        logger.info("Looking up bot user_id. (If this fails, something is wrong with the auth)")
        response = await self.app.client.auth_test()
        self.bot_user_id = response["user_id"]
        self.bot_user_name = await self.get_username_for_user_id(self.bot_user_id)
        logger.info("Bot user id: " + self.bot_user_id)
        logger.info("Bot user name: " + self.bot_user_name)

        await AsyncSocketModeHandler(self.app, SLACK_APP_TOKEN).start_async()

    async def get_user_info_for_user_id(self, user_id):
        user_info = self.user_id_to_info_cache.get(user_id, None)
        if user_info is not None:
            return user_info

        user_info_response = await self.app.client.users_info(user=user_id)
        user_info = user_info_response['user']
        logger.debug(user_info)
        self.user_id_to_info_cache[user_id] = user_info
        return user_info

    async def get_username_for_user_id(self, user_id):
        user_info = await self.get_user_info_for_user_id(user_id)
        profile = user_info['profile']
        if user_info['is_bot']:
            ret_val = profile['real_name']
        else:
            ret_val = profile['display_name']

        return ret_val

    async def upload_snippets(self, channel_id: str, thread_ts: str, response: str) -> str:
        matches: List[str] = re.findall(r"```(.*?)```", response, re.DOTALL)
        counter: int = 1
        for match in matches:
            match = match.strip()
            first_line: str = match.splitlines()[0]
            first_word: str = first_line.split()[0]
            extension: str = ""
            if first_word == "python":
                extension = "py"
            elif first_word in ["javascript", "typescript"]:
                extension = "js"
            elif first_word == "bash":
                extension = "sh"
            if not extension:
                if first_word:
                    extension = first_word
                else:
                    extension = "txt"
            file_response = await self.client.files_upload(channels=channel_id, content=match, filename=f"snippet_{counter}.{extension}", thread_ts=thread_ts)
            file_id: str = file_response["file"]["id"]
            response += "\n" + f""
            counter += 1
        return response
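
    # Example: a model response containing a fenced block whose first word is
    # "python" is uploaded to the thread as snippet_1.py; an unrecognized first
    # word becomes the extension itself, and a bare fence falls back to .txt.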

    async def reply_to_slack(self, channel_id, thread_ts, message_ts, response):
        # In the future, we could take out any triple backticks code like:
        # ```python
        # print("Hello world!")
        # ```
        # And we could upload it to Slack as a file and then link to it in the response.
        # Let's try something - if they have an emoji, and only an emoji, in the response, let's react to the message with that emoji:
        # regex for slack emoji to ensure that the _entire_ message only consists of a single emoji:
        slack_emoji_regex = r"^:[a-z0-9_+-]+:$"
        if re.match(slack_emoji_regex, response.strip()):
            try:
                emoji_name = response.strip().replace(":", "")
                logger.info("Responding with single emoji: " + emoji_name)
                await self.client.reactions_add(channel=channel_id, name=emoji_name, timestamp=message_ts)
            except Exception as e:
                logger.exception(e)
            return
        else:
            await self.client.chat_postMessage(channel=channel_id, text=response, thread_ts=thread_ts)

    async def confirm_message_received(self, channel, thread_ts, message_ts, user_id_of_sender):
        # React to the message with a thinking face emoji:
        try:
            await self.client.reactions_add(channel=channel, name="thinking_face", timestamp=message_ts)
        except Exception as e:
            logger.exception(e)

    async def confirm_wont_respond_to_message(self, channel, thread_ts, message_ts, user_id_of_sender):
        # React to the message with a speak_no_evil emoji:
        try:
            await self.client.reactions_add(channel=channel, name="speak_no_evil", timestamp=message_ts)
        except Exception as e:
            logger.exception(e)


    async def respond_to_message(self, channel_id, thread_ts, message_ts, user_id, text):
        try:
            conversation_ai: ConversationAI = self.threads_bot_is_participating_in.get(thread_ts, None)
            if conversation_ai is None:
                raise Exception("No AI found for thread_ts")
            text = await self.translate_mentions_to_names(text)
            sender_user_info = await self.get_user_info_for_user_id(user_id)
            response = await conversation_ai.respond(sender_user_info, channel_id, thread_ts, message_ts, text)
            if response is None:
                # Let's just put an emoji on the message to say we aren't responding
                await self.confirm_wont_respond_to_message(channel_id, thread_ts, message_ts, user_id)
            # We don't respond here since the bot is streaming responses
        except Exception as e:
            response = f":exclamation::exclamation::exclamation: Error: {e}"
            # Print a red error to the console:
            logger.exception(response)
            await self.reply_to_slack(channel_id, thread_ts, message_ts, response)

    @staticmethod
    def is_parent_thread_message(message_ts, thread_ts):
        return message_ts == thread_ts

    async def translate_mentions_to_names(self, text):
        # Replace every @mention of a user id with their actual name:
        # First, use a regex to find @mentions that look like <@U123456789>:
        matches = re.findall(r"<@(U[A-Z0-9]+)>", text)
        for match in matches:
            mention_string = f"<@{match}>"
            mention_name = await self.get_username_for_user_id(match)
            if mention_name is not None:
                text = text.replace(mention_string, "@" + mention_name)

        return text

    async def add_ai_to_thread(self, channel_id, thread_ts, message_ts):
        if thread_ts in self.threads_bot_is_participating_in:
            return

        processed_history = None
        # Is this thread_ts the very first message in the thread? If so, we need to create a new AI for it.
        if not self.is_parent_thread_message(message_ts, thread_ts):
            logger.debug("It looks like I am not the first message in the thread. I should get the full thread history from Slack and add it to my memory.")
            # This is not the very first message in the thread
            # We should figure out a way to bootstrap the memory:
            # Get the full thread history from Slack:
            thread_history = await self.client.conversations_replies(channel=channel_id, ts=thread_ts)
            # Iterate through the thread history, adding each of these to the ai_memory:
            processed_history = []
            message_history = thread_history.data['messages']
            # Get rid of the last message from the history since it's the message we're responding to:
            message_history = message_history[:-1]
            for message in message_history:
                text = message['text']
                text = await self.translate_mentions_to_names(text)
                user_id = message['user']
                user_name = await self.get_username_for_user_id(user_id)
                if user_id == self.bot_user_id:
                    processed_history.append({"bot": text})
                else:
                    # Get the username for this user_id:
                    processed_history.append({f"{user_name}": text})

        ai = ConversationAI(self.bot_user_name, self.client, processed_history)
        self.threads_bot_is_participating_in[thread_ts] = ai

    def is_ai_participating_in_thread(self, thread_ts, message_ts):
        if thread_ts in self.threads_bot_is_participating_in:
            return True
        return False

    def is_bot_mentioned(self, text):
        return f"<@{self.bot_user_id}>" in text

    async def on_message(self, event, say):
        message_ts = event['ts']
        thread_ts = event.get('thread_ts', message_ts)
        try:
            # {'client_msg_id': '7e605650-8b39-4f61-99c5-795a1168fb7c', 'type': 'message', 'text': 'Hi there Chatterbot', 'user': 'U024LBTMX', 'ts': '1679289332.087509', 'blocks': [{'type': 'rich_text', 'block_id': 'ins/', 'elements': [{'type': 'rich_text_section', 'elements': [{'type': 'text', 'text': 'Hi there Chatterbot'}]}]}], 'team': 'T024LBTMV', 'channel': 'D04V265MYEM', 'event_ts': '1679289332.087509', 'channel_type': 'im'}

            logger.info(f"Received message event: {event}")
            # At first I thought we weren't told about our own messages, but I don't think that's true. Let's make sure we aren't hearing about our own:
            if event.get('user', None) == self.bot_user_id:
                logger.debug("Not handling message event since I sent the message.")
                return

            start_participating_if_not_already = False
            channel_id = event['channel']
            # Is this message part of an im?
            channel_type = event.get('channel_type', None)
            if channel_type and channel_type == "im":
                # This is a direct message. So of course we should be participating if we are not
                start_participating_if_not_already = True
            # else if this is a message in a channel:
            elif self.is_bot_mentioned(event['text']):
                # This is a message in a channel, but it mentions us. So we should be participating if we are not
                start_participating_if_not_already = True

            if start_participating_if_not_already:
                await self.add_ai_to_thread(channel_id, thread_ts, message_ts)

            # And now, are we participating in it?
            if self.is_ai_participating_in_thread(thread_ts, message_ts):
                user_id = event['user']
                text = event['text']
                await self.confirm_message_received(channel_id, thread_ts, message_ts, user_id)
                await self.respond_to_message(channel_id, thread_ts, message_ts, user_id, text)
        except Exception as e:
            response = f":exclamation::exclamation::exclamation: Error: {e}"
            logger.exception(response)
            await say(text=response, thread_ts=thread_ts)

    async def on_member_joined_channel(self, event_data):
        # Get user ID and channel ID from event data
        user_id = event_data["user"]
        channel_id = event_data["channel"]

        user_info = await self.get_user_info_for_user_id(user_id)
        username = await self.get_username_for_user_id(user_id)
        profile = user_info.get("profile", {})
        llm_gpt3_turbo = OpenAI(temperature=1, model_name="gpt-3.5-turbo", request_timeout=30, max_retries=5, verbose=True)

        # TODO: Extract into yaml file instead:
        welcome_message = (await llm_gpt3_turbo.agenerate([f"""
You are a funny and creative slackbot {self.bot_user_name}
Someone just joined a Slack channel you are a member of, and you want to welcome them creatively and in a way that will make them feel special.
You are VERY EXCITED about someone joining the channel, and you want to convey that!
Their username is {username}, but when you mention their username, you should say "<@{user_id}>" instead.
Their title is: {profile.get("title")}
Their current status: "{profile.get("status_emoji")} {profile.get("status_text")}"
Write a slack message, formatted in Slack markdown, that encourages everyone to welcome them to the channel excitedly.
Use emojis. Maybe write a song. Maybe a poem.

Afterwards, tell the user that you look forward to "chatting" with them, and tell them that they can just mention <@{self.bot_user_id}> whenever they want to talk.
"""])).generations[0][0].text
        if welcome_message:
            try:
                # Send a welcome message to the user
                await self.client.chat_postMessage(channel=channel_id, text=welcome_message)
            except Exception:
                logger.exception("Error sending welcome message")


app = AsyncApp(token=SLACK_BOT_TOKEN, signing_secret=SLACK_SIGNING_SECRET)
client = app.client
slack_bot = SlackBot(app)

@app.event("message")
async def on_message(payload, say):
    logger.info("Processing message...")
    await slack_bot.on_message(payload, say)

# Define event handler for user joining a channel
@app.event("member_joined_channel")
async def handle_member_joined_channel(event_data):
    logger.info(f"Processing member_joined_channel event: {event_data}")
    await slack_bot.on_member_joined_channel(event_data)

@app.event('reaction_added')
async def on_reaction_added(payload):
    logger.info("Ignoring reaction_added")

@app.event('reaction_removed')
async def on_reaction_removed(payload):
    logger.info("Ignoring reaction_removed")

@app.event('app_mention')
async def on_app_mention(payload, say):
    logger.info("Ignoring app_mention in favor of handling it via the message handler...")

# This will take too long - maybe at some point we can do this and save it off somewhere
async def getCommonCustomEmojis(num_to_retrieve: int = 40):
    # Call the emoji.list API method to retrieve the list of custom emojis
    try:
        response = await client.emoji_list()
        emoji_list = response["emoji"]
    except SlackApiError as e:
        print("Error retrieving custom emojis: {}".format(e))
        return []

    # Retrieve the usage count for each custom emoji
    # (NOTE: emoji_get is not part of the standard Slack Web API; this helper
    # is speculative and currently unused.)
    emoji_counts = []
    for emoji in emoji_list:
        try:
            response = await client.emoji_get(name=emoji)
            count = response["emoji"]["usage_count"]
            emoji_counts.append((emoji, count))
        except SlackApiError as e:
            print("Error retrieving usage count for emoji {}: {}".format(emoji, e))

    # Sort the list of custom emojis by usage count in descending order
    emoji_counts.sort(key=lambda x: x[1], reverse=True)

    # Retrieve the num_to_retrieve most commonly used custom emojis
    most_common_emojis = [emoji for emoji, count in emoji_counts[:num_to_retrieve]]

    return most_common_emojis
--------------------------------------------------------------------------------