├── .github
│   └── workflows
│       ├── ruff.yml
│       ├── sync_codellama.yml
│       ├── sync_deepfloydif.yml
│       ├── sync_falcon180b.yml
│       └── sync_wuerstchen.yml
├── Makefile
├── README.md
├── codellama
│   └── codellama.py
├── deepfloydif
│   └── deepfloydif.py
├── falcon180b
│   └── falcon180b.py
├── legacy
│   ├── audioldm2
│   └── musicgen.py
├── pyproject.toml
├── requirements.txt
└── wuerstchen
    └── wuerstchen.py

/.github/workflows/ruff.yml:
--------------------------------------------------------------------------------
name: Ruff
on: [ push, pull_request ]
jobs:
  ruff:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.11.4

      - name: Install dependencies
        run: pip install black ruff

      - name: Run Black
        run: black . --check --diff

      - name: Run Ruff
        run: ruff check .
--------------------------------------------------------------------------------
/.github/workflows/sync_codellama.yml:
--------------------------------------------------------------------------------
name: Sync with Hugging Face Hub

on:
  push:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Sync with Hugging Face (Codellama)
        uses: nateraw/huggingface-sync-action@v0.0.4
        with:
          github_repo_id: huggingface/discord-bots
          huggingface_repo_id: huggingface-projects/codellama-bot
          repo_type: space
          space_sdk: gradio
          hf_token: ${{ secrets.HF_TOKEN_WRITE }}
          subdirectory: codellama
          files: |
            app.py: huggingface-projects/codellama-bot
            requirements.txt: huggingface-projects/codellama-bot
--------------------------------------------------------------------------------
/.github/workflows/sync_deepfloydif.yml:
--------------------------------------------------------------------------------
name: Sync with Hugging Face Hub

on:
  push:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Sync with Hugging Face (DeepfloydIF)
        uses: nateraw/huggingface-sync-action@v0.0.4
        with:
          github_repo_id: huggingface/discord-bots
          huggingface_repo_id: huggingface-projects/deepfloydif-bot
          repo_type: space
          space_sdk: gradio
          hf_token: ${{ secrets.HF_TOKEN_WRITE }}
          subdirectory: deepfloydif
          files: |
            app.py: huggingface-projects/deepfloydif-bot
            requirements.txt: huggingface-projects/deepfloydif-bot
--------------------------------------------------------------------------------
/.github/workflows/sync_falcon180b.yml:
--------------------------------------------------------------------------------
name: Sync with Hugging Face Hub

on:
  push:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Sync with Hugging Face (Falcon180b)
        uses: nateraw/huggingface-sync-action@v0.0.4
        with:
          github_repo_id: huggingface/discord-bots
          huggingface_repo_id: huggingface-projects/falcon180b-bot
          repo_type: space
          space_sdk: gradio
          hf_token: ${{ secrets.HF_TOKEN_WRITE }}
          subdirectory: falcon180b
          files: |
            app.py: huggingface-projects/falcon180b-bot
            requirements.txt: huggingface-projects/falcon180b-bot
--------------------------------------------------------------------------------
/.github/workflows/sync_wuerstchen.yml:
--------------------------------------------------------------------------------
name: Sync with Hugging Face Hub

on:
  push:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Sync with Hugging Face (Wuerstchen)
        uses: nateraw/huggingface-sync-action@v0.0.4
        with:
          github_repo_id: huggingface/discord-bots
          huggingface_repo_id: huggingface-projects/wuerstchen-bot
          repo_type: space
          space_sdk: gradio
          hf_token: ${{ secrets.HF_TOKEN_WRITE }}
          subdirectory: wuerstchen
          files: |
            app.py: huggingface-projects/wuerstchen-bot
            requirements.txt: huggingface-projects/wuerstchen-bot
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
.PHONY: quality style

check_dirs := codellama deepfloydif falcon180b legacy wuerstchen

quality:
	black --check $(check_dirs)
	ruff check $(check_dirs)

style:
	black $(check_dirs)
	ruff check $(check_dirs) --fix
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Our bots 🤖
| Bot | Code Link | Invite Link |
| -------- | -------- | -------- |
| CodeLlama 13B | [Code](https://huggingface.co/spaces/huggingface-projects/codellama-bot) | [Invite Bot](https://discord.com/api/oauth2/authorize?client_id=1152238037355474964&permissions=309237647360&scope=bot) |
| DeepFloydIF | [Code](https://huggingface.co/spaces/huggingface-projects/deepfloydif-bot) | [Invite Bot](https://discord.com/api/oauth2/authorize?client_id=1154395078735953930&permissions=51200&scope=bot) |
| Falcon 180B | [Code](https://huggingface.co/spaces/huggingface-projects/falcon180b-bot) | [Invite Bot](https://discord.com/api/oauth2/authorize?client_id=1155169841276260546&permissions=326417516544&scope=bot) |
| Wuerstchen | [Code](https://huggingface.co/spaces/huggingface-projects/wuerstchen-bot) | [Invite Bot](https://discord.com/api/oauth2/authorize?client_id=1155489509518098565&permissions=51200&scope=bot) |
| AudioLDM 2 | [Code](https://huggingface.co/spaces/huggingface-projects/AudioLDM2-bot) | - |
| MusicGen | [Code](https://huggingface.co/spaces/huggingface-projects/MusicGen-bot) | - |


# TLDR: How do our bots work ❓

- We run the bots inside a free-tier [Space](https://huggingface.co/new-space?sdk=gradio), which acts as a server.
- We query Gradio apps as APIs to power the bots.

### Building blocks of a Discord Bot 🤖

1. Create an [application](https://discord.com/developers/applications)
2. Create a Hugging Face [Space](https://huggingface.co/new-space?sdk=gradio)
3. Add [commands](https://huggingface.co/spaces/huggingface-projects/huggingbots/blob/main/app.py)

After that, we'll have a working Discord bot (a minimal skeleton is sketched below). So how do we spice it up with machine learning?
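
Glued together, those three steps look roughly like this. This is a minimal sketch, not one of our production bots; the `echo` command and its description are placeholders:

```python
import os

import discord
from discord.ext import commands

intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix="/", intents=intents)


@bot.event
async def on_ready():
    await bot.tree.sync()  # register the slash commands with Discord
    print(f"Logged in as {bot.user} (ID: {bot.user.id})")


# placeholder command: replies with whatever prompt it receives
@bot.hybrid_command(name="echo", description="Repeat your prompt back to you!")
async def echo(ctx, prompt: str):
    await ctx.send(prompt)


bot.run(os.getenv("DISCORD_TOKEN"))
```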

### Using ML demos in your bot 🧠
- Almost any [Gradio](https://github.com/gradio-app/gradio/tree/main/client/python) app can be [used as an API](https://www.gradio.app/guides/sharing-your-app#api-page)! This means we can query most Spaces on the Hugging Face Hub and use them in our Discord bots.

![image](https://github.com/lunarflu/fork-discord-bots/assets/70143200/97316c28-7c99-42c0-ab6a-687819d678f8)


Here's an extremely simplified example 💻:

```python
import asyncio
import os

import discord
from gradio_client import Client

musicgen = Client("huggingface-projects/transformers-musicgen", hf_token=os.getenv("HF_TOKEN"))


# call this function when we use a command + prompt
async def music_create(ctx, prompt):
    # run_in_executor for the blocking function
    loop = asyncio.get_running_loop()
    job = await loop.run_in_executor(None, music_create_job, prompt)

    # extract what we want from the outputs
    video = job.outputs()[0][0]

    # send what we want to Discord
    with open(video, "rb") as f:
        await ctx.send(file=discord.File(f, "musicgen.mp4"))


# submit as a Gradio job; this makes retrieving outputs simpler
def music_create_job(prompt):
    # pass the prompt (and other parameters if necessary)
    job = musicgen.submit(prompt, api_name="/predict")
    job.result()  # block until the job has finished
    return job
```
In summary, we:
1. Use a command and specify a prompt ("piano music", for example)
2. Query a specific Gradio Space as an API, and send it our prompt
3. Retrieve the results once done and post them to Discord

🎉 And voila! 🎉

For further exploration (depending on your needs), we recommend checking these out 🧐:
- Events in Discord bots (to automate some behavior)
- Handling concurrency (important if you're making many requests at once)
- UI (Discord buttons, interactive fields) can add a lot of functionality (see the sketch below)
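
As a taste of that last point, here's a minimal sketch of a clickable button with `discord.ui`; the `ping` command and its labels are placeholders, and the DeepFloydIF bot below uses the same pattern for its upscale buttons:

```python
import discord
from discord.ext import commands
from discord.ui import Button, View

bot = commands.Bot(command_prefix="/", intents=discord.Intents.default())


@bot.hybrid_command(name="ping", description="Reply with a clickable button!")
async def ping(ctx):
    button = Button(label="Click me!", custom_id="ping_button")

    async def button_callback(interaction):
        # respond to the click; ephemeral means only the clicker sees the reply
        await interaction.response.send_message("Pong!", ephemeral=True)

    button.callback = button_callback
    view = View(timeout=None)
    view.add_item(button)
    await ctx.send("Here's a button:", view=view)
```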
--------------------------------------------------------------------------------
/codellama/codellama.py:
--------------------------------------------------------------------------------
import asyncio
import json
import os
import threading
from threading import Event

import discord
import gradio as gr
from discord.ext import commands
from gradio_client import Client

event = Event()
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
HF_TOKEN = os.getenv("HF_TOKEN")
codellama_client = Client("https://huggingface-projects-codellama-13b-chat.hf.space/", HF_TOKEN)
codellama_threadid_userid_dictionary = {}
codellama_threadid_conversation = {}
intents = discord.Intents.all()
bot = commands.Bot(command_prefix="/", intents=intents)


@bot.event
async def on_ready():
    print(f"Logged in as {bot.user} (ID: {bot.user.id})")
    synced = await bot.tree.sync()
    print(f"Synced commands: {', '.join([s.name for s in synced])}.")
    event.set()
    print("------")


@bot.hybrid_command(
    name="codellama",
    description="Enter a prompt to generate code!",
)
async def codellama(ctx, prompt: str):
    """Codellama generation"""
    try:
        await try_codellama(ctx, prompt)
    except Exception as e:
        print(f"Error: {e}")


@bot.event
async def on_message(message):
    """Checks the channel and continues the codellama conversation if it's the right Discord thread"""
    try:
        if not message.author.bot:
            await continue_codellama(message)
    except Exception as e:
        print(f"Error: {e}")


async def try_codellama(ctx, prompt):
    """Generates code based on a given prompt"""
    try:
        if ctx.guild.id == 879548962464493619:
            if ctx.channel.id != 1147210106321256508:
                return

        global codellama_threadid_userid_dictionary
        global codellama_threadid_conversation

        message = await ctx.send(f"**{prompt}** - {ctx.author.mention}")
        thread = await message.create_thread(name=prompt[:100])

        loop = asyncio.get_running_loop()
        output_code = await loop.run_in_executor(None, codellama_initial_generation, prompt, thread)
        codellama_threadid_userid_dictionary[thread.id] = ctx.author.id
        await thread.send(output_code)
    except Exception as e:
        print(f"Error: {e}")


def codellama_initial_generation(prompt, thread):
    """Runs the blocking submit call inside run_in_executor for more consistent bot behavior"""
    global codellama_threadid_conversation

    chat_history = f"{thread.id}.json"
    conversation = []
    with open(chat_history, "w") as json_file:
        json.dump(conversation, json_file)

    job = codellama_client.submit(prompt, chat_history, fn_index=0)

    while not job.done():
        pass

    result = job.outputs()[-1]
    with open(result, "r") as json_file:
        data = json.load(json_file)
    response = data[-1][-1]
    conversation.append((prompt, response))
    with open(chat_history, "w") as json_file:
        json.dump(conversation, json_file)

    codellama_threadid_conversation[thread.id] = chat_history
    if len(response) > 1300:
        response = response[:1300] + "...\nTruncating response due to Discord API limits."
    return response


async def continue_codellama(message):
    """Continues a given conversation based on chat_history"""
    try:
        if not message.author.bot:
            global codellama_threadid_userid_dictionary  # tracks userid-thread existence
            if message.channel.id in codellama_threadid_userid_dictionary:  # is this a valid thread?
                if codellama_threadid_userid_dictionary[message.channel.id] == message.author.id:
                    global codellama_threadid_conversation

                    prompt = message.content
                    chat_history = codellama_threadid_conversation[message.channel.id]

                    # Check to see if the conversation is ongoing or ended (>15000 characters)
                    with open(chat_history, "r") as json_file:
                        conversation = json.load(json_file)
                    total_characters = 0
                    for item in conversation:
                        for string in item:
                            total_characters += len(string)

                    if total_characters < 15000:
                        job = codellama_client.submit(prompt, chat_history, fn_index=0)
                        while not job.done():
                            pass

                        result = job.outputs()[-1]
                        with open(result, "r") as json_file:
                            data = json.load(json_file)
                        response = data[-1][-1]
                        with open(chat_history, "r") as json_file:
                            conversation = json.load(json_file)
                        conversation.append((prompt, response))
                        with open(chat_history, "w") as json_file:
                            json.dump(conversation, json_file)
                        codellama_threadid_conversation[message.channel.id] = chat_history

                        if len(response) > 1300:
                            response = response[:1300] + "...\nTruncating response due to Discord API limits."

                        await message.reply(response)

                        total_characters = 0
                        for item in conversation:
                            for string in item:
                                total_characters += len(string)

                        if total_characters >= 15000:
                            await message.reply("Conversation ending due to length, feel free to start a new one!")

    except Exception as e:
        print(f"Error: {e}")


def run_bot():
    if not DISCORD_TOKEN:
        print("DISCORD_TOKEN NOT SET")
        event.set()
    else:
        bot.run(DISCORD_TOKEN)


threading.Thread(target=run_bot).start()
event.wait()

welcome_message = """
## Add this bot to your server by clicking this link:

https://discord.com/api/oauth2/authorize?client_id=1152238037355474964&permissions=309237647360&scope=bot

## How to use it?

The bot can be triggered via `/codellama` followed by your text prompt.

This will generate text based on the text prompt and create a thread for the discussion.

To continue the conversation, simply ask additional questions in the thread - no need to repeat the command!

⚠️ Note ⚠️: Please make sure this bot's command does not have the same name as another command in your server.

⚠️ Note ⚠️: Bot commands do not work in DMs with the bot as of now.
"""


with gr.Blocks() as demo:
    gr.Markdown(f"""
    # Discord bot of https://huggingface.co/spaces/codellama/codellama-13b-chat
    {welcome_message}
    """)

demo.launch()
--------------------------------------------------------------------------------
/deepfloydif/deepfloydif.py:
--------------------------------------------------------------------------------
import asyncio
import glob
import os
import pathlib
import random
import threading

import discord
import gradio as gr
from discord.ext import commands
from discord.ui import Button, View
from gradio_client import Client
from PIL import Image

HF_TOKEN = os.getenv("HF_TOKEN")
deepfloydif_client = Client("huggingface-projects/IF", HF_TOKEN)
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
intents = discord.Intents.all()
bot = commands.Bot(command_prefix="/", intents=intents)


@bot.event
async def on_ready():
    print(f"Logged in as {bot.user} (ID: {bot.user.id})")
    synced = await bot.tree.sync()
    print(f"Synced commands: {', '.join([s.name for s in synced])}.")
    print("------")


@bot.hybrid_command(
    name="deepfloydif",
    description="Enter a prompt to generate an image! Can generate realistic text, too!",
)
async def deepfloydif(ctx, prompt: str):
    """DeepfloydIF stage 1 generation"""
    try:
        await deepfloydif_generate64(ctx, prompt)
    except Exception as e:
        print(f"Error: {e}")


def deepfloydif_generate64_inference(prompt):
    """Generates four images based on a prompt"""
    negative_prompt = ""
    seed = random.randint(0, 1000)
    number_of_images = 4
    guidance_scale = 7
    custom_timesteps_1 = "smart50"
    number_of_inference_steps = 50
    (
        stage_1_images,
        stage_1_param_path,
        path_for_upscale256_upscaling,
    ) = deepfloydif_client.predict(
        prompt,
        negative_prompt,
        seed,
        number_of_images,
        guidance_scale,
        custom_timesteps_1,
        number_of_inference_steps,
        api_name="/generate64",
    )
    return [stage_1_images, stage_1_param_path, path_for_upscale256_upscaling]


def deepfloydif_upscale256_inference(index, path_for_upscale256_upscaling):
    """Upscales one of the images from deepfloydif_generate64_inference based on the chosen index"""
    selected_index_for_upscale256 = index
    seed_2 = 0
    guidance_scale_2 = 4
    custom_timesteps_2 = "smart50"
    number_of_inference_steps_2 = 50
    result_path = deepfloydif_client.predict(
        path_for_upscale256_upscaling,
        selected_index_for_upscale256,
        seed_2,
        guidance_scale_2,
        custom_timesteps_2,
        number_of_inference_steps_2,
        api_name="/upscale256",
    )
    return result_path


def deepfloydif_upscale1024_inference(index, path_for_upscale256_upscaling, prompt):
    """Upscales to stage 2, then stage 3"""
    selected_index_for_upscale256 = index
    seed_2 = 0  # default seed for stage 2 256 upscaling
    guidance_scale_2 = 4  # default for stage 2
    custom_timesteps_2 = "smart50"  # default for stage 2
    number_of_inference_steps_2 = 50  # default for stage 2
    negative_prompt = ""  # empty (not used, could add in the future)

    seed_3 = 0  # default for stage 3 1024 upscaling
    guidance_scale_3 = 9  # default for stage 3
    number_of_inference_steps_3 = 40  # default for stage 3

    result_path = deepfloydif_client.predict(
        path_for_upscale256_upscaling,
        selected_index_for_upscale256,
        seed_2,
        guidance_scale_2,
        custom_timesteps_2,
        number_of_inference_steps_2,
        prompt,
        negative_prompt,
        seed_3,
        guidance_scale_3,
        number_of_inference_steps_3,
        api_name="/upscale1024",
    )
    return result_path


def load_image(png_files, stage_1_images):
    """Opens images as variables so we can combine them later"""
    results = []
    for file in png_files:
        png_path = os.path.join(stage_1_images, file)
        results.append(Image.open(png_path))
    return results


def combine_images(png_files, stage_1_images, partial_path):
    if os.environ.get("TEST_ENV") == "True":
        print("Combining images for deepfloydif_generate64")
    images = load_image(png_files, stage_1_images)
    combined_image = Image.new("RGB", (images[0].width * 2, images[0].height * 2))
    combined_image.paste(images[0], (0, 0))
    combined_image.paste(images[1], (images[0].width, 0))
    combined_image.paste(images[2], (0, images[0].height))
    combined_image.paste(images[3], (images[0].width, images[0].height))
    combined_image_path = os.path.join(stage_1_images, f"{partial_path}.png")
    combined_image.save(combined_image_path)
    return combined_image_path


async def deepfloydif_generate64(ctx, prompt):
    """DeepfloydIF command (generate images with realistic text using slash commands)"""
    try:
        if ctx.guild.id == 879548962464493619:
            if ctx.channel.id != 1119313215675973714:
                return

        channel = ctx.channel
        # interaction.response message can't be used to create a thread, so we create another message
        message = await ctx.send(f"**{prompt}** - {ctx.author.mention} (generating...)")

        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(None, deepfloydif_generate64_inference, prompt)
        stage_1_images = result[0]
        path_for_upscale256_upscaling = result[2]

        partial_path = pathlib.Path(path_for_upscale256_upscaling).name
        png_files = list(glob.glob(f"{stage_1_images}/**/*.png"))

        if png_files:
            await message.delete()
            combined_image_path = combine_images(png_files, stage_1_images, partial_path)
            if os.environ.get("TEST_ENV") == "True":
                print("Images combined for deepfloydif_generate64")

            with Image.open(combined_image_path) as img:
                width, height = img.size
                new_width = width * 3
                new_height = height * 3
                resized_img = img.resize((new_width, new_height))
                x2_combined_image_path = combined_image_path
                resized_img.save(x2_combined_image_path)

            # making the image bigger, more readable
            with open(x2_combined_image_path, "rb") as f:  # was combined_image_path
                button1 = Button(custom_id="0", emoji="↖")
                button2 = Button(custom_id="1", emoji="↗")
                button3 = Button(custom_id="2", emoji="↙")
                button4 = Button(custom_id="3", emoji="↘")

                async def button_callback(interaction):
                    index = int(interaction.data["custom_id"])  # 0, 1, 2, 3

                    await interaction.response.send_message(
                        f"{interaction.user.mention} (upscaling...)", ephemeral=True
                    )
                    result_path = await deepfloydif_upscale256(index, path_for_upscale256_upscaling)

                    # create and use the upscale 1024 button
                    with open(result_path, "rb") as f:
                        upscale1024 = Button(label="High-quality upscale (x4)", custom_id=str(index))
                        upscale1024.callback = upscale1024_callback
                        view = View(timeout=None)
                        view.add_item(upscale1024)

                        await interaction.delete_original_response()
                        await channel.send(
                            content=(
                                f"{interaction.user.mention} Here is the upscaled image! Click the button"
                                " to upscale even more!"
                            ),
                            file=discord.File(f, f"{prompt}.png"),
                            view=view,
                        )

                async def upscale1024_callback(interaction):
                    index = int(interaction.data["custom_id"])

                    await interaction.response.send_message(
                        f"{interaction.user.mention} (upscaling...)", ephemeral=True
                    )
                    result_path = await deepfloydif_upscale1024(index, path_for_upscale256_upscaling, prompt)

                    with open(result_path, "rb") as f:
                        await interaction.delete_original_response()
                        await channel.send(
                            content=f"{interaction.user.mention} Here's your high-quality x16 image!",
                            file=discord.File(f, f"{prompt}.png"),
                        )

                button1.callback = button_callback
                button2.callback = button_callback
                button3.callback = button_callback
                button4.callback = button_callback

                view = View(timeout=None)
                view.add_item(button1)
                view.add_item(button2)
                view.add_item(button3)
                view.add_item(button4)

                # could store this message as combined_image_dfif in case it's useful for future testing
                await channel.send(
                    f"**{prompt}** - {ctx.author.mention} Click a button to upscale! (make larger + enhance quality)",
                    file=discord.File(f, f"{partial_path}.png"),
                    view=view,
                )
        else:
            await ctx.send(f"{ctx.author.mention} No PNG files were found, cannot post them!")

    except Exception as e:
        print(f"Error: {e}")


async def deepfloydif_upscale256(index: int, path_for_upscale256_upscaling):
    """Upscaling function for images generated using /deepfloydif"""
    try:
        loop = asyncio.get_running_loop()
        result_path = await loop.run_in_executor(
            None, deepfloydif_upscale256_inference, index, path_for_upscale256_upscaling
        )
        return result_path

    except Exception as e:
        print(f"Error: {e}")


async def deepfloydif_upscale1024(index: int, path_for_upscale256_upscaling, prompt):
    """Upscaling function for images generated using /deepfloydif"""
    try:
        loop = asyncio.get_running_loop()
        result_path = await loop.run_in_executor(
            None, deepfloydif_upscale1024_inference, index, path_for_upscale256_upscaling, prompt
        )
        return result_path

    except Exception as e:
        print(f"Error: {e}")


def run_bot():
    bot.run(DISCORD_TOKEN)


threading.Thread(target=run_bot).start()


welcome_message = """
## Add this bot to your server by clicking this link:

https://discord.com/api/oauth2/authorize?client_id=1154395078735953930&permissions=51200&scope=bot

## How to use it?

The bot can be triggered via `/deepfloydif` followed by your text prompt.

This will generate images based on the text prompt. You can upscale the images using the buttons, up to 16x!

⚠️ Note ⚠️: Please make sure this bot's command does not have the same name as another command in your server.

⚠️ Note ⚠️: Bot commands do not work in DMs with the bot as of now.
"""


with gr.Blocks() as demo:
    gr.Markdown(f"""
    # Discord bot of https://huggingface.co/spaces/DeepFloyd/IF
    {welcome_message}
    """)


demo.queue(concurrency_count=100, max_size=100)
demo.launch()
--------------------------------------------------------------------------------
/falcon180b/falcon180b.py:
--------------------------------------------------------------------------------
import asyncio
import os
import threading
from threading import Event
from typing import Optional

import discord
import gradio as gr
import gradio_client as grc
from discord.ext import commands
from gradio_client.utils import QueueError

event = Event()

DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")


async def wait(job):
    while not job.done():
        await asyncio.sleep(0.2)


def get_client(session: Optional[str] = None) -> grc.Client:
    client = grc.Client("https://tiiuae-falcon-180b-demo.hf.space", hf_token=os.getenv("HF_TOKEN"))
    if session:
        client.session_hash = session
    return client


def truncate_response(response: str) -> str:
    ending = "...\nTruncating response to 2000 characters due to Discord API limits."
    if len(response) > 2000:
        return response[: 2000 - len(ending)] + ending
    else:
        return response


intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix="/", intents=intents)


@bot.event
async def on_ready():
    print(f"Logged in as {bot.user} (ID: {bot.user.id})")
    synced = await bot.tree.sync()
    print(f"Synced commands: {', '.join([s.name for s in synced])}.")
    event.set()
    print("------")


thread_to_client = {}
thread_to_user = {}


@bot.hybrid_command(
    name="falcon180",
    description="Enter some text to chat with the bot! Like this: /falcon180 Hello, how are you?",
)
async def chat(ctx, prompt: str):
    if ctx.author.id == bot.user.id:
        return
    try:
        if ctx.guild.id == 879548962464493619:
            if ctx.channel.id != 1119313248056004729:
                return

        message = await ctx.send("Creating thread...")

        thread = await message.create_thread(name=prompt[:100])
        loop = asyncio.get_running_loop()
        client = await loop.run_in_executor(None, get_client, None)
        job = client.submit(prompt, "", 0.9, 256, 0.95, 1.0, api_name="/chat")
        await wait(job)

        try:
            job.result()
            response = job.outputs()[-1]
            await thread.send(truncate_response(response))
            thread_to_client[thread.id] = client
            thread_to_user[thread.id] = ctx.author.id
        except QueueError:
            await thread.send("The gradio space powering this bot is really busy! Please try again later!")

    except Exception as e:
        print(f"Error: {e}")


async def continue_chat(message):
    """Continues a given conversation based on chat history"""
    try:
        client = thread_to_client[message.channel.id]
        prompt = message.content
        job = client.submit(prompt, "", 0.9, 256, 0.95, 1.0, api_name="/chat")
        await wait(job)
        try:
            job.result()
            response = job.outputs()[-1]
            await message.reply(truncate_response(response))
        except QueueError:
            await message.reply("The gradio space powering this bot is really busy! Please try again later!")

    except Exception as e:
        print(f"Error: {e}")


@bot.event
async def on_message(message):
    """Continue the chat"""
    try:
        if not message.author.bot:
            if message.channel.id in thread_to_user:
                if thread_to_user[message.channel.id] == message.author.id:
                    await continue_chat(message)
            else:
                await bot.process_commands(message)

    except Exception as e:
        print(f"Error: {e}")


# running in thread
def run_bot():
    if not DISCORD_TOKEN:
        print("DISCORD_TOKEN NOT SET")
        event.set()
    else:
        bot.run(DISCORD_TOKEN)


threading.Thread(target=run_bot).start()

event.wait()


welcome_message = """
## Add this bot to your server by clicking this link:

https://discord.com/api/oauth2/authorize?client_id=1155169841276260546&permissions=326417516544&scope=bot

## How to use it?

The bot can be triggered via `/falcon180` followed by your text prompt.

This will create a thread with the bot's response to your text prompt.
You can reply in the thread (without `/falcon180`) to continue the conversation.
In the thread, the bot will only reply to the original author of the command.

⚠️ Note ⚠️: Please make sure this bot's command does not have the same name as another command in your server.

⚠️ Note ⚠️: Bot commands do not work in DMs with the bot as of now.
"""


with gr.Blocks() as demo:
    gr.Markdown(f"""
    # Discord bot of https://tiiuae-falcon-180b-demo.hf.space
    {welcome_message}
    """)

demo.launch()
--------------------------------------------------------------------------------
/legacy/audioldm2:
--------------------------------------------------------------------------------
import asyncio
import os

import discord
from gradio_client import Client
from gradio_client.utils import QueueError

BOT_USER_ID = 1102236653545861151  # real
MUSIC_CHANNEL_ID = 1143183148881035365  # real


HF_TOKEN = os.getenv("HF_TOKEN")
audioldm2 = Client("huggingface-projects/audioldm2-text2audio-text2music", HF_TOKEN)


def audioldm2_create_job(prompt):
    """Generates a sound or music based on a given prompt"""
    try:
        job = audioldm2.submit(prompt, api_name="/text2audio")
        while not job.done():
            pass
        return job

    except Exception as e:
        print(f"audioldm2_create_job Error: {e}")


async def audioldm2_create(ctx, prompt):
    """Runs audioldm2_create_job in an executor"""
    try:
        if ctx.author.id != BOT_USER_ID:
            if ctx.channel.id == MUSIC_CHANNEL_ID:
                if os.environ.get("TEST_ENV") == "True":
                    print("Safetychecks passed for audioldm2_create")

                message = await ctx.send(f"**{prompt}** - {ctx.author.mention}")
                if len(prompt) > 99:
                    small_prompt = prompt[:99]
                else:
                    small_prompt = prompt
                thread = await message.create_thread(name=small_prompt, auto_archive_duration=60)

                if os.environ.get("TEST_ENV") == "True":
                    print("Running audioldm2_create_job...")

                loop = asyncio.get_running_loop()
                job = await loop.run_in_executor(None, audioldm2_create_job, prompt)

                try:
                    job.result()
                    video = job.outputs()[0]
                except QueueError:
                    await thread.send("The gradio space powering this bot is really busy! Please try again later!")
                    return  # no output to post, so bail out here

                short_filename = prompt[:20]
                video_filename = f"{short_filename}.mp4"

                with open(video, "rb") as file:
                    discord_video_file = discord.File(file, filename=video_filename)
                    await thread.send(file=discord_video_file)

    except Exception as e:
        print(f"audioldm2_create Error: {e}")
--------------------------------------------------------------------------------
/legacy/musicgen.py:
--------------------------------------------------------------------------------
import asyncio
import os
import random
import threading
from threading import Event
from typing import Optional

import discord
import gradio as gr
import gradio_client as grc
from discord.ext import commands
from gradio_client.utils import QueueError

event = Event()
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")


async def wait(job):
    while not job.done():
        await asyncio.sleep(0.2)


def get_client(session: Optional[str] = None) -> grc.Client:
    client = grc.Client("huggingface-projects/transformers-musicgen", hf_token=os.getenv("HF_TOKEN"))
    if session:
        client.session_hash = session
    return client


intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix="/", intents=intents)


@bot.event
async def on_ready():
    print(f"Logged in as {bot.user} (ID: {bot.user.id})")
    synced = await bot.tree.sync()
    print(f"Synced commands: {', '.join([s.name for s in synced])}.")
    event.set()
    print("------")


@bot.hybrid_command(
    name="musicgen",
    description="Enter a prompt to generate music!",
)
async def musicgen_command(ctx, prompt: str, seed: Optional[int] = None):
    """Generates music based on a prompt"""
    if ctx.author.id == bot.user.id:
        return
    if seed is None:
        seed = random.randint(1, 10000)
    try:
        await music_create(ctx, prompt, seed)
    except Exception as e:
        print(f"Error: {e}")


async def music_create(ctx, prompt, seed):
    """Runs music_create_job in an executor"""
    try:
        message = await ctx.send(f"**{prompt}** - {ctx.author.mention} Generating...")
        thread = await message.create_thread(name=prompt[:100])

        loop = asyncio.get_running_loop()
        client = await loop.run_in_executor(None, get_client, None)
        job = client.submit(prompt, seed, api_name="/predict")
        await wait(job)

        try:
            job.result()
            files = job.outputs()
            media_files = files[0]
            audio = media_files[0]
            video = media_files[1]
            short_filename = prompt[:20]
            audio_filename = f"{short_filename}.mp3"
            video_filename = f"{short_filename}.mp4"

            with open(video, "rb") as file:
                discord_video_file = discord.File(file, filename=video_filename)
                await thread.send(file=discord_video_file)

            with open(audio, "rb") as file:
                discord_audio_file = discord.File(file, filename=audio_filename)
                await thread.send(file=discord_audio_file)

        except QueueError:
            await ctx.send("The gradio space powering this bot is really busy! Please try again later!")

    except Exception as e:
        print(f"music_create Error: {e}")


def run_bot():
    if not DISCORD_TOKEN:
        print("DISCORD_TOKEN NOT SET")
        event.set()
    else:
        bot.run(DISCORD_TOKEN)


threading.Thread(target=run_bot).start()

event.wait()

welcome_message = """
## Add this bot to your server by clicking this link:

https://discord.com/api/oauth2/authorize?client_id=1150383223021506620&permissions=326417565696&scope=bot

## How to use it?

The bot can be triggered via `/musicgen` followed by your text prompt.

This will generate music based on your text prompt!

⚠️ Note ⚠️: Please make sure this bot's command does not have the same name as another command in your server.

⚠️ Note ⚠️: Bot commands do not work in DMs with the bot as of now.
"""


with gr.Blocks() as demo:
    gr.Markdown(f"""
    # Discord bot of https://huggingface.co/spaces/facebook/MusicGen
    {welcome_message}
    """)

demo.launch()
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
[tool.black]
line-length = 119
target_version = ['py37', 'py38', 'py39', 'py310']
preview = true

[tool.ruff]
line-length = 119
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
discord.py
gradio
--------------------------------------------------------------------------------
/wuerstchen/wuerstchen.py:
--------------------------------------------------------------------------------
import asyncio
import glob
import os
import random
import threading

import discord
import gradio as gr
from discord.ext import commands
from gradio_client import Client


HF_TOKEN = os.getenv("HF_TOKEN")
wuerstchen_client = Client("huggingface-projects/Wuerstchen-duplicate", HF_TOKEN)
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")


intents = discord.Intents.all()
bot = commands.Bot(command_prefix="/", intents=intents)


@bot.event
async def on_ready():
    print(f"Logged in as {bot.user} (ID: {bot.user.id})")
    synced = await bot.tree.sync()
    print(f"Synced commands: {', '.join([s.name for s in synced])}.")
    print("------")


@bot.hybrid_command(
    name="wuerstchen",
    description="Enter a prompt to generate art!",
)
async def wuerstchen_command(ctx, prompt: str):
    """Wuerstchen generation"""
    try:
        await run_wuerstchen(ctx, prompt)
    except Exception as e:
        print(f"Error (wuerstchen): {e}")


def wuerstchen_inference(prompt):
    """Inference for Wuerstchen"""
    negative_prompt = ""
    seed = random.randint(0, 1000)
    width = 1024
    height = 1024
    prior_num_inference_steps = 60
    prior_guidance_scale = 4
    decoder_num_inference_steps = 12
    decoder_guidance_scale = 0
    num_images_per_prompt = 1

    result_path = wuerstchen_client.predict(
        prompt,
        negative_prompt,
        seed,
        width,
        height,
        prior_num_inference_steps,
        prior_guidance_scale,
        decoder_num_inference_steps,
        decoder_guidance_scale,
        num_images_per_prompt,
        api_name="/run",
    )
    png_file = list(glob.glob(f"{result_path}/**/*.png"))
    return png_file[0]


async def run_wuerstchen(ctx, prompt):
    """Responds to the /wuerstchen command"""
    try:
        if ctx.guild.id == 879548962464493619:
            if ctx.channel.id != 1151792944676864041:
                return

        message = await ctx.send(f"**{prompt}** - {ctx.author.mention} (generating...)")

        loop = asyncio.get_running_loop()
        result_path = await loop.run_in_executor(None, wuerstchen_inference, prompt)

        await message.delete()
        with open(result_path, "rb") as f:
            await ctx.channel.send(f"**{prompt}** - {ctx.author.mention}", file=discord.File(f, "wuerstchen.png"))
    except Exception as e:
        print(f"Error: {e}")


def run_bot():
    bot.run(DISCORD_TOKEN)


# This allows us to run the Discord bot in a Python thread
threading.Thread(target=run_bot).start()


welcome_message = """
## Add this bot to your server by clicking this link:

https://discord.com/api/oauth2/authorize?client_id=1155489509518098565&permissions=51200&scope=bot

## How to use it?

The bot can be triggered via `/wuerstchen` followed by your text prompt.

This will generate an image based on your prompt, which is then posted in the channel!

⚠️ Note ⚠️: Please make sure this bot's command does not have the same name as another command in your server.

⚠️ Note ⚠️: Bot commands do not work in DMs with the bot as of now.
"""


with gr.Blocks() as demo:
    gr.Markdown(f"""
    # Discord bot of https://huggingface.co/spaces/warp-ai/Wuerstchen
    {welcome_message}
    """)


demo.queue(concurrency_count=100, max_size=100)
demo.launch()
--------------------------------------------------------------------------------