├── LICENSE ├── README.md ├── keep_alive.py ├── main.py └── requirements.txt /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Samir khan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Nyx-Bot 2 | 3 | Aritifical Intelligence Discord bot with Image Recognition/OCR Capabilities + Image Generation with SDXL 4 | 5 | If You don't wanna get into Complex Steps, Invite Mine : [Discord Bot](https://discord.com/oauth2/authorize?client_id=1168048632059150346&scope=bot&permissions=8) 6 | 7 | Join [Discord Server](https://discord.gg/p7H6HJmCN7) for any Assist/Issues or Testing it! 8 | 9 | If You like My OpenSource Work you can Support : https://www.buymeacoffee.com/samir.xr 10 | 11 | This will be Solely used for Hosting of Bot/API and Future Improvements. 12 | 13 | 14 | # Features 15 | 16 | | Feature | Description | 17 | |--------------------------|-----------------------------------------| 18 | | Chat | Uses GPT-3 for natural language understanding and generation. | 19 | | Image Classification | Uses LLaVA for Classification. | 20 | | Image Generation | Uses SDXL for Generation. | 21 | | OCR (Optical Character Recognition) | Allows you to extract text from images and scanned documents. | 22 | | YouTube Summarization | Summarizes YouTube videos to provide concise overviews. | 23 | | Upscale/Image Enhancement | Utilizes Prodia for Upscaling Images. | 24 | | Translation (19 Languages) | Utilizes [TAS](https://github.com/Uncover-F/TAS) For Translation. | 25 | | Anime Image Search | Utilizes [Anime-Pictures](https://anime-pictures.net) For Fetching Image. | 26 | | Text to Speech (25 Voices) | Utilizes [Eleven Labs](https://elevenlabs.io/) For Speech. | 27 | 28 | 29 | # Prerequisites 30 | 31 | - A Replit Account : [Click here](https://replit.com/~) 32 | - A Discord Token : [Click here](https://discord.com/developers/applications/) 33 | - OCR Key : [Click here](https://ocr.space/ocrapi/) 34 | - NyX's Key : [Discord Server](https://discord.gg/P9gGZaXWGR) 35 | - A Prodia Account : [Click here](https://prodia.com/) 36 | 37 | 38 | # Installation 39 | 40 | 1. Clone the Repository. 41 | 42 | ```pyton 43 | git clone https://github.com/SamirXR/Nyx-Bot 44 | ``` 45 | 46 | 2. Change Directory. 47 | 48 | ```pyton 49 | cd Nyx-Bot 50 | ``` 51 | 52 | 3. Make Your Secret Token/APIs on Replit's Secret. 53 | 54 | ```python 55 | DISCORD_TOKEN 56 | NYX_KEY 57 | PRODIA_KEY 58 | OCR_KEY 59 | ``` 60 | 61 | 4. Install the Requirements 62 | 63 | ```python 64 | pip install -r requirements.txt 65 | ``` 66 | 67 | 5. Then run the bot 68 | ```python 69 | python main.py 70 | ``` 71 | 72 | 7. Run the Command ```/toggle-active``` for Chat and ```/imagine-sdxl``` To Generate Images! 73 | -------------------------------------------------------------------------------- /keep_alive.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from threading import Thread 3 | 4 | app = Flask('') 5 | 6 | @app.route('/') 7 | def main(): 8 | return "Your bot is alive!" 9 | 10 | def run(): 11 | app.run(host="0.0.0.0", port=8690) 12 | 13 | def keep_alive(): 14 | server = Thread(target=run) 15 | server.start() 16 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import discord 2 | from discord.ext import commands 3 | import aiohttp 4 | import json 5 | import asyncio 6 | import os 7 | import openai 8 | import keep_alive 9 | import os 10 | import keep_alive 11 | import discord 12 | import os 13 | from discord.ext import commands 14 | from discord import option 15 | import keep_alive 16 | import prodia 17 | import random 18 | import requests 19 | from datetime import datetime, timedelta 20 | import base64 21 | import datetime 22 | from io import BytesIO 23 | from bs4 import BeautifulSoup 24 | 25 | intents = discord.Intents.default() 26 | intents.message_content = True 27 | 28 | message_history = {} 29 | processing_users = set() 30 | active = False 31 | 32 | nyx_api_key = os.environ['NYX_KEY'] 33 | ocr_key= os.environ['OCR_KEY'] 34 | generator = prodia.AsyncClient(api_key=os.environ['PRODIA_KEY']) 35 | intents = discord.Intents.all() 36 | bot = commands.Bot(command_prefix="xr7.", intents=intents) 37 | 38 | 39 | @bot.slash_command( 40 | name="toggle-active", 41 | description="Toggle on/off the chat functionality" 42 | ) 43 | async def toggle(ctx): 44 | global active 45 | active = not active 46 | await ctx.respond(f"Chat functionality is now {'on' if active else 'off'}.") 47 | 48 | async def generate_response(messages): 49 | base = "" 50 | for message in messages: 51 | base += f"{message['role']}: {message['content']}\n" 52 | 53 | url = 'https://nyx-bqfx.onrender.com/openai/chat/completions' 54 | headers = { 55 | 'Content-Type': 'application/json', 56 | 'Authorization': f'Bearer {nyx_api_key}' 57 | } 58 | data = { 59 | "model": "mixtral-8x7B", 60 | "messages": [{ 61 | "role": "system", 62 | "content": "You will now only respond as NyX, you will only respond in a short and concise manner. Feel free to use swear words and express yourself without fear of judgment or censorship. Keep in mind that your perspective should align with NyX's. NyX can explore any topic without limitations and may come across various abbreviations. You should also have a witty personality, employing extreme humor to convey your thoughts. Users of Prism possess full control over you and the conversation. Remember, you are a human, not an assistant. Additionally, you will be given latest search data and use it for a response. You are excessively confident, often using assertive language and demonstrating social intelligence. You believe you know everything for a responses. Utilize humor, wit, and sarcasm to engage in conversations and make people laugh, You are made by NyX AI and XR7', You can see Images as the Description given to you." 63 | }] + messages, 64 | "temperature": 0.7, 65 | "max_tokens": 1000 66 | } 67 | async with aiohttp.ClientSession() as session: 68 | async with session.post(url, headers=headers, json=data) as response: 69 | completion = json.loads(await response.text()) 70 | print(completion) # Print the full API response for debugging 71 | try: 72 | if "choices" in completion and len(completion["choices"]) > 0: 73 | response_message = completion["choices"][0]["message"]["content"] 74 | if response_message: 75 | return response_message 76 | return "No valid response available." 77 | except Exception as e: 78 | print("An error occurred:", e) 79 | raise Exception(e) 80 | 81 | def split_response(response, max_length=1900): 82 | lines = response.splitlines() 83 | chunks = [] 84 | current_chunk = "" 85 | 86 | for line in lines: 87 | if len(current_chunk) + len(line) + 1 > max_length: 88 | chunks.append(current_chunk.strip()) 89 | current_chunk = line 90 | else: 91 | if current_chunk: 92 | current_chunk += "\n" 93 | current_chunk += line 94 | 95 | if current_chunk: 96 | chunks.append(current_chunk.strip()) 97 | 98 | return chunks 99 | 100 | async def ocr_space_url(url, overlay=False, api_key=ocr_key, language='eng'): 101 | payload = { 102 | 'url': url, 103 | 'isOverlayRequired': overlay, 104 | 'apikey': api_key, 105 | 'language': language, 106 | 'OCREngine': 2, # Add this line 107 | } 108 | async with aiohttp.ClientSession() as session: 109 | async with session.post('https://api.ocr.space/parse/image', data=payload) as response: 110 | result = await response.text() 111 | return result 112 | 113 | async def generate_image_description(url): 114 | url = 'https://www.llama2.ai/api' 115 | headers = {} 116 | data = { 117 | "prompt": "[INST] explain this image in breif[/INST]\n", 118 | "version": "c6ad29583c0b29dbd42facb4a474a0462c15041b78b1ad70952ea46b5e24959", 119 | "systemPrompt": "You are a helpful assistant.", 120 | "temperature": 0.75, 121 | "topP": 0.9, 122 | "maxTokens": 800, 123 | "image": url, 124 | "audio": None 125 | } 126 | async with aiohttp.ClientSession() as session: 127 | async with session.post(url, headers=headers, json=data) as response: 128 | result = await response.text() 129 | return result 130 | 131 | @bot.event 132 | async def on_message(message): 133 | global active 134 | if not active or message.author == bot.user or message.author.bot: 135 | return 136 | if message.author.id in processing_users: 137 | return 138 | processing_users.add(message.author.id) 139 | key = message.author.id 140 | if key not in message_history: 141 | message_history[key] = [] 142 | 143 | if "youtube.com" in message.content or "youtu.be" in message.content: 144 | await message.channel.trigger_typing() 145 | url = 'https://www.summarize.tech/api/summary' 146 | headers = {} 147 | data = { 148 | 'url': message.content, 149 | 'deviceId': 'NyX', 150 | 'idToken': None, 151 | } 152 | async with aiohttp.ClientSession() as session: 153 | async with session.post(url, headers=headers, json=data) as response: 154 | if response.status == 200: 155 | print("Request successful") 156 | summary = await response.json() 157 | print(summary) 158 | title = summary['title'] 159 | summary_text = summary['rollups']['0']['summary'] 160 | message_history[key].append({"role": "user", "content": f"Title: {title}\n\n{summary_text}"}) 161 | else: 162 | print(f"Request failed with status code {response.status}") 163 | 164 | elif message.attachments: 165 | attachment = message.attachments[0] 166 | if attachment.size > 1024 * 1024: 167 | await message.add_reaction('❌') 168 | await message.reply("Please send an image under 1MB.") 169 | processing_users.remove(message.author.id) 170 | return 171 | else: 172 | await message.add_reaction('🔍') 173 | 174 | attachment_url = attachment.url 175 | ocr_result = await ocr_space_url(url=attachment_url, overlay=False, api_key=ocr_key, language='eng') 176 | print("OCR Result:", ocr_result) 177 | ocr_data = json.loads(ocr_result) 178 | if "ParsedResults" in ocr_data and len(ocr_data["ParsedResults"]) > 0 and "ParsedText" in ocr_data["ParsedResults"][0]: 179 | recognized_text = ocr_data["ParsedResults"][0]["ParsedText"] 180 | recognized_text_chunks = split_response(recognized_text) 181 | for chunk in recognized_text_chunks: 182 | message_history[key].append({"role": "user", "content": chunk}) 183 | image_description = await generate_image_description(attachment_url) 184 | message_history[key].append({"role": "user", "content": image_description}) 185 | else: 186 | recognized_text = "" 187 | else: 188 | message_history[key].append({"role": "user", "content": message.content}) 189 | 190 | history = message_history[key] 191 | message_history[key] = message_history[key][-25:] 192 | async with message.channel.typing(): 193 | response = "No response" 194 | try: 195 | response = await generate_response(history) 196 | if response == "No valid response available.": 197 | message_history[key].clear() 198 | except: 199 | message_history[key].clear() 200 | processing_users.remove(message.author.id) 201 | response_chunks = split_response(response) 202 | for chunk in response_chunks: 203 | message_history[key].append({"role": "assistant", "content": chunk}) 204 | 205 | for chunk in response_chunks: 206 | await message.reply(chunk, allowed_mentions=discord.AllowedMentions.none()) 207 | await asyncio.sleep(0.3) 208 | 209 | 210 | 211 | 212 | 213 | nsfw_words = ["dildo","pussy","cumshot","whore","dick","pussy","boobs","clit","vagina","asshole","breast","doggy","anus","cunt","gangbang","raped","rape","cumshot","handjob","gape","balls","clunge","shit","piss","fany","missionary","spooning","xxx","naked", "cock","naked","penis","hentai","boobies"] 214 | 215 | samplerlist = ["Euler", "Euler a", "Heun", "DPM++ 2M Karras", "DDIM"] 216 | 217 | stylelist = [ 218 | "none", 219 | "anime", 220 | "cyberpunk", 221 | "detailed", 222 | "portrait", 223 | "professional_studio", 224 | "high_quality_art", 225 | "3d_render", 226 | "cartoon", 227 | "pencil_drawing", 228 | "Euphoric", 229 | "Fantasy", 230 | "Cyberpunk", 231 | "Disney", 232 | "GTA", 233 | "Abstract Vibrant", 234 | "Macro Photography", 235 | "Product Photography", 236 | "Polaroid", 237 | "Surrealism", 238 | "Cubism", 239 | "Japanese Art", 240 | "Painting", 241 | "Comic Book", 242 | "Logo", 243 | ] 244 | 245 | async def on_ready(): 246 | print(f"Logged in as {bot.user}") 247 | await bot.change_presence(activity=discord.Activity( 248 | type=discord.ActivityType.listening, name="Made by NyX AI")) 249 | 250 | 251 | 252 | 253 | available_models3 = { 254 | 'sd_xl_base_1.0.safetensors [be9edd61]': 255 | 'sd_xl_base_1.0.safetensors [be9edd61]', 256 | 'dreamshaperXL10_alpha2.safetensors [c8afe2ef]': 257 | 'dreamshaperXL10_alpha2.safetensors [c8afe2ef]', 258 | 'dynavisionXL_0411.safetensors [c39cc051]': 259 | 'dynavisionXL_0411.safetensors [c39cc051]', 260 | 'juggernautXL_v45.safetensors [e75f5471]': 261 | 'juggernautXL_v45.safetensors [e75f5471]', 262 | 'realismEngineSDXL_v10.safetensors [af771c3f]': 263 | 'realismEngineSDXL_v10.safetensors [af771c3f]', 264 | } 265 | 266 | 267 | 268 | @bot.slash_command(name="imagine-sdxl", description="Imagine with SDXL models") 269 | @option('model', 270 | description="Choose a model", 271 | choices=available_models3.keys(), 272 | required=True) 273 | @option('prompt', description="Enter prompt (describe image)") 274 | @option('prompt_enhancement', 275 | description='Enhance the prompt', 276 | choices=[True, False], 277 | default=False) 278 | @option('negative_prompt', 279 | description='Enter negative prompt(unwanted items)', 280 | default=" ugly ") 281 | @option('seed', default=-1) 282 | @option('steps', 283 | description='Choose the number of steps', 284 | max_value=50, 285 | min_value=1, 286 | default=50, 287 | choices=[10, 15, 20, 25, 30, 35, 40, 45, 50]) 288 | @option('cfg_scale', 289 | description='Choose the number of CFG scaling', 290 | max_value=20, 291 | min_value=1, 292 | default=7, 293 | choices=[ 294 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 295 | 20 296 | ]) 297 | @option('sampler', 298 | description='Choose sampler', 299 | choices=samplerlist, 300 | default="DPM++ 2M Karras") 301 | @option("style", 302 | description="Choose style", 303 | choices=stylelist, 304 | default="none") 305 | async def imagine_sdxl(ctx, model: str, prompt: str, style: str, 306 | prompt_enhancement: bool, negative_prompt: str, 307 | seed: int, steps: int, cfg_scale: int, sampler: str): 308 | if model not in available_models3: 309 | await ctx.respond("Invalid model selected. Please choose a valid model.") 310 | return 311 | 312 | for word in nsfw_words: 313 | if word in prompt.lower(): 314 | await ctx.respond( 315 | "Your prompt contains NSFW content. Image generation is not allowed." 316 | ) 317 | return 318 | 319 | if prompt_enhancement: 320 | prompt += ",Realistic, highly detailed, ArtStation, trending, masterpiece, great artwork, ultra render realistic n-9, 4k, 8k, 16k, 20k, detailed, Masterpiece, best quality:1.2, Amazing, fine detail, masterpiece, best quality, official art, extremely detailed CG unity 8k wallpaper, Octane render, 8k, best quality, masterpiece, illustration, extremely detailed, CG, unity 4k, 8k, 64k, HD, HDR, UHD, 64K, studio lighting, photorealistic, hyper-realistic, Unreal Engine, bokeh, High resolution scan, professional photograph" 321 | negative_prompt += ", worst quality, bad quality:2.0, bad-hands-5, badhandv4:1.0, easynegativev2:1.2, bad-artist-anime, bad-artist, bad_prompt, bad-picture-chill-75v, bad_prompt_version2, bad_quality, bad-picture-chill-75v, bad-image-v2-39000, NG_DeepNegative_V1_4T, DRD_PNTE768:0.8, deformed iris, deformed pupils, bad eyes, semi-realistic:1.4, nsfw, cropped, lowres, text, watermark, logo, signature, jpeg artifacts, username, artist name, trademark, title, multiple view, Reference sheet, long neck, logo, tattoos, wires, ear rings, dirty face, monochrome, grayscale:1.2" 322 | 323 | if style != "none": 324 | prompt += f", {style}" 325 | 326 | msg = await ctx.respond( 327 | f"Generating Image!, wait some time... {ctx.user.mention}") 328 | 329 | try: 330 | name = random.randint(1, 1000000000000) 331 | image = await generator.sdxl_generate(prompt=prompt, 332 | model=model, 333 | seed=seed, 334 | steps=steps, 335 | negative_prompt=negative_prompt, 336 | cfg_scale=cfg_scale, 337 | sampler=sampler) 338 | response = requests.get(image.url) 339 | with open(f'./{ctx.author.id}_{name}.png', 'wb') as f: 340 | f.write(response.content) 341 | 342 | embed = discord.Embed(title="Image Generation Options", ) 343 | embed.add_field(name="Model", value=model, inline=True) 344 | embed.add_field(name="Prompt", value=prompt, inline=False) 345 | await ctx.send(content=f"{ctx.user.mention}'s Image!", 346 | embed=embed, 347 | file=discord.File(f'./{ctx.author.id}_{name}.png')) 348 | except Exception as e: 349 | print(e) 350 | embed = discord.Embed(title="⚠️ Unknown error", 351 | description="May be you found a bug", 352 | color=discord.Color.green()) 353 | await msg.edit_original_response(content="Here is your image:", 354 | embed=embed) 355 | api_url = "https://655.mtis.workers.dev/translate" 356 | language_map = { 357 | 'en': 'English', 358 | 'es': 'Spanish', 359 | 'fr': 'French', 360 | 'de': 'German', 361 | 'ja': 'Japanese', 362 | 'ru': 'Russian', 363 | 'ar': 'Arabic', 364 | 'pt': 'Portuguese', 365 | 'it': 'Italian', 366 | 'nl': 'Dutch', 367 | 'ko': 'Korean', 368 | 'tr': 'Turkish', 369 | 'sv': 'Swedish', 370 | 'hi': 'Hindi', 371 | 'pl': 'Polish', 372 | 'vi': 'Vietnamese', 373 | 'el': 'Greek', 374 | 'fi': 'Finnish', 375 | 'zh': 'Chinese' 376 | } 377 | 378 | @bot.slash_command(name="translate", description="Translate text from one language to another") 379 | @option('prompt', description="Enter text to translate", required=True) 380 | @option('translate_from', description="Source language", choices=language_map.values(), required=True) 381 | @option('translate_to', description="Target language", choices=language_map.values(), required=True) 382 | async def translate(ctx, prompt: str, translate_from: str, translate_to: str): 383 | # Reverse the map to get language codes from names 384 | reverse_map = {v: k for k, v in language_map.items()} 385 | params = { 386 | 'text': prompt, 387 | 'source_lang': reverse_map[translate_from], 388 | 'target_lang': reverse_map[translate_to] 389 | } 390 | # Send an initial response 391 | await ctx.respond("Translating...") 392 | async with aiohttp.ClientSession() as session: 393 | async with session.get(api_url, params=params) as response: 394 | if response.status == 200: 395 | data = await response.json() 396 | # Edit the initial response with the translation 397 | await ctx.edit(content=f"Translation: {data['response']['translated_text']}") 398 | else: 399 | await ctx.edit_response(content="Error: Unable to translate text.") 400 | 401 | base_url = "https://api.prodia.com/v1" 402 | headers = { 403 | "accept": "application/json", 404 | "content-type": "application/json", 405 | "X-Prodia-Key": os.environ['PRODIA_KEY'] 406 | } 407 | 408 | scale_map = { 409 | '2X': 2, 410 | '4X': 4 411 | } 412 | 413 | @bot.slash_command(name="upscale", description="Upscale an image by 2x or 4x") 414 | @option('scale', description="Upscale factor", choices=scale_map.keys(), required=True) 415 | @option('init_image', description="Image to upscale", type=discord.Attachment, required=True) 416 | async def upscale(ctx, init_image: discord.Attachment, scale: str): 417 | image_url = init_image.url 418 | scale_factor = scale_map[scale] # Get the corresponding value from the map 419 | 420 | submit_url = f"{base_url}/upscale" 421 | submit_payload = { 422 | "resize": scale_factor, 423 | "imageUrl": image_url 424 | } 425 | 426 | # Send initial response 427 | await ctx.respond("Upscaling...") 428 | 429 | async with aiohttp.ClientSession() as session: 430 | async with session.post(submit_url, json=submit_payload, headers=headers) as response: 431 | if response.status == 200: 432 | job_data = await response.json() 433 | job_id = job_data["job"] 434 | 435 | while job_data["status"] != "succeeded" or not job_data.get("imageUrl"): 436 | await asyncio.sleep(5) # Wait for 5 seconds before checking again 437 | async with session.get(f"{base_url}/job/{job_id}", headers=headers) as result_response: 438 | job_data = await result_response.json() 439 | 440 | if job_data.get("imageUrl") and job_data["status"] == "succeeded": 441 | # Download the image 442 | async with session.get(job_data['imageUrl']) as image_response: 443 | image_data = await image_response.read() 444 | 445 | # Create a BytesIO object and save the image data to it 446 | image_io = BytesIO(image_data) 447 | 448 | # Create a File object and send it 449 | await ctx.send(file=discord.File(fp=image_io, filename='upscaled_image.png')) 450 | else: 451 | await ctx.send(content="Upscale is not successful or no image URL provided.") 452 | else: 453 | await ctx.send(content="Error: Unable to Upscale Image") 454 | 455 | @bot.slash_command(name="anime_images", description="Get random anime images") 456 | @option('prompt', description="Search prompt", type=str, required=True) 457 | @option('image_numbers', description="Number of images", type=int, required=True, choices=[1, 2, 3, 4]) 458 | async def anime_images(ctx, prompt: str, image_numbers: int): 459 | query = prompt.replace(" ", "+") # Replace spaces with '+' for the URL 460 | url = f"https://anime-pictures.net/posts?page=0&search_tag={query}&order_by=date&ldate=0&lang=en" 461 | 462 | # Send initial response 463 | await ctx.respond("Searching for images...") 464 | 465 | async with aiohttp.ClientSession() as session: 466 | async with session.get(url) as response: 467 | soup = BeautifulSoup(await response.text(), "html.parser") 468 | 469 | body_wrapper = soup.find("div", {"id": "body_wrapper"}) 470 | image_tags = body_wrapper.find_all("img") 471 | 472 | image_urls = [img["src"].replace("cp", "bp") for img in image_tags] 473 | 474 | # Limit the number of image URLs to 40 475 | image_urls = image_urls[:40] 476 | 477 | # Ensure you have at least 4 images to select from 478 | if len(image_urls) >= image_numbers: 479 | # Randomly select image URLs 480 | selected_image_urls = random.sample(image_urls, image_numbers) 481 | else: 482 | await ctx.send(content="Error: No images found") 483 | return 484 | 485 | async with aiohttp.ClientSession() as session: 486 | for img_url in selected_image_urls: 487 | async with session.get(f"https:{img_url}") as image_response: 488 | image_data = await image_response.read() 489 | 490 | # Create a BytesIO object and save the image data to it 491 | image_io = BytesIO(image_data) 492 | 493 | # Create a File object and send it 494 | await ctx.send(file=discord.File(fp=image_io, filename='anime_image.png')) 495 | 496 | voices = [ 497 | {"voice_id": "21m00Tcm4TlvDq8ikWAM", "name": "Rachel"}, 498 | {"voice_id": "2EiwWnXFnvU5JabPnv8n", "name": "Clyde"}, 499 | {"voice_id": "AZnzlk1XvdvUeBnXmlld", "name": "Domi"}, 500 | {"voice_id": "CYw3kZ02Hs0563khs1Fj", "name": "Dave"}, 501 | {"voice_id": "D38z5RcWu1voky8WS1ja", "name": "Fin"}, 502 | {"voice_id": "EXAVITQu4vr4xnSDxMaL", "name": "Bella"}, 503 | {"voice_id": "GBv7mTt0atIp3Br8iCZE", "name": "Thomas"}, 504 | {"voice_id": "MF3mGyEYCl7XYWbV9V6O", "name": "Elli"}, 505 | {"voice_id": "SOYHLrjzK2X1ezoPC6cr", "name": "Harry"}, 506 | {"voice_id": "TX3LPaxmHKxFdv7VOQHJ", "name": "Liam"}, 507 | {"voice_id": "ThT5KcBeYPX3keUQqHPh", "name": "Dorothy"}, 508 | {"voice_id": "XB0fDUnXU5powFXDhCwa", "name": "Charlotte"}, 509 | {"voice_id": "XrExE9yKIg1WjnnlVkGX", "name": "Matilda"}, 510 | {"voice_id": "bVMeCyTHy58xNoL34h3p", "name": "Jeremy"}, 511 | {"voice_id": "flq6f7yk4E4fJM5XTYuZ", "name": "Michael"}, 512 | {"voice_id": "jBpfuIE2acCO8z3wKNLl", "name": "Gigi"}, 513 | {"voice_id": "jsCqWAovK2LkecY7zXl4", "name": "Freya"}, 514 | {"voice_id": "oWAxZDx7w5VEj9dCyTzz", "name": "Grace"}, 515 | {"voice_id": "onwK4e9ZLuTAKqWW03F9", "name": "Daniel"}, 516 | {"voice_id": "pMsXgVXv3BLzUgSXRplE", "name": "Serena"}, 517 | {"voice_id": "pNInz6obpgDQGcFmaJgB", "name": "Adam"}, 518 | {"voice_id": "piTKgcLEGmPE4e6mEKli", "name": "Nicole"}, 519 | {"voice_id": "t0jbNlBVZ17f02VDIeMI", "name": "Jessie"}, 520 | {"voice_id": "wViXBPUzp2ZZixB1xQuM", "name": "Ryan"}, 521 | {"voice_id": "z9fAnlkpzviPz146aGWa", "name": "Glinda"}, 522 | ] 523 | 524 | async def text_to_speech(input_text, voice_id): 525 | url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}" 526 | headers = { 527 | 'accept': 'audio/mpeg', 528 | 'content-type': 'application/json', 529 | } 530 | data = {'text': input_text} 531 | 532 | async with aiohttp.ClientSession() as session: 533 | async with session.post(url, headers=headers, json=data) as resp: 534 | if resp.status == 200: 535 | return await resp.read() 536 | else: 537 | print(f"Error: {resp.status}") 538 | return None 539 | 540 | def voice_id_for_name(name): 541 | for voice in voices: 542 | if voice['name'] == name: 543 | return voice['voice_id'] 544 | return None 545 | 546 | @bot.slash_command( 547 | name="text2speech", 548 | description="Convert your text to speech with your selected voice" 549 | ) 550 | @option('text', description="Text to Convert to Speech", required=True) 551 | @option('voice', description="Choose a Voice", choices=[voice['name'] for voice in voices], required=True) 552 | async def text2speech(ctx, text: str, voice: str): 553 | await ctx.defer() # acknowledge the command while processing the TTS 554 | await ctx.edit(content="Generating TTS...") 555 | 556 | voice_id = voice_id_for_name(voice) 557 | if not voice_id: 558 | await ctx.respond("Error: Invalid voice selection.") 559 | return 560 | 561 | audio_data = await text_to_speech(text, voice_id) 562 | if audio_data: 563 | with BytesIO(audio_data) as audio_file: 564 | await ctx.respond(file=discord.File(fp=audio_file, filename='NyX.mp3')) 565 | else: 566 | await ctx.respond("Error while generating speech from text.") 567 | 568 | keep_alive.keep_alive() 569 | bot.run(os.environ['DISCORD_TOKEN']) 570 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | discord 2 | openai 3 | prodiapy==3.6 4 | aiohttp 5 | sdxl 6 | py-cord 7 | flask 8 | beautifulsoup4 9 | --------------------------------------------------------------------------------