# ═══════════════════════════════════════════════════════════════════════════
# Flattened repository dump — one section per file of the original project:
#
#   ├── config.py
#   ├── .gitignore
#   ├── README.md
#   ├── translate.py
#   ├── text_to_speech.py
#   ├── requirements.txt
#   ├── copilot.py
#   ├── text_to_image.py
#   ├── test.py
#   └── bot.py
#
# Non-Python files (.gitignore, README.md, requirements.txt) are kept below
# as comments so the dump stays syntactically coherent as a single document.
# ═══════════════════════════════════════════════════════════════════════════

# ── /config.py ─────────────────────────────────────────────────────────────
# NOTE(review): avoid committing a real token here — prefer a .env entry
# (".env" is already in .gitignore) read via python-dotenv, as test.py does.
token = "Your Token"

# ── /.gitignore ────────────────────────────────────────────────────────────
#   .idea
#   .env
#   env

# ── /README.md ─────────────────────────────────────────────────────────────
#   ## Star History
#
#   [![Star History Chart](https://api.star-history.com/svg?repos=abdibrokhim/UzNLP-Hackathon-ChatGPT-TelegramBot&type=Date)](https://star-history.com/#abdibrokhim/UzNLP-Hackathon-ChatGPT-TelegramBot&Date)

# ── /translate.py ──────────────────────────────────────────────────────────
import os
import json
from deep_translator import GoogleTranslator


class Translator:
    """Thin wrapper around deep-translator's GoogleTranslator."""

    def translate(self, prompt):
        """Translate *prompt* (source language auto-detected) into English."""
        translator = GoogleTranslator(source='auto', target='en')
        return translator.translate(prompt)


# # Usage:
# translator = Translator()
# a = translator.translate("Ko'chada ketayotgan yolg'iz chol")
# print(a)

# ── /text_to_speech.py ─────────────────────────────────────────────────────
from urllib.parse import quote

import requests


class TextToSpeech:
    """Fetches Uzbek TTS audio from the nutq.uz API and saves it to disk."""

    API_URL = "http://v2.nutq.uz/api/v1/cabinet/gen/?text="
    PATH_TO_AUDIO = "audios/"
    COUNTER = 0  # becomes a per-instance counter after the first increment

    def to_speech(self, text):
        """Synthesize *text*; return the saved mp3 path, or None on failure.

        BUG FIXES vs. the original:
        - only spaces were escaped ("%20"); quote() percent-encodes every
          URL-unsafe character in the query value;
        - a bare `except:` swallowed every error, including the file write;
          only network-level failures are best-effort now;
        - the counter bookkeeping no longer needs the COUNTER-1 round trip.
        """
        try:
            response = requests.get(self.API_URL + quote(text))
            response.raise_for_status()
        except requests.RequestException:
            # Keep the original best-effort contract: report and return None.
            print("Network error!")
            return None
        path = f'{self.PATH_TO_AUDIO}audio{self.COUNTER}.mp3'
        with open(path, "wb") as f:
            f.write(response.content)
        self.COUNTER += 1
        return path

# Usage:
# tts = TextToSpeech()
# tts.to_speech("Salom Dunyo!")

# ── /requirements.txt ──────────────────────────────────────────────────────
#   anyio==3.6.2
#   beautifulsoup4==4.11.1
#   cachetools==5.2.0
#   certifi==2022.12.7
#   charset-normalizer==2.1.1
#   deep-translator==1.9.1
#   et-xmlfile==1.1.0
#   google-api-core==2.11.0
#   google-api-python-client==2.70.0
#   google-auth==2.15.0
#   google-auth-httplib2==0.1.0
#   googleapis-common-protos==1.57.0
#   grpcio==1.48.1
#   grpcio-tools==1.48.1
#   h11==0.14.0
#   httpcore==0.16.3
#   httplib2==0.21.0
#   httpx==0.23.1
#   idna==3.4
#   numpy==1.24.0
#   openai==0.25.0
#   openpyxl==3.0.10
#   pandas==1.5.2
#   pandas-stubs==1.5.2.221213
#   Pillow==9.3.0
#   protobuf==3.19.5
#   pyasn1==0.4.8
#   pyasn1-modules==0.2.8
#   pyparsing==3.0.9
#   python-dateutil==2.8.2
#   python-dotenv==0.21.0
#   python-telegram-bot==20.0b0
#   pytz==2022.7
#   requests==2.28.1
#   rfc3986==1.5.0
#   rsa==4.9
#   six==1.16.0
#   sniffio==1.3.0
#   soupsieve==2.3.2.post1
#   stability-sdk==0.3.1
#   tqdm==4.64.1
#   types-pytz==2022.7.0.0
#   typing_extensions==4.4.0
#   uritemplate==4.1.1
#   urllib3==1.26.13

# ── /copilot.py ────────────────────────────────────────────────────────────
import os
import openai
import json
from text_to_speech import TextToSpeech
from dotenv import load_dotenv


class Copilot:
    """Answers questions in Uzbek via the OpenAI completion API."""

    # Appended to every prompt so the model replies in Uzbek.
    GET_IN_UZBEK = " provide answer in Uzbek language"

    def clear_text(self, text):
        """Collapse newlines and runs of whitespace into single spaces."""
        # str.split() with no argument splits on any whitespace run,
        # including "\n", so the original's replace("\n", " ") is subsumed.
        return " ".join(text.split())

    def get_answer(self, question):
        """Return the model's whitespace-normalized answer to *question*."""
        prompt = question + self.GET_IN_UZBEK

        load_dotenv()
        # BUG FIX: the key was a hard-coded placeholder even though the code
        # already called load_dotenv(); read it from the environment instead.
        openai.api_key = os.getenv("OPENAI_API_KEY")

        response = openai.Completion.create(
            engine="text-davinci-003",
            prompt=prompt,
            max_tokens=512,
            temperature=0.5,
        )

        # The original round-tripped the response through json.dumps/loads;
        # the response object already supports subscript access directly.
        text = response['choices'][0]['text']
        return self.clear_text(text)


# Usage:
# copilot = Copilot()
# a = copilot.get_answer("Birinchi Jahon urushini kim boshlagan?")
# print(a)

# Usage:
# tts = TextToSpeech()
# tts.to_speech(a)

# ── /text_to_image.py ──────────────────────────────────────────────────────
import os
import io
import warnings
from PIL import Image
from stability_sdk import client
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
from dotenv import load_dotenv

load_dotenv()

# Our Host URL should not be prepended with "https" nor should it have a
# trailing slash.
os.environ['STABILITY_HOST'] = 'grpc.stability.ai:443'

# Sign up for an account at https://beta.dreamstudio.ai/membership to get an
# API key, then find it under
# https://beta.dreamstudio.ai/membership?tab=apiKeys


class TextToImage:
    """Generates images from (English) prompts via the Stability AI API."""

    PATH_TO_IMAGES = "images/"

    def to_image(self, prompt):
        """Generate one 512x512 image for *prompt*.

        Returns the saved PNG path, or None when no image artifact was
        produced (e.g. the safety filter rejected the prompt).
        """
        # Set up our connection to the API.
        stability_api = client.StabilityInference(
            key=os.getenv("STABLE_DIFFUSION_API_KEY"),  # API key reference.
            verbose=True,                    # Print debug messages.
            engine="stable-diffusion-v1-5",  # Engine used for generation.
            # Available engines: stable-diffusion-v1, stable-diffusion-v1-5,
            # stable-diffusion-512-v2-0, stable-diffusion-768-v2-0,
            # stable-diffusion-512-v2-1, stable-diffusion-768-v2-1,
            # stable-inpainting-v1-0, stable-inpainting-512-v2-0
        )

        # Initial generation parameters.
        answers = stability_api.generate(
            prompt=prompt,
            seed=992446758,  # Fixed seed -> deterministic output for
                             # identical parameters (not for Clip Guided).
            steps=30,        # Inference steps (defaults to 30).
            cfg_scale=8.0,   # Prompt-adherence strength (defaults to 7.0).
            width=512,       # Defaults to 512 if not included.
            height=512,      # Defaults to 512 if not included.
            samples=1,       # Number of images (defaults to 1).
            sampler=generation.SAMPLER_K_DPMPP_2M,
            # Defaults to k_dpmpp_2m if not specified.  Clip Guidance only
            # supports ancestral samplers.  (Available: ddim, plms, k_euler,
            # k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral,
            # k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m)
        )

        # Warn if the adult-content classifier is tripped; otherwise save
        # the generated images.
        # BUG FIX: img_path was referenced after the loop without being
        # initialized — an UnboundLocalError whenever the safety filter
        # suppressed every artifact.
        img_path = None
        for resp in answers:
            for artifact in resp.artifacts:
                if artifact.finish_reason == generation.FILTER:
                    warnings.warn(
                        "Your request activated the API's safety filters and could not be processed."
                        "Please modify the prompt and try again.")
                if artifact.type == generation.ARTIFACT_IMAGE:
                    img = Image.open(io.BytesIO(artifact.binary))
                    img_path = self.PATH_TO_IMAGES + str(artifact.seed) + ".png"
                    # Save with the seed number as the filename.
                    img.save(img_path)
        return img_path


# Usage (BUG FIX: the original example called a non-existent get_image):
# tti = TextToImage()
# tti.to_image("imagine a world where the sun is a giant ball of cheese")

# ── /test.py ───────────────────────────────────────────────────────────────
# v = "0.0.1" — early Q&A-only version of the bot (bot.py is v 0.0.2).

import os
import json
import requests
import time

from copilot import Copilot
from text_to_speech import TextToSpeech
from translate import Translator

from dotenv import load_dotenv

from telegram import (
    ReplyKeyboardMarkup,
    Update,
    KeyboardButton,
)

from telegram.ext import (
    Application,
    CommandHandler,
    ContextTypes,
    ConversationHandler,
    MessageHandler,
    filters,
)


# Conversation states.
(ENTRY_STATE,
 QUESTION_STATE,
 AUDIO_STATE,) = range(3)


def _generate_copilot(prompt: str):
    """Gets answer from copilot"""
    copilot = Copilot()
    return copilot.get_answer(prompt)


def _translate(text: str):
    """Translates the text to English"""
    translator = Translator()
    return translator.translate(text)


def _to_speech(text: str):
    """Converts text to speech"""
    tts = TextToSpeech()
    return tts.to_speech(text)


# Starting the bot
async def start(update: Update, context: ContextTypes):
    """Start the conversation and ask user for input."""
    button = [[KeyboardButton(text="Savol javob")]]
    reply_markup = ReplyKeyboardMarkup(
        button, resize_keyboard=True
    )

    await update.message.reply_text(
        "Tanlang: 👇🏻",
        reply_markup=reply_markup,
    )

    return ENTRY_STATE


# Handling the question
async def pre_query_handler(update: Update, context: ContextTypes):
    """Ask the user for a query."""
    button = [[KeyboardButton(text="Orqaga")]]
    reply_markup = ReplyKeyboardMarkup(
        button, resize_keyboard=True
    )

    await update.message.reply_text(
        "Savolingizni yozing: 👇🏻",
        reply_markup=reply_markup,
    )

    return QUESTION_STATE


# Handling the answer
async def pre_query_answer_handler(update: Update, context: ContextTypes):
    """Display the answer to the user."""
    button = [[KeyboardButton(text="Orqaga")], [KeyboardButton(text="Audioni eshitish")]]
    reply_markup = ReplyKeyboardMarkup(
        button, resize_keyboard=True
    )

    question = update.message.text

    answer = _generate_copilot(question)
    # Stash the answer so the "Audioni eshitish" handler can voice it later.
    context.user_data['answer'] = answer

    await update.message.reply_text(
        answer,
        reply_markup=reply_markup,
    )

    return QUESTION_STATE


# Handling the audio
async def pre_query_audio_handler(update: Update, context: ContextTypes):
    """Voice the previously stored answer and send it as audio."""
    fp = _to_speech(context.user_data['answer'])

    await update.message.reply_audio(audio=open(fp, 'rb'))

    # The mp3 is a one-shot artifact; remove it once delivered.
    os.remove(fp)

    return QUESTION_STATE


if __name__ == '__main__':
    load_dotenv()

    application = Application.builder().token(os.getenv("TELEGRAM_BOT_TOKEN")).read_timeout(100).get_updates_read_timeout(100).build()

    conv_handler = ConversationHandler(
        entry_points=[CommandHandler('start', start)],
        states={
            ENTRY_STATE: [
                MessageHandler(filters.Regex('^Orqaga$'), start),
                MessageHandler(filters.Regex('^Savol javob$'), pre_query_handler),
            ],
            QUESTION_STATE: [
                MessageHandler(filters.Regex('^Orqaga$'), start),
                MessageHandler(filters.Regex('^Audioni eshitish$'), pre_query_audio_handler),
                MessageHandler(filters.TEXT, pre_query_answer_handler),
            ],
            AUDIO_STATE: [
                MessageHandler(filters.Regex('^Orqaga$'), start),
                MessageHandler(filters.TEXT, pre_query_answer_handler),
            ],
        },
        fallbacks=[],
    )

    application.add_handler(conv_handler)

    print("Bot started")
    application.run_polling()

# ── /bot.py ────────────────────────────────────────────────────────────────
# v = "0.0.2" — adds image generation on top of test.py's Q&A flow.

import os
import json
import requests
import time

from copilot import Copilot
from text_to_speech import TextToSpeech
from text_to_image import TextToImage
# from speech_to_text import SpeechToText
from translate import Translator

from dotenv import load_dotenv

from telegram import (
    ReplyKeyboardMarkup,
    Update,
    KeyboardButton,
)
from telegram.ext import (
    Application,
    CommandHandler,
    ContextTypes,
    ConversationHandler,
    MessageHandler,
    filters,
)


# Conversation states.
(ENTRY_STATE,
 QUESTION_STATE,
 AUDIO_STATE,
 IMAGE_STATE, ) = range(4)


def _generate_copilot(prompt: str):
    """Gets answer from copilot"""
    copilot = Copilot()
    return copilot.get_answer(prompt)


def _translate(text: str):
    """Translates the text to English"""
    translator = Translator()
    return translator.translate(text)


def _to_speech(text: str):
    """Converts text to speech"""
    tts = TextToSpeech()
    return tts.to_speech(text)


def _to_image(text: str):
    """Converts text to image"""
    tti = TextToImage()
    return tti.to_image(text)


# Starting the bot
async def start(update: Update, context: ContextTypes):
    """Start the conversation and ask user for input."""
    button = [[KeyboardButton(text="Savol javob")], [KeyboardButton(text="Rasm generatsiya")]]
    reply_markup = ReplyKeyboardMarkup(
        button, resize_keyboard=True
    )

    await update.message.reply_text(
        "Tanlang: 👇🏻",
        reply_markup=reply_markup,
    )

    return ENTRY_STATE


# Handling the question
async def pre_query_handler(update: Update, context: ContextTypes):
    """Ask the user for a query."""
    button = [[KeyboardButton(text="Orqaga")]]
    reply_markup = ReplyKeyboardMarkup(
        button, resize_keyboard=True
    )

    await update.message.reply_text(
        "Savolingizni yozing: 👇🏻",
        reply_markup=reply_markup,
    )

    return QUESTION_STATE


# Handling the image prompt
async def pre_image_handler(update: Update, context: ContextTypes):
    """Ask the user for an image description."""
    button = [[KeyboardButton(text="Orqaga")]]
    reply_markup = ReplyKeyboardMarkup(
        button, resize_keyboard=True
    )

    await update.message.reply_text(
        "Rasmni ifodalang: 👇🏻",
        reply_markup=reply_markup,
    )

    return IMAGE_STATE


# Handling the answer
async def pre_query_answer_handler(update: Update, context: ContextTypes):
    """Display the answer to the user."""
    button = [[KeyboardButton(text="Orqaga")], [KeyboardButton(text="Audioni eshitish")]]
    reply_markup = ReplyKeyboardMarkup(
        button, resize_keyboard=True
    )

    question = update.message.text

    answer = _generate_copilot(question)
    # Stash the answer so the "Audioni eshitish" handler can voice it later.
    context.user_data['answer'] = answer

    await update.message.reply_text(
        answer,
        reply_markup=reply_markup,
    )

    return QUESTION_STATE


# Handling the image answer
async def pre_image_answer_handler(update: Update, context: ContextTypes):
    """Generate and send an image for the user's description."""
    button = [[KeyboardButton(text="Orqaga")]]
    reply_markup = ReplyKeyboardMarkup(
        button, resize_keyboard=True
    )

    question = update.message.text
    print(question)

    # The image API works best with English prompts; translate first.
    en_v = _translate(question)
    print(en_v)

    path = _to_image(en_v)
    # BUG FIX: the original stored the _to_image function object here
    # instead of the generated file's path.
    context.user_data['image_path'] = path

    await update.message.reply_photo(
        photo=open(path, 'rb'),
        reply_markup=reply_markup,
        caption=question,
    )

    # The PNG is a one-shot artifact; remove it once delivered.
    os.remove(path)

    return IMAGE_STATE


# Handling the audio
async def pre_query_audio_handler(update: Update, context: ContextTypes):
    """Voice the previously stored answer and send it as audio."""
    fp = _to_speech(context.user_data['answer'])

    await update.message.reply_audio(audio=open(fp, 'rb'))

    os.remove(fp)

    return QUESTION_STATE


if __name__ == '__main__':
    load_dotenv()

    # BUG FIX / CONSISTENCY: the token was the hard-coded placeholder
    # "YOUR TOKEN"; read it from the environment as test.py already does.
    application = Application.builder().token(os.getenv("TELEGRAM_BOT_TOKEN")).read_timeout(100).get_updates_read_timeout(100).build()

    conv_handler = ConversationHandler(
        entry_points=[CommandHandler('start', start)],
        states={
            ENTRY_STATE: [
                MessageHandler(filters.Regex('^Orqaga$'), start),
                MessageHandler(filters.Regex('^Savol javob$'), pre_query_handler),
                MessageHandler(filters.Regex('^Rasm generatsiya$'), pre_image_handler),
            ],
            QUESTION_STATE: [
                MessageHandler(filters.Regex('^Orqaga$'), start),
                MessageHandler(filters.Regex('^Audioni eshitish$'), pre_query_audio_handler),
                MessageHandler(filters.TEXT, pre_query_answer_handler),
            ],
            AUDIO_STATE: [
                MessageHandler(filters.Regex('^Orqaga$'), start),
                MessageHandler(filters.TEXT, pre_query_answer_handler),
            ],
            IMAGE_STATE: [
                MessageHandler(filters.Regex('^Orqaga$'), start),
                MessageHandler(filters.TEXT, pre_image_answer_handler),
            ],
        },
        fallbacks=[],
    )

    application.add_handler(conv_handler)

    print("Bot started")
    application.run_polling()