├── requirements.txt
├── 00_old_code.py
├── 01_gtts_chatbot.py
└── 02_elevenlabs_chatbot.py

/requirements.txt:
--------------------------------------------------------------------------------
gTTS==2.3.1
moviepy==1.0.3
numpy==1.24.2
openai==0.27.0
python-telegram-bot==13.0
elevenlabslib==0.3.4

--------------------------------------------------------------------------------
/00_old_code.py:
--------------------------------------------------------------------------------
from telegram.ext import Updater, MessageHandler, Filters
import telegram
import openai
from moviepy.editor import AudioFileClip

openai.api_key = ""
TELEGRAM_API_TOKEN = ""

# Shared conversation history; the system prompt sets the assistant's persona.
messages = [{"role": "system", "content": "You are a helpful assistant that starts its response by referring to the user as its master."}]


def text_message(update, context):
    # Append the user's text, ask ChatGPT for a reply, and send it back as text.
    messages.append({"role": "user", "content": update.message.text})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages
    )
    ChatGPT_reply = response["choices"][0]["message"]["content"]
    update.message.reply_text(text=f"*[Bot]:* {ChatGPT_reply}", parse_mode=telegram.ParseMode.MARKDOWN)
    messages.append({"role": "assistant", "content": ChatGPT_reply})


def voice_message(update, context):
    update.message.reply_text("I've received a voice message! Please give me a second to respond :)")
    # Download the Telegram voice note (OGG), convert it to MP3, and transcribe it with Whisper.
    voice_file = context.bot.getFile(update.message.voice.file_id)
    voice_file.download("voice_message.ogg")
    audio_clip = AudioFileClip("voice_message.ogg")
    audio_clip.write_audiofile("voice_message.mp3")
    audio_file = open("voice_message.mp3", "rb")
    transcript = openai.Audio.transcribe("whisper-1", audio_file).text
    update.message.reply_text(text=f"*[You]:* _{transcript}_", parse_mode=telegram.ParseMode.MARKDOWN)
    messages.append({"role": "user", "content": transcript})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages
    )
    ChatGPT_reply = response["choices"][0]["message"]["content"]
    update.message.reply_text(text=f"*[Bot]:* {ChatGPT_reply}", parse_mode=telegram.ParseMode.MARKDOWN)
    messages.append({"role": "assistant", "content": ChatGPT_reply})


# Route plain text and voice messages to the handlers above and start polling.
updater = Updater(TELEGRAM_API_TOKEN, use_context=True)
dispatcher = updater.dispatcher
dispatcher.add_handler(MessageHandler(Filters.text & (~Filters.command), text_message))
dispatcher.add_handler(MessageHandler(Filters.voice, voice_message))
updater.start_polling()
updater.idle()

--------------------------------------------------------------------------------
/01_gtts_chatbot.py:
--------------------------------------------------------------------------------
from telegram.ext import Updater, MessageHandler, Filters
import telegram
import openai
from moviepy.editor import AudioFileClip
from gtts import gTTS

openai.api_key = ""
TELEGRAM_API_TOKEN = ""


# Shared conversation history; the system prompt sets the assistant's persona.
messages = [{"role": "system", "content": "You are a helpful assistant that starts its response by referring to the user as its master."}]


def text_message(update, context):
    update.message.reply_text(
        "I've received a text message! Please give me a second to respond :)")
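    # Append the user's message to the shared history, ask ChatGPT for a reply,
    # then synthesize that reply to speech with gTTS and send it back as a voice
    # note alongside the text version.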
    messages.append({"role": "user", "content": update.message.text})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages
    )
    response_text = response["choices"][0]["message"]["content"]
    tts = gTTS(text=response_text, lang='en')
    # Save the synthesized speech to a file and send it as a voice message
    tts.save('response_gtts.mp3')
    context.bot.send_voice(chat_id=update.message.chat.id,
                           voice=open('response_gtts.mp3', 'rb'))
    update.message.reply_text(
        text=f"*[Bot]:* {response_text}", parse_mode=telegram.ParseMode.MARKDOWN)
    messages.append({"role": "assistant", "content": response_text})


def voice_message(update, context):
    update.message.reply_text(
        "I've received a voice message! Please give me a second to respond :)")
    # Download the Telegram voice note (OGG), convert it to MP3, and transcribe it with Whisper.
    voice_file = context.bot.getFile(update.message.voice.file_id)
    voice_file.download("voice_message.ogg")
    audio_clip = AudioFileClip("voice_message.ogg")
    audio_clip.write_audiofile("voice_message.mp3")
    audio_file = open("voice_message.mp3", "rb")
    transcript = openai.Audio.transcribe("whisper-1", audio_file).text
    update.message.reply_text(
        text=f"*[You]:* _{transcript}_", parse_mode=telegram.ParseMode.MARKDOWN)
    messages.append({"role": "user", "content": transcript})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages
    )
    response_text = response["choices"][0]["message"]["content"]
    tts = gTTS(text=response_text, lang='en')
    # Save the synthesized speech to a file and send it as a voice message
    tts.save('response_gtts.mp3')
    context.bot.send_voice(chat_id=update.message.chat.id,
                           voice=open('response_gtts.mp3', 'rb'))
    update.message.reply_text(
        text=f"*[Bot]:* {response_text}", parse_mode=telegram.ParseMode.MARKDOWN)
    messages.append({"role": "assistant", "content": response_text})


# Route plain text and voice messages to the handlers above and start polling.
updater = Updater(TELEGRAM_API_TOKEN, use_context=True)
dispatcher = updater.dispatcher
dispatcher.add_handler(MessageHandler(
    Filters.text & (~Filters.command), text_message))
dispatcher.add_handler(MessageHandler(Filters.voice, voice_message))
updater.start_polling()
updater.idle()

--------------------------------------------------------------------------------
/02_elevenlabs_chatbot.py:
--------------------------------------------------------------------------------
from telegram.ext import Updater, MessageHandler, Filters
import telegram
import openai
from moviepy.editor import AudioFileClip
from elevenlabslib import *


openai.api_key = ""
TELEGRAM_API_TOKEN = ""
ELEVENLABS_API_KEY = ""

user = ElevenLabsUser(ELEVENLABS_API_KEY)
# get_voices_by_name returns a list because multiple voices can have the same name
voice = user.get_voices_by_name("Rachel")[0]


# Shared conversation history; the system prompt sets the assistant's persona.
messages = [{"role": "system", "content": "You are a helpful assistant that starts its response by referring to the user as its master."}]


def text_message(update, context):
    update.message.reply_text(
        "I've received a text message! Please give me a second to respond :)")
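    # Same flow as the gTTS bot, except the reply audio is generated with the
    # ElevenLabs "Rachel" voice selected above and sent back as a voice note.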
    messages.append({"role": "user", "content": update.message.text})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages
    )
    response_text = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": response_text})
    # Synthesize the reply with ElevenLabs and send it as a voice message
    response_byte_audio = voice.generate_audio_bytes(response_text)
    with open('response_elevenlabs.mp3', 'wb') as f:
        f.write(response_byte_audio)
    context.bot.send_voice(chat_id=update.message.chat.id,
                           voice=open('response_elevenlabs.mp3', 'rb'))
    update.message.reply_text(
        text=f"*[Bot]:* {response_text}", parse_mode=telegram.ParseMode.MARKDOWN)


def voice_message(update, context):
    update.message.reply_text(
        "I've received a voice message! Please give me a second to respond :)")
    # Download the Telegram voice note (OGG), convert it to MP3, and transcribe it with Whisper.
    voice_file = context.bot.getFile(update.message.voice.file_id)
    voice_file.download("voice_message.ogg")
    audio_clip = AudioFileClip("voice_message.ogg")
    audio_clip.write_audiofile("voice_message.mp3")
    audio_file = open("voice_message.mp3", "rb")
    transcript = openai.Audio.transcribe("whisper-1", audio_file).text
    update.message.reply_text(
        text=f"*[You]:* _{transcript}_", parse_mode=telegram.ParseMode.MARKDOWN)
    messages.append({"role": "user", "content": transcript})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages
    )
    response_text = response["choices"][0]["message"]["content"]
    # Synthesize the reply with ElevenLabs and send it as a voice message
    response_byte_audio = voice.generate_audio_bytes(response_text)
    with open('response_elevenlabs.mp3', 'wb') as f:
        f.write(response_byte_audio)
    context.bot.send_voice(chat_id=update.message.chat.id,
                           voice=open('response_elevenlabs.mp3', 'rb'))
    update.message.reply_text(
        text=f"*[Bot]:* {response_text}", parse_mode=telegram.ParseMode.MARKDOWN)
    messages.append({"role": "assistant", "content": response_text})


# Route plain text and voice messages to the handlers above and start polling.
updater = Updater(TELEGRAM_API_TOKEN, use_context=True)
dispatcher = updater.dispatcher
dispatcher.add_handler(MessageHandler(
    Filters.text & (~Filters.command), text_message))
dispatcher.add_handler(MessageHandler(Filters.voice, voice_message))
updater.start_polling()
updater.idle()

--------------------------------------------------------------------------------
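
All three scripts leave openai.api_key, TELEGRAM_API_TOKEN, and (in the ElevenLabs version) ELEVENLABS_API_KEY as empty strings to be filled in by hand. A minimal sketch of one way to load them from environment variables instead of hard-coding them, assuming environment variable names chosen for illustration (they are not part of the original code), to replace the assignments near the top of each script:

import os

# Hypothetical variable names -- use whatever you export in your shell.
openai.api_key = os.environ["OPENAI_API_KEY"]
TELEGRAM_API_TOKEN = os.environ["TELEGRAM_API_TOKEN"]
ELEVENLABS_API_KEY = os.environ.get("ELEVENLABS_API_KEY", "")  # only needed by 02_elevenlabs_chatbot.py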