├── .env.example
├── .gitignore
├── CodingDemos
│   ├── .env.example
│   ├── advancedDemo.py
│   ├── config.py
│   └── easyDemo.py
├── Integrando ChatGPT en tu codigo.pdf
├── README.md
├── brain.py
├── captures
│   └── capture.jpg
├── config.py
├── demo.py
├── engines
│   ├── __init__.py
│   ├── budaApi.py
│   ├── cameraManager.py
│   └── musicPlayer.py
├── openai_service.py
├── requirements.txt
├── songs
│   ├── Bowie.mp3
│   ├── JohnXina.mp3
│   ├── NeverGonnaGiveYouUp.mp3
│   └── RedSun.mp3
└── telegramBot.py

--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
OPENAI_API_KEY=your-key-here
TELEGRAM_TOKEN=your-token-here

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.env
__pycache__

--------------------------------------------------------------------------------
/CodingDemos/.env.example:
--------------------------------------------------------------------------------
OPENAI_API_KEY=your-key-here

--------------------------------------------------------------------------------
/CodingDemos/advancedDemo.py:
--------------------------------------------------------------------------------
import os
import openai
from config import settings
import json

openai.api_key = settings.OPENAI_API_KEY
system_prompt = {
    "role": "system",
    "content": 'Analyze any text and return the emotion of the text, in RFC 8259 compliant JSON format. '
               'response_text should be in the user\'s language. '
               'Example 1: {"emotion": "happy", "score": "0.9", "response_text": "Sample response text"}. '
               'Example 2: {"emotion": "sad", "score": "0.1", "response_text": "Sample response text"}. '
               'Example 3: {"emotion": "angry", "score": "0.5", "response_text": "Sample response text"}. '
               'Example 4: {"emotion": "neutral", "score": "0.5", "response_text": "Sample response text"}.'
}


messageHistory = [system_prompt]

while True:
    try:
        user_message = input("User: ")
        messageHistory.append({'role': 'user', 'content': user_message})

        raw_response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messageHistory
        )

        response = raw_response.choices[0].message.content

        parsed_response = json.loads(response)
        emotion = parsed_response['emotion']
        score = parsed_response['score']
        response_text = parsed_response['response_text']
        messageHistory.append({'role': 'assistant', 'content': response})

        print(f"Bot: Emocion detectada: {emotion}, Score: {score}, Texto: {response_text}")
    except Exception as e:
        print(e)
        print("Algo ocurrió mal, intenta de nuevo")

--------------------------------------------------------------------------------
/CodingDemos/config.py:
--------------------------------------------------------------------------------
import os
import dotenv
dotenv.load_dotenv()

class ConfigTokens:
    def __init__(self) -> None:
        self.OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

settings = ConfigTokens()

--------------------------------------------------------------------------------
/CodingDemos/easyDemo.py:
--------------------------------------------------------------------------------
import os
import openai
from config import settings

openai.api_key = settings.OPENAI_API_KEY

user_message = input("User: ")

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {'role': 'user', 'content': user_message},
    ]
)

print(f"Bot: {response.choices[0].message.content}")

--------------------------------------------------------------------------------
/Integrando ChatGPT en tu codigo.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CarloGauss33/OSUC-PythonOpenAI-Demo/fe868026f606c8a11ed080561a9d9e7359f6cc84/Integrando ChatGPT en tu codigo.pdf

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# PythonOpenAI demo

A practical example of how to integrate GPT-3.5 with Python. Used in the OpenSource UC talk, April 2023.

## Activity

The activity is to build a small Telegram bot that categorizes the instruction sent by the user, replies to it as humanly as possible, and processes the category to perform an action.

The message history is also saved to a local file (`history.json`).
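
In broad strokes, `brain.py` sends the user message to the model together with a system prompt, parses the JSON the model returns, and dispatches the detected action to a handler. A minimal sketch of that categorize-and-dispatch idea (the two handlers and the sample model output below are illustrative, not the actual code) could look like this:

```python
import json

# Simplified sketch of the categorize-and-dispatch pattern used by the bot.
# The sample model output and handlers are illustrative only; brain.py defines
# the full set of actions (LIGHTS_ON, PLAY_SONG, GET_BITCOIN_PRICE, ...) and
# the real system prompt.
handlers = {
    "PLAY_SONG": lambda args: f"Playing {args}",
    "DO_NOTHING": lambda args: None,
}

raw_response = '{"ACTION_NAME": "PLAY_SONG", "ARGS": "Bowie.mp3", "MESSAGE": "Poniendo Bowie"}'
parsed = json.loads(raw_response)
result = handlers[parsed["ACTION_NAME"]](parsed["ARGS"])
print(parsed["MESSAGE"], result)  # -> Poniendo Bowie Playing Bowie.mp3
```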

## Categories

- Bitcoin price (BTCPrice)
- Turn on the house lights (TurnOnLights)
- Turn off the house lights (TurnOffLights)
- Turn on the TV (TurnOnTV)
- Turn off the TV (TurnOffTV)
- Play music (PlayMusic)
- Stop music (StopMusic)

## Requirements

- Python >= 3.7
- An OpenAI account (API token)
- A Telegram bot token (for `telegramBot.py`)

## Installation

```bash
pip install -r requirements.txt
```

Then copy `.env.example` to `.env` and fill in your keys before running the bot.

--------------------------------------------------------------------------------
/brain.py:
--------------------------------------------------------------------------------
from openai_service import completions_bot, chat_manager
import engines.budaApi as budaApi
import engines.musicPlayer as music_player
import engines.cameraManager as camera_manager
import json

class ChatbotManager:
    def __init__(self):
        self.budaApi = budaApi.BudaAPI()
        self.music_player = music_player.MusicPlayer()
        self.camera_manager = camera_manager.CameraManager()
        self.mappings = {}
        self.base_message = self.build_system_message()
        self.build_mappings()

    def turns_lights_on(self, args=None):
        print("Encendiendo luces")

    def turns_lights_off(self, args=None):
        print("Apagando luces")

    def take_video(self, args=None):
        self.camera_manager.take_video()

    def take_picture(self, args=None):
        self.camera_manager.take_picture()

    def play_song(self, song_name):
        print(f"Playing {song_name}")
        self.music_player.play_song(song_name)

    def stop_song(self, args=None):
        self.music_player.stop_song()

    def get_bitcoin_price(self, args=None):
        return self.budaApi.get_bitcoin_price()

    def get_songs(self, args=None):
        return self.music_player.get_songs()

    def available_songs(self, args=None):
        return self.music_player.get_songs()

    def build_mappings(self):
        self.mappings = {
            'LIGHTS_ON': self.turns_lights_on,
            'LIGHTS_OFF': self.turns_lights_off,
            'PLAY_SONG': self.play_song,
            'STOP_SONG': self.stop_song,
            'TAKE_PHOTO': self.take_picture,
            'TAKE_VIDEO': self.take_video,
            'AVAILABLE_SONGS': self.available_songs,
            'GET_BITCOIN_PRICE': self.get_bitcoin_price,
            'DO_NOTHING': lambda args=None: None,
            'ANSWER_QUESTION': lambda args=None: None,
        }

    def build_system_message(self):
        message = "You are an assistant bot for the user that can call certain actions. "
        message += "You can call the following actions: "
        message += "DO_NOTHING - does nothing, "
        message += "LIGHTS_ON - turns lights on, "
        message += "LIGHTS_OFF - turns lights off, "
        message += "TAKE_PHOTO - takes a photo, "
        message += "TAKE_VIDEO - takes a video, "
        message += "GET_BITCOIN_PRICE - gets the bitcoin price, the bitcoin price will be sent in another message, "
        message += "PLAY_SONG - plays a song, "
        message += "AVAILABLE_SONGS - gets the available songs, "
        message += "STOP_SONG - stops the song, "
        message += "ANSWER_QUESTION - answers a question, the response will be sent in the MESSAGE field. "
        message += "Do not include any explanations, only provide a RFC 8259 compliant JSON response following this format without deviation: "
        message += "{ ACTION_NAME: action_name, ARGS: args, MESSAGE: message }. "
        message += "message should be in the language of the user. "
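        # Illustrative example of the kind of JSON the model is expected to
        # return (hypothetical values):
        # {"ACTION_NAME": "PLAY_SONG", "ARGS": "Bowie.mp3", "MESSAGE": "Poniendo Bowie"}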
74 | message += "The JSON response: " 75 | return message 76 | 77 | def ask(self, message): 78 | chat_manager.add_message("user", message) 79 | messages = chat_manager.get_messages(self.base_message) 80 | 81 | raw_response = completions_bot.get_chat_completion(messages) 82 | 83 | try: 84 | response = json.loads(raw_response) 85 | action = response.get("ACTION_NAME") 86 | args = response.get("ARGS") 87 | message = response.get("MESSAGE") 88 | print(action) 89 | action_response = self.mappings[action](args) 90 | 91 | if action_response: 92 | message += '\n' + action_response 93 | 94 | chat_manager.add_message("assistant", raw_response) 95 | 96 | return message 97 | 98 | except Exception as e: 99 | print(e) 100 | return 'No se pudo procesar la respuesta' 101 | 102 | 103 | if __name__ == "__main__": 104 | chatbot_manager = ChatbotManager() 105 | 106 | print("Comenzando conversación") 107 | 108 | while True: 109 | message = input("User: ") 110 | response = chatbot_manager.ask(message) 111 | print("Assistant: ", response) -------------------------------------------------------------------------------- /captures/capture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CarloGauss33/OSUC-PythonOpenAI-Demo/fe868026f606c8a11ed080561a9d9e7359f6cc84/captures/capture.jpg -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import dotenv 3 | 4 | class ConfigTokens: 5 | def __init__(self) -> None: 6 | dotenv.load_dotenv() 7 | self.OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 8 | self.TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN") 9 | 10 | settings = ConfigTokens() -------------------------------------------------------------------------------- /demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import openai 3 | from config import settings 4 | 5 | openai.api_key = settings.OPENAI_API_KEY 6 | 7 | user_message = input("User: ") 8 | 9 | response = openai.ChatCompletion.create( 10 | model="gpt-3.5-turbo", 11 | messages= [ 12 | {'role': 'user', 'content': user_message}, 13 | ] 14 | ) 15 | 16 | print(f"Bot: {response.choices[0].message.content}") -------------------------------------------------------------------------------- /engines/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CarloGauss33/OSUC-PythonOpenAI-Demo/fe868026f606c8a11ed080561a9d9e7359f6cc84/engines/__init__.py -------------------------------------------------------------------------------- /engines/budaApi.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | class BudaAPI: 4 | def __init__(self) -> None: 5 | self.base_url = "https://www.buda.com/api/v2/markets" 6 | 7 | def get_bitcoin_price(self): 8 | bitcoin_url = f"{self.base_url}/btc-clp/ticker" 9 | response = requests.get(bitcoin_url).json()['ticker']['last_price'] 10 | return ''.join(response) 11 | -------------------------------------------------------------------------------- /engines/cameraManager.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import cv2 4 | 5 | class CameraManager: 6 | def __init__(self, storage_folder_path="captures", camera_id=0) -> None: 7 | self.storage_folder_path = storage_folder_path 8 | 
        self.camera_id = camera_id
        self.camera = cv2.VideoCapture(self.camera_id)
        self.find_or_create_storage_folder()

    def find_or_create_storage_folder(self):
        if not os.path.exists(self.storage_folder_path):
            os.mkdir(self.storage_folder_path)

    def take_picture(self, file_name="capture.jpg"):
        ret, frame = self.camera.read()
        cv2.imwrite(os.path.join(self.storage_folder_path, file_name), frame)
        return os.path.join(self.storage_folder_path, file_name)

    def take_video(self, video_duration_seconds=5, file_name="capture.mp4"):
        # This code is not the point of the demo. It is verbose because I ran out of time :sad:

        camera_frame_rate = self.camera.get(cv2.CAP_PROP_FPS)
        video_frame_rate = 20
        video_frame_size = (int(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        video_file_path = os.path.join(self.storage_folder_path, file_name)
        video_writer = cv2.VideoWriter(video_file_path, cv2.VideoWriter_fourcc(*"mp4v"), video_frame_rate, video_frame_size)
        for i in range(int(camera_frame_rate * video_duration_seconds)):
            ret, frame = self.camera.read()
            video_writer.write(frame)
        video_writer.release()
        return

    def list_images(self):
        return os.listdir(self.storage_folder_path)

    def list_videos(self):
        return os.listdir(self.storage_folder_path)

    def release_camera(self):
        self.camera.release()
        cv2.destroyAllWindows()
        return

--------------------------------------------------------------------------------
/engines/musicPlayer.py:
--------------------------------------------------------------------------------
import os
import sys
from pygame import mixer

class MusicPlayer:
    def __init__(self, music_folder="songs") -> None:
        self.music_folder = music_folder
        self.available_songs = []
        self.load_available_songs()

    def load_available_songs(self):
        for song in os.listdir(self.music_folder):
            if song.endswith(".mp3"):
                self.available_songs.append(song)

    def get_songs(self):
        return '\n- '.join(self.available_songs)

    def play_song(self, song_name):
        if song_name in self.available_songs:
            print(f"Playing {song_name}")
            mixer.init()
            mixer.music.load(os.path.join(self.music_folder, song_name))
            mixer.music.play()
        return

    def stop_song(self, args=None):
        mixer.music.stop()
        return

--------------------------------------------------------------------------------
/openai_service.py:
--------------------------------------------------------------------------------
import openai
import json
import os
from config import settings

openai.api_key = settings.OPENAI_API_KEY

class ChatManager:
    def __init__(self, history_file_path="history.json"):
        self.history_file_path = history_file_path
        self.history = []
        self.find_or_create_history()
        self.load_history()

    def find_or_create_history(self):
        if not os.path.exists(self.history_file_path):
            with open(self.history_file_path, "w") as f:
                json.dump([], f)

    def load_history(self):
        with open(self.history_file_path, "r") as f:
            self.history = json.load(f)

    def save_history(self):
        with open(self.history_file_path, "w") as f:
            json.dump(self.history, f)

    def add_message(self, role, content):
        self.history.append({"role": role, "content": content})
        self.save_history()
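
    # The persisted history only contains user/assistant turns; the system
    # prompt is prepended on every request by get_messages below.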

    def get_messages(self, system_message=""):
        return [{ "role": "system", "content": system_message }] + self.history

class CompletionsBot:
    def __init__(self):
        self.engine = "gpt-4"

    def get_chat_completion(self, messages, temperature=0.1, max_tokens=500):
        response = openai.ChatCompletion.create(
            model=self.engine,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
        )

        if response.choices:
            return response.choices[0].message.content
        return 'No se pudo generar una respuesta'


completions_bot = CompletionsBot()
chat_manager = ChatManager()

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
requests
pygame
openai==0.27.0
python-dotenv
python-telegram-bot
opencv-python

--------------------------------------------------------------------------------
/songs/Bowie.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CarloGauss33/OSUC-PythonOpenAI-Demo/fe868026f606c8a11ed080561a9d9e7359f6cc84/songs/Bowie.mp3

--------------------------------------------------------------------------------
/songs/JohnXina.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CarloGauss33/OSUC-PythonOpenAI-Demo/fe868026f606c8a11ed080561a9d9e7359f6cc84/songs/JohnXina.mp3

--------------------------------------------------------------------------------
/songs/NeverGonnaGiveYouUp.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CarloGauss33/OSUC-PythonOpenAI-Demo/fe868026f606c8a11ed080561a9d9e7359f6cc84/songs/NeverGonnaGiveYouUp.mp3

--------------------------------------------------------------------------------
/songs/RedSun.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CarloGauss33/OSUC-PythonOpenAI-Demo/fe868026f606c8a11ed080561a9d9e7359f6cc84/songs/RedSun.mp3

--------------------------------------------------------------------------------
/telegramBot.py:
--------------------------------------------------------------------------------
from config import settings
from telegram import Update
from telegram.ext import ApplicationBuilder, ContextTypes, CommandHandler, MessageHandler
from telegram.ext.filters import BaseFilter
from brain import ChatbotManager

chat_manager = ChatbotManager()

async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
    await context.bot.send_message(chat_id=update.effective_chat.id, text="Hola!")

async def message_callback(update: Update, context: ContextTypes.DEFAULT_TYPE):
    message = update.message.text
    gpt_response = chat_manager.ask(message)

    await context.bot.send_message(chat_id=update.effective_chat.id, text=gpt_response)


if __name__ == '__main__':
    print('Starting bot...')
    application = ApplicationBuilder().token(settings.TELEGRAM_TOKEN).build()
    start_handler = CommandHandler('start', start)
    message_handler = MessageHandler(filters=BaseFilter(), callback=message_callback)

    # Register the /start handler before the catch-all message handler so that
    # /start is not swallowed by message_callback (handlers in the same group
    # are tried in the order they are added).
    application.add_handler(start_handler)
    application.add_handler(message_handler)
    application.run_polling()
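
# To run the Telegram bot: python telegramBot.py, with OPENAI_API_KEY and
# TELEGRAM_TOKEN defined in a local .env file (see .env.example).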
--------------------------------------------------------------------------------