├── .gitignore ├── .streamlit └── config.toml ├── README.md ├── [Documentation Lumen] └── Lumen Documentation.pdf ├── functions ├── _functions_global_ │ ├── app_button.py │ ├── config_model.py │ ├── get_model.py │ └── llm_model_functions │ │ ├── llm_generate.py │ │ ├── llm_load_hist.py │ │ └── llm_save_hist.py ├── _ui_global │ ├── custom_ui.py │ └── page_title.py ├── code_page │ ├── code_command.py │ └── code_mode.py ├── config_page │ ├── cam_index.py │ ├── custom_model │ │ ├── colors.py │ │ └── modelfile.py │ ├── json │ │ ├── create_trigger_app_json.py │ │ └── create_trigger_response_json.py │ ├── mic_sensitivity.py │ ├── narrator_voice.py │ ├── selection_microphone.py │ ├── speech_to_text │ │ ├── record.py │ │ └── speech.py │ └── visual_system_voice.py ├── discussion_page │ ├── discussion_command.py │ └── discussion_mode.py ├── menu_page │ ├── commands │ │ ├── app_chrono.py │ │ ├── audio_gestion.py │ │ ├── cam │ │ │ ├── cam_index.py │ │ │ ├── screen_cam.py │ │ │ ├── video_capture.py │ │ │ └── video_command.py │ │ ├── cybersec_info_system.py │ │ ├── data_time.py │ │ ├── json │ │ │ ├── load_json.py │ │ │ └── text_add_json.py │ │ ├── math_calcul.py │ │ ├── screenshot.py │ │ ├── speech_to_en.py │ │ ├── take_note.py │ │ └── web │ │ │ ├── check_connection.py │ │ │ └── search_web.py │ ├── get_language.py │ └── start_assistant.py ├── precision_page │ ├── precision_command.py │ └── precision_mode.py └── vision_page │ ├── encode_img.py │ ├── vision_command.py │ └── vision_mode.py ├── menu.py ├── pages ├── 1_🛠️Configuration.py ├── 2_💬Discussion.py ├── 3_🎯Precision.py ├── 4_👨💻Code.py └── 5_👁️Vision.py ├── requirements.txt ├── ressources ├── lumen_color_palette.png └── lumen_logo.png └── start-assistant.bat /.gitignore: -------------------------------------------------------------------------------- 1 | # env files 2 | __pycache__ 3 | .env 4 | 5 | # save config 6 | save_config_txt 7 | 8 | # Json files 9 | config_json 10 | 11 | # Temp audio files 12 | 
functions/config_page/speech_to_text/temp_audio 13 | temp_output_voice 14 | 15 | # Chat History 16 | functions/discussion_page/discussion_history 17 | functions/code_page/code_history 18 | functions/precision_page/precision_history 19 | functions/vision_page/vision_history 20 | 21 | # Temp code & elements 22 | temp_code 23 | temp_element 24 | 25 | # Model parameters 26 | temp_modelfile 27 | 28 | # Photos cam 29 | photos 30 | 31 | -------------------------------------------------------------------------------- /.streamlit/config.toml: -------------------------------------------------------------------------------- 1 | [theme] 2 | primaryColor="#c05bb6" 3 | backgroundColor="#121b3b" 4 | secondaryBackgroundColor="#2264b5" 5 | textColor="#f0d6ec" 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # **Lumen** (for Windows 11) 3 | 4 | **Assistant to support you in your projects and tasks.** 5 | 6 | 7 | ## Installation 8 | 9 | Run the **start-assistant.bat** and follow the instructions on the shell. 10 | 11 | **ATTENTION :** 12 | This application is not suitable for **Mac** and **Linux** and may not work depending on the hardware of your Windows computer. 13 | ## Synthetic Voices 14 | 15 | If you want to have more synthetic voices available, on Windows you have to go to the narrator settings and you can download the voices you want. 16 | 17 | If this doesn't work and doesn't recognize the voices you have installed on the narrator settings, follow this steps : 18 | 1. Open the **Registry Editor** by pressing the **“Windows” and “R”** keys simultaneously, then type **“regedit”** and press Enter. 19 | 20 | 2. Navigate to the registry key : **HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech_OneCore\Voices\Tokens**. 21 | 22 | 3. Export this key to a **REG file** (with a right click on the file). 23 | 24 | 4. 
class AppButton:
    """Sidebar file-management actions (rename / delete / CSV export) for a
    saved chat-history JSON file.

    Attributes:
        lang: UI language, 'Fr' for French, anything else renders English.
        history_dir: directory containing the saved history .json files.
        selected_file: name of the history file currently selected in the UI.
    """

    def __init__(self, lang, history_dir, selected_file):
        self.lang = lang
        self.history_dir = history_dir
        self.selected_file = selected_file

    def rename_file(self):
        """Rename the selected history file from a sidebar text input."""
        new_file_name = st.sidebar.text_input('Nouveau nom' if self.lang == 'Fr' else 'New name')
        if new_file_name:
            # Strip punctuation so the result stays a safe, flat file name.
            new_file_name = new_file_name.translate(str.maketrans('', '', string.punctuation))
            if st.sidebar.button('Renommer' if self.lang == 'Fr' else 'Rename'):
                if not new_file_name.endswith('.json'):
                    new_file_name += '.json'
                os.rename(os.path.join(self.history_dir, self.selected_file),
                          os.path.join(self.history_dir, new_file_name))
                st.rerun()
        else:
            st.sidebar.warning('Veuillez entrer un nom de fichier.' if self.lang == 'Fr' else 'Please enter a file name.')

    def delete_file(self):
        """Delete the selected history file on sidebar button click."""
        if st.sidebar.button('Supprimer' if self.lang == 'Fr' else 'Delete'):
            os.remove(os.path.join(self.history_dir, self.selected_file))
            st.rerun()

    def download_as_csv(self):
        """Offer the selected JSON history as a base64-embedded CSV download link."""
        if st.sidebar.button('Télécharger CSV' if self.lang == 'Fr' else 'Download CSV'):
            with open(os.path.join(self.history_dir, self.selected_file), 'r', encoding='utf-8') as f:
                data = json.load(f)
            df = pd.DataFrame(data)
            csv = df.to_csv(index=False)
            b64 = base64.b64encode(csv.encode('utf-8')).decode()
            csv_file_name = self.selected_file.replace('.json', '.csv')
            # FIX: b64 and csv_file_name were computed but never referenced by the
            # link text, so the rendered "download" link downloaded nothing.
            # Embed them in a data-URI anchor (st.markdown is already called with
            # unsafe_allow_html=True, which only makes sense for HTML output).
            label = 'Cliquez pour télécharger' if self.lang == 'Fr' else 'Click to download'
            href = f'<a href="data:file/csv;base64,{b64}" download="{csv_file_name}">{label}</a>'
            st.sidebar.markdown(href, unsafe_allow_html=True)
def get_model_names():
    """Return the names of the locally installed Ollama models.

    Runs ``ollama list`` and extracts the first column (model name) of each row.

    Returns:
        list[str]: model names; empty if no models are listed.

    Raises:
        subprocess.CalledProcessError: if the ``ollama`` command fails.
    """
    # Use an argument list instead of shell=True: no shell features are needed,
    # and this avoids spawning an intermediate shell process.
    output = subprocess.check_output(["ollama", "list"]).decode()

    # Skip the leading header rows.
    # NOTE(review): the original skipped two lines; `ollama list` normally prints
    # a single header row — confirm against the targeted Ollama version (0.3.2).
    lines = output.split('\n')[2:]

    # The model name is the first whitespace-separated token of each non-empty row.
    return [line.split()[0] for line in lines if line]
def load_and_display_chat(filename):
    """Load a saved chat history (JSON) and replay it in the Streamlit UI.

    The loaded history is also mirrored into
    ``st.session_state['session_state']`` so it survives reruns and can be
    reused elsewhere in the app.
    """
    with open(filename, 'r', encoding='utf-8') as history_file:
        history = json.load(history_file)

    # Keep the conversation available across Streamlit reruns.
    st.session_state['session_state'] = history

    # Render every stored message inside its original role bubble.
    for entry in st.session_state['session_state']:
        with st.chat_message(entry['role']):
            st.write(entry['content'])
import string 5 | nltk.download('punkt') 6 | 7 | 8 | def save_conversation(chat_history, directory): 9 | # Convert chat history to the desired format 10 | formatted_history = [] 11 | user_text = "" 12 | for i in chat_history: 13 | formatted_history.append({ 14 | "role": "user", 15 | "content": i[0] 16 | }) 17 | formatted_history.append({ 18 | "role": "assistant", 19 | "content": i[1] 20 | }) 21 | # Concatenate user text 22 | user_text += " " + i[0] 23 | 24 | # Tokenize the text and extract the desired words 25 | words = nltk.word_tokenize(user_text) 26 | 27 | # Remove punctuation from the words 28 | words = [word for word in words if word not in string.punctuation] 29 | filename_words = words[3:13] # Take words after the first 3 words, up to 10 words 30 | filename = '_'.join(filename_words) + '.json' 31 | 32 | # Create the directory if it doesn't exist 33 | if not os.path.exists(directory): 34 | os.makedirs(directory) 35 | 36 | # Save to a json file in the specified directory 37 | with open(os.path.join(directory, filename), 'w', encoding='utf-8') as f: 38 | json.dump(formatted_history, f, ensure_ascii=False, indent=4) 39 | -------------------------------------------------------------------------------- /functions/_ui_global/custom_ui.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | 4 | def custom_ui(): 5 | # Hide deploybutton in streamlit ui 6 | st.markdown(""" 7 | 10 | """, unsafe_allow_html=True) -------------------------------------------------------------------------------- /functions/_ui_global/page_title.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | 4 | def set_page_title(title, user_title): 5 | if title != user_title: 6 | title = user_title 7 | st.markdown(unsafe_allow_html=True, body=f""" 8 | 11 | """) 12 | -------------------------------------------------------------------------------- 
def start_code(device, lang, talk, model_path, hist_dir):
    """Voice-driven "code" chat loop: record speech, optionally let the user
    paste code in Notepad, and forward everything to the configured LLM.

    Args:
        device: microphone device index used for recording.
        lang: UI/voice language, 'Fr' for French, anything else is English.
        talk: callable speaking a string out loud.
        model_path: path of the .txt file holding the selected LLM name.
        hist_dir: directory where saved conversations are written.
    """
    llm = LLMGenerate(talk)
    conversation_history = []
    speech_to_text = SpeechToText()
    filename_temp_audio = 'functions/config_page/speech_to_text/temp_audio/audio.wav'

    if os.path.isfile(model_path):
        with open(model_path, 'r') as file:
            llm_model = file.read().strip()
    else:
        # FIX: "template" was a mistranslation of the French "modèle" (model).
        st.sidebar.error("Veuillez configurer le modèle à utiliser dans la page code." if lang == 'Fr' else
                         "Please configure the model to be used in the code page.")
        # FIX: without this return, llm_model stayed unbound and the loop below
        # crashed with a NameError on the first LLM call.
        return

    detect_code_keywords = ['je veux rentrer du code', 'modification de code', 'I want to enter code']
    st.success(f"Si vous souhaitez rentrer du code pour parler dessus dire {detect_code_keywords}" if lang == 'Fr' else
               f"If you want to enter code to talk about it, say {detect_code_keywords}")

    def ask_user_intent():
        """Open Notepad for the user to paste code, wait for a voice trigger,
        then ask (by voice) what to do with the pasted code."""
        trigger_phrases = ["c'est bon", "j'ai déposé mon code", 'code déposé',
                           "it's okay", "I submitted my code", 'code submitted',
                           "it is okay", "okai", "ok", "okay"]
        # FIX: the English branch used print(), which is invisible in the
        # Streamlit UI; both languages now use st.warning.
        st.warning(f"Pour continuer, dire:\n {trigger_phrases}\n" if lang == 'Fr' else
                   f"To continue, say:\n{trigger_phrases}\n")

        # Create the path if it doesn't exist, and empty any previous content.
        file_path = 'functions/code_page/temp_code/temp_code.txt'
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, 'w') as file:
            file.write('')

        os.system(f'notepad {file_path}')  # Windows-only by design (see README)
        # Poll the microphone until one of the trigger phrases is heard.
        while True:
            record_audio(language=lang, device_index=device)
            listen = speech_to_text.transcribe(filename_temp_audio)
            if any(keyword in listen for keyword in trigger_phrases):
                break
            time.sleep(1)

        with open(file_path, 'r') as file:
            code = file.read()

        talk("Que voulez-vous faire avec le code fourni ?" if lang == 'Fr' else
             "What do you want to do with the code provided ?")
        record_audio(language=lang, device_index=device)
        user_intent = speech_to_text.transcribe(filename_temp_audio)
        return code, user_intent

    while True:
        # NOTE(review): this markdown is emitted with unsafe_allow_html=True but
        # its HTML tags were lost in this copy; a heading tag is assumed — confirm.
        st.markdown("<h1>Mode code LLM..</h1>" if lang == 'Fr' else
                    "<h1>Code LLM mode..</h1>", unsafe_allow_html=True)
        record_audio(language=lang, device_index=device)
        listen = speech_to_text.transcribe(filename_temp_audio)

        # Voice command: paste code in Notepad and discuss it.
        if any(keyword in listen for keyword in detect_code_keywords):
            code, user_intent = ask_user_intent()
            lang_preprompt = ("Parle en Français et réponds en français, " if lang == 'Fr' else
                              "Speak in English and respond in English, ")
            full_prompt_user = lang_preprompt + user_intent
            _, conversation_history = llm.generate_response(llm_model, full_prompt_user + " " + code, conversation_history)
            continue

        # Voice command: save the conversation to disk.
        detect_save_keywords = ['sauvegarde notre discussion', 'sauvegarde notre conversation', 'sauvegarde la discussion', 'sauvegarde la conversation',
                                'save our discussion', 'save our conversation', 'save the discussion', 'save the conversation']
        if any(keyword in listen for keyword in detect_save_keywords):
            talk("La conversation a été sauvegardé" if lang == 'Fr' else "The conversation has been saved")
            st.success("Conversation sauvegardé" if lang == 'Fr' else "Conversation saved.")
            # FIX: saving before any LLM exchange used to raise NameError because
            # `chat_history` was only bound after the first generate_response call;
            # conversation_history is the same, always-bound list.
            save_conversation(conversation_history, hist_dir)
            continue

        # Voice command: leave LLM mode, back to basic voice-command execution.
        detect_stop_llm_code_keywords = ['c\'est bon tu peux arrêter', 'tu peux arrêter', 'désactive llm', 'passe en mode classique',
                                         'passage en mode classique', 'désactive code llm', 'passe classique', 'disable llm',
                                         'switch to classic mode', 'switch classic mode', 'disable code llm',
                                         'it\'s okay you can stop', 'it is okay you can stop', 'you can stop']
        if any(keyword in listen for keyword in detect_stop_llm_code_keywords):
            talk("Passage en mode exécution de commandes" if lang == 'Fr' else "Switching to commands execution mode")
            st.error("Arrêt du code." if lang == 'Fr' else "Stopping the code.")
            break

        # Default: forward the transcription to the LLM, prefixed with a
        # language instruction.
        lang_preprompt = "Réponds en français, " if lang == 'Fr' else "Respond in English, "
        full_prompt_user = lang_preprompt + str(listen)
        _, conversation_history = llm.generate_response(llm_model, full_prompt_user, conversation_history)


class CodeCommand:
    """Voice trigger that switches the assistant into code mode."""

    def __init__(self, listen, device, language, talk, model_path, hist_dir):
        self.listen = listen          # latest transcribed user utterance
        self.device = device          # microphone device index
        self.language = language      # 'Fr' or English otherwise
        self.talk = talk              # text-to-speech callable
        self.model_path = model_path  # .txt file holding the chosen LLM name
        self.hist_dir = hist_dir      # directory for saved conversations

    def launch_code_mode(self):
        """Start code mode when a start keyword appears in the utterance."""
        code_llm_start_keywords = ['lumen passe en mode code', 'lumen passe en code', 'lumen passage en mode code',
                                   'lumen mode code', 'lumen switch to code mode', 'lumen switch to code', 'lumen code mode']
        if any(keyword in self.listen for keyword in code_llm_start_keywords):
            self.talk("Mode Code Activé" if self.language == 'Fr' else "Coding Mode Activated")
            start_code(self.device, self.language, self.talk, self.model_path, self.hist_dir)


def list_cameras():
    """Probe camera indices 0..n with OpenCV and return labels for those that
    deliver a frame (stops at the first index that fails to read)."""
    index = 0
    arr = []
    while True:
        cap = cv2.VideoCapture(index)
        if not cap.read()[0]:
            break
        arr.append(f"Caméra {index}")
        cap.release()
        index += 1
    return arr
" if lang == 'Fr' else 58 | "Selected Narrator Voice ID: " + content + "
", unsafe_allow_html=True) 59 | 60 | return selected_voice_id 61 | 62 | def speak(self, text): 63 | # Split the text into text and code segments 64 | segments = split_text_and_code(text) 65 | 66 | # Read Voice on the .txt file 67 | filename_voice = 'save_config_txt/select_voice.txt' 68 | if os.path.exists(filename_voice): 69 | with open(filename_voice, 'r') as file: 70 | voice = file.read().strip() 71 | else: 72 | st.sidebar.error("Veuillez aller à la page configuration pour choisir une voix." if self.lang == 'Fr' else 73 | "Please go to the configuration page to choose a voice.") 74 | return 75 | 76 | for segment in segments: 77 | # Check if the segment is code 78 | if segment.startswith('```') and segment.endswith('```'): 79 | st.sidebar.warning("Code détecté, pas lu à haute voix." if self.lang == 'Fr' else "Code detected, not reading out loud.") 80 | else: 81 | # Create the directories if they don't exist 82 | directory = "functions/config_page/temp_output_voice/" 83 | if not os.path.exists(directory): 84 | os.makedirs(directory) 85 | 86 | # Create the full file path 87 | full_path = os.path.join(directory, "voice_output.wav") 88 | 89 | # If the file already exists, remove it 90 | if os.path.exists(full_path): 91 | os.remove(full_path) 92 | 93 | # Set narrator voice 94 | self.engine.setProperty('voice', voice) 95 | 96 | # Convert text to speech and save it to a file 97 | self.engine.save_to_file(segment, full_path) 98 | 99 | # Wait for any pending speech to complete 100 | self.engine.runAndWait() 101 | 102 | # Create an instance of the class and call the play_audio method 103 | visual_voice = VisualSyntheticVoice() 104 | visual_voice.play_audio(filename=full_path) 105 | -------------------------------------------------------------------------------- /functions/config_page/selection_microphone.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import pyaudio 3 | import os 4 | 5 | 6 | def 
def get_microphone_list():
    """Return ``{device_index: device_name}`` for every input-capable device on
    host API 0 (index keys are real PyAudio device indices)."""
    p = pyaudio.PyAudio()
    info = p.get_host_api_info_by_index(0)
    numdevices = info.get('deviceCount')
    devices = {}
    for i in range(0, numdevices):
        # Keep only devices that can actually record (have input channels).
        if (p.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels')) > 0:
            devices[i] = p.get_device_info_by_host_api_device_index(0, i).get('name')
    return devices


def select_microphone(lang):
    """Let the user pick an input microphone and persist its device index to
    ``save_config_txt/select_microphone.txt``.

    Returns:
        int | None: the selected PyAudio device index, or None if nothing
        was selected yet.
    """
    devices = get_microphone_list()
    # Leading blank entry means "nothing selected yet".
    device_names = [''] + [devices[i] for i in devices]
    config_txt_file = "save_config_txt/select_microphone.txt"

    label = 'Sélectionnez le microphone à utiliser' if lang == "Fr" else 'Select the microphone to use'
    selected_device_name = st.selectbox(label, device_names, index=0)

    # If the user has not selected a microphone, do nothing.
    if selected_device_name == '':
        return None

    # FIX: the original stored list(devices.values()).index(name), i.e. the
    # ordinal position inside the *filtered* list of input devices — not the
    # real PyAudio device index. record_audio later opens that number with
    # input_device_index, so the wrong device was opened whenever input
    # devices were not numbered contiguously from 0. Store the dict key (the
    # actual device index) instead.
    selected_device_index = next(idx for idx, name in devices.items() if name == selected_device_name)

    # Persist the selection for the rest of the app.
    with open(config_txt_file, 'w') as f:
        f.write(str(selected_device_index))

    # Read back and display what was saved.
    with open(config_txt_file, 'r') as f:
        content = f.read()
    # NOTE(review): HTML around this markdown was lost in this copy; a trailing
    # <br> is assumed (unsafe_allow_html=True was already present) — confirm.
    st.markdown("Index du Microphone selectionné: " + content + "<br>" if lang == 'Fr' else
                "Selected Microphone Index: " + content + "<br>", unsafe_allow_html=True)

    return selected_device_index
", unsafe_allow_html=True) 42 | 43 | return selected_device_index 44 | 45 | def get_microphone(lang): 46 | # Read the microphone frome the .txt file 47 | filename_microphone = 'save_config_txt/select_microphone.txt' 48 | if os.path.exists(filename_microphone): 49 | with open(filename_microphone, 'r') as file: 50 | device = int(file.read().strip()) 51 | else: 52 | st.sidebar.error("Veuillez aller à la page configuration pour définir le microphone." if lang == 'Fr' else 53 | "Please go to the configuration page to set the microphone.") 54 | 55 | return device -------------------------------------------------------------------------------- /functions/config_page/speech_to_text/record.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import pyaudio 3 | import wave 4 | import numpy as np 5 | import os 6 | import collections 7 | 8 | 9 | message_placeholder = st.sidebar.empty() # Message placeholders for record 10 | def record_audio(language="En", filename="functions/config_page/speech_to_text/temp_audio/audio.wav", 11 | device_index=0, rate=44100, chunk=1024, threshold=500, pre_recording_buffer_length=2): 12 | if not os.path.exists(os.path.dirname(filename)): 13 | os.makedirs(os.path.dirname(filename)) 14 | 15 | # Read the device index from the 16 | with open('save_config_txt/select_microphone.txt', 'r') as file: 17 | device_index = int(file.read().strip()) 18 | 19 | # Read the threshold from the 20 | if os.path.exists('save_config_txt/mic_record_thresholds.txt'): 21 | with open('save_config_txt/mic_record_thresholds.txt', 'r') as file: 22 | threshold = int(file.read().strip()) 23 | 24 | p = pyaudio.PyAudio() 25 | stream = p.open(format=pyaudio.paInt16, 26 | channels=1, 27 | rate=rate, 28 | input=True, 29 | frames_per_buffer=chunk, 30 | input_device_index=device_index) 31 | 32 | message_placeholder.success("Écoute.." 
if language == 'Fr' else "Listen..") 33 | frames = collections.deque(maxlen=int(rate / chunk * pre_recording_buffer_length)) # buffer to store 2 seconds of audio 34 | recording_frames = [] 35 | recording = False 36 | silence_count = 0 37 | while True: 38 | data = stream.read(chunk) 39 | rms = np.linalg.norm(np.frombuffer(data, dtype=np.int16)) / np.sqrt(len(data)) 40 | frames.append(data) 41 | if rms >= threshold: 42 | if not recording: # start recording 43 | recording = True 44 | recording_frames.extend(frames) # add the buffered audio 45 | silence_count = 0 46 | elif recording and rms < threshold: 47 | silence_count += 1 48 | if silence_count > rate / chunk * 3: # if 3 seconds of silence, stop recording 49 | break 50 | if recording: 51 | recording_frames.append(data) 52 | 53 | stream.stop_stream() 54 | stream.close() 55 | p.terminate() 56 | 57 | # Only create the file if there is audio data 58 | if recording_frames: 59 | message_placeholder.warning("Transcription en cours.." if language == 'Fr' else "Transcription in progress..") 60 | wf = wave.open(filename, 'wb') 61 | wf.setnchannels(1) 62 | wf.setsampwidth(p.get_sample_size(pyaudio.paInt16)) 63 | wf.setframerate(rate) 64 | wf.writeframes(b''.join(recording_frames)) 65 | wf.close() 66 | return True 67 | else: 68 | message_placeholder.error("Pas d'audio détectez" if language == 'Fr' else "No audio detected.") 69 | return False -------------------------------------------------------------------------------- /functions/config_page/speech_to_text/speech.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import string 3 | from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline 4 | 5 | 6 | punctuation_keep = "".join([char for char in string.punctuation if char not in ["'", '"', "-"]]) 7 | translator = str.maketrans('', '', punctuation_keep) 8 | 9 | class SpeechToText: 10 | def __init__(self): 11 | self.device = "cuda:0" if 
class VisualSyntheticVoice:
    """Small pygame window drawing a sine wave that follows the loudness of a
    .wav file while it plays."""

    def __init__(self):
        self.screen_width = 260
        self.screen_height = 260
        self.CHUNK = 380  # higher CHUNK value -> faster-moving sinusoid
        pygame.init()
        pygame.display.set_caption("Synthetic Voice")
        self.screen = pygame.display.set_mode((self.screen_width, self.screen_height))
        self.clock = pygame.time.Clock()

    def draw_sine_wave(self, amplitude):
        """Render one frame: a sine curve scaled by *amplitude*, or a flat
        midline when the signal is quiet."""
        self.screen.fill((0, 0, 0))
        curve = []
        if amplitude > 10:
            for x in range(self.screen_width):
                y = self.screen_height / 2 + int(amplitude * math.sin(x * 0.05))
                curve.append((x, y))
        else:
            # Too quiet: just draw the horizontal midline.
            curve.append((0, self.screen_height / 2))
            curve.append((self.screen_width, self.screen_height / 2))
        pygame.draw.lines(self.screen, (0, 189, 255), False, curve, 2)  # cyan lines
        pygame.display.flip()

    def play_audio(self, filename="functions/config_page/temp_output_voice/voice_output.wav"):
        """Play *filename* on a background thread while animating its waveform
        in the pygame window until playback data runs out or the window closes."""
        wave_obj = sa.WaveObject.from_wave_file(filename)

        def play_audio_thread():
            # Blocking playback runs off the visualization loop.
            play_obj = wave_obj.play()
            play_obj.wait_done()

        audio_thread = threading.Thread(target=play_audio_thread)
        audio_thread.start()

        # Read the same file in parallel to drive the visualization.
        wav_file = wave.open(filename, 'rb')

        running = True
        while running:
            data = wav_file.readframes(self.CHUNK)
            if not data:
                break
            samples = np.abs(np.frombuffer(data, dtype=np.int16))
            samples = np.square(samples)
            if np.isnan(samples).any() or np.isinf(samples).any():
                samples = np.nan_to_num(samples)  # replace NaN or inf values with zero
            mean_val = np.mean(samples)
            # Small epsilon keeps sqrt numerically stable near zero.
            rms = np.sqrt(mean_val + 1e-10) if mean_val >= 0 else 0
            self.draw_sine_wave(rms)

            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
            self.clock.tick(60)
        wav_file.close()
-------------------------------------------------------------------------------- 1 | from functions.discussion_page.discussion_mode import start_discussion 2 | 3 | 4 | class DiscussionCommand: 5 | def __init__(self, listen, device, lang, talk, model_path, hist_dir): 6 | self.listen = listen 7 | self.device = device 8 | self.lang = lang 9 | self.talk = talk 10 | self.model_path = model_path 11 | self.hist_dir = hist_dir 12 | 13 | def launch_discussion_mode(self): 14 | text_llm_keywords = ['lumen passe en mode discussion', 'lumen passe en discussion', 'lumen passage en mode discussion', 15 | 'lumen mode discussion', 'lumen switch to discussion mode', 'lumen switch to discussion', 'lumen discussion mode'] 16 | if any(keyword in self.listen for keyword in text_llm_keywords): 17 | self.talk("Mode Discussion Activé" if self.lang == 'Fr'else "Discussion Mode Activated") 18 | start_discussion(self.device, self.lang, self.talk, self.model_path, self.hist_dir) -------------------------------------------------------------------------------- /functions/discussion_page/discussion_mode.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import os 3 | from functions.config_page.speech_to_text.speech import SpeechToText 4 | from functions.config_page.speech_to_text.record import record_audio 5 | from functions._functions_global_.llm_model_functions.llm_generate import LLMGenerate 6 | from functions._functions_global_.llm_model_functions.llm_save_hist import save_conversation 7 | 8 | 9 | def start_discussion(device, lang, talk, model_path, hist_dir): 10 | llm = LLMGenerate(talk) 11 | conversation_history = [] 12 | speech_to_text = SpeechToText() 13 | working = True 14 | filename_temp_audio = 'functions/config_page/speech_to_text/temp_audio/audio.wav' 15 | 16 | if os.path.isfile(model_path): 17 | with open(model_path, 'r') as file: 18 | llm_model = file.read().strip() 19 | else: 20 | st.sidebar.error("Veuillez 
configurer le modèle à utiliser dans la page discussion." if lang == 'Fr' else 21 | "Please configure the template to be used in the discussion page.") 22 | 23 | while True: 24 | if working: 25 | st.markdown("Mode texte LLM..
" if lang == 'Fr' else 26 | "Text LLM mode..
", unsafe_allow_html=True) 27 | record_audio(language=lang, device_index=device) 28 | listen = speech_to_text.transcribe(filename_temp_audio) 29 | 30 | # Check if the user wants to save the conversation 31 | detect_save_keyords = ['sauvegarde notre discussion', 'sauvegarde notre conversation', 'sauvegarde la discussion', 'sauvegarde la conversation', 32 | 'save our discussion', 'save our conversation', 'save the discussion', 'save the conversation'] 33 | if any(keyword in listen for keyword in detect_save_keyords): 34 | talk("La conversation a été sauvegardé" if lang=='Fr' else "The conversation has been saved") 35 | st.success("Conversation sauvegardé" if lang == 'Fr' else "Conversation saved.") 36 | save_conversation(chat_history, hist_dir) 37 | continue 38 | 39 | # Check if the user wants to stop the LLM conversation for return to basic voice detection 40 | detect_stop_llm_keyords = ['c\'est bon tu peux arrêter', 'tu peux arrêter', 'désactive llm', 41 | 'passe en mode classique', 'passage en mode classique', 42 | 'passe classique','disable llm', 'switch to classic mode', 43 | 'switch classic mode', 'it\'s okay you can stop', 'it is okay you can stop', 'you can stop'] 44 | if any(keyword in listen for keyword in detect_stop_llm_keyords): 45 | talk("Passage en mode exécution de commandes" if lang == 'Fr' else "Switching to commands execution mode") 46 | st.error("Arrêt de la conversation avec le LLM" if lang == 'Fr' else "Stopping the conversation with the LLM.") 47 | working = False 48 | break 49 | 50 | # Generate a response 51 | lang_preprompt = "Réponds en français, " if lang == 'Fr' else "Respond in English, " 52 | user_input_str = str(listen) 53 | full_prompt_user = lang_preprompt + user_input_str 54 | _, chat_history = llm.generate_response(llm_model, full_prompt_user, conversation_history) -------------------------------------------------------------------------------- /functions/menu_page/commands/app_chrono.py: 
-------------------------------------------------------------------------------- 1 | import platform 2 | import subprocess 3 | 4 | 5 | class ChronoCommand: 6 | def __init__(self, listen, language, talk): 7 | self.listen = listen 8 | self.language = language 9 | self.talk = talk 10 | self.os = platform.system() 11 | 12 | def start_chrono(self): 13 | start_chronometer_keywords = ['lumen démarre le chronomètre', 'lumen démarre le chrono', 14 | 'lumen start the chronometer', 'lumen start the chrono'] 15 | if any(keyword in self.listen for keyword in start_chronometer_keywords): 16 | if self.os == 'Windows': 17 | if self.language == 'Fr': 18 | self.talk("Chronomètre ouvert.") 19 | else: 20 | self.talk("Chronometer open.") 21 | subprocess.Popen('start explorer.exe shell:AppsFolder\Microsoft.WindowsAlarms_8wekyb3d8bbwe!App', shell=True) 22 | elif self.os == 'Linux': 23 | if self.language == 'Fr': 24 | self.talk("Chronomètre ouvert.") 25 | else: 26 | self.talk("Chronometer open.") 27 | subprocess.Popen(['gnome-clocks']) 28 | elif self.os == 'Darwin': 29 | if self.language == 'Fr': 30 | self.talk("Chronomètre ouvert.") 31 | else: 32 | self.talk("Chronometer open.") 33 | subprocess.Popen(['open', '/Applications/Clock.app']) 34 | -------------------------------------------------------------------------------- /functions/menu_page/commands/audio_gestion.py: -------------------------------------------------------------------------------- 1 | from pycaw.pycaw import AudioUtilities, ISimpleAudioVolume 2 | 3 | 4 | def set_volume(vol): 5 | sessions = AudioUtilities.GetAllSessions() 6 | for session in sessions: 7 | interface = session._ctl.QueryInterface(ISimpleAudioVolume) 8 | # set the volume (0.0 to 1.0) 9 | interface.SetMasterVolume(vol, None) 10 | 11 | def change_volume(delta): 12 | sessions = AudioUtilities.GetAllSessions() 13 | for session in sessions: 14 | interface = session._ctl.QueryInterface(ISimpleAudioVolume) 15 | # Get the actual volume 16 | current_volume = 
interface.GetMasterVolume() 17 | # Calcul the new volume 18 | new_volume = max(0.0, min(1.0, current_volume + delta)) 19 | # Set the volume 20 | interface.SetMasterVolume(new_volume, None) 21 | 22 | class VolumeCommands: 23 | def __init__(self, listen, language, talk): 24 | self.listen = listen 25 | self.language = language 26 | self.talk = talk 27 | 28 | def mute(self): 29 | # Volume Mute 30 | mute_keywords = ['lumen mute', 'lumen silence', 'lumen mode silence', 'lumen silence mode'] 31 | if any(keyword in self.listen for keyword in mute_keywords): 32 | self.talk('Mute') 33 | set_volume(0.0) 34 | 35 | def demute(self): 36 | # Volume deMute 37 | demute_keywords = ['lumen des mutes', 'lumen remets le volume', 'lumen demute', 'lumen de mute'] 38 | if any(keyword in self.listen for keyword in demute_keywords): 39 | if self.language == 'Fr': 40 | self.talk('Volume remis') 41 | else: 42 | self.talk('Volume restarted') 43 | set_volume(0.5) 44 | 45 | def volume_increase(self): 46 | # Volume Increase 47 | volume_increase_keywords = ['lumen augmente le volume', 'lumen monte le volume', 'lumen increase the volume'] 48 | if any(keyword in self.listen for keyword in volume_increase_keywords): 49 | if self.language == 'Fr': 50 | self.talk('Volume augmenté') 51 | else: 52 | self.talk('Volume increased') 53 | change_volume(0.2) 54 | 55 | def volume_decrease(self): 56 | # Volume Decreases 57 | volume_decrease_keywords = ['lumen diminue le volume', 'lumen descend le volume', 'lumen decreases the volume'] 58 | if any(keyword in self.listen for keyword in volume_decrease_keywords): 59 | if self.language == 'Fr': 60 | self.talk('Volume diminué') 61 | else: 62 | self.talk('Volume decreased') 63 | change_volume(-0.2) -------------------------------------------------------------------------------- /functions/menu_page/commands/cam/cam_index.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def read_cam_index(): 5 | file_path = 
'save_config_txt/chosen_cam.txt' 6 | if os.path.exists(file_path): 7 | with open(file_path, 'r') as file: 8 | cam_index = int(file.read().strip()) 9 | return cam_index 10 | else: 11 | return None -------------------------------------------------------------------------------- /functions/menu_page/commands/cam/screen_cam.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import cv2 3 | import os 4 | import time 5 | 6 | from functions.menu_page.commands.cam.cam_index import read_cam_index 7 | 8 | 9 | def capture_camera(cam_index): 10 | if not os.path.exists('photos'): 11 | os.makedirs('photos') 12 | cap = cv2.VideoCapture(int(cam_index)) 13 | ret, frame = cap.read() 14 | time.sleep(1) 15 | cv2.imwrite('photos/camera.png', frame) 16 | cap.release() 17 | 18 | class CamCommand: 19 | def __init__(self, listen, device, language, talk): 20 | self.listen = listen 21 | self.device = device 22 | self.language = language 23 | self.talk = talk 24 | 25 | def screen_cam(self): 26 | screen_keywords = ['lumen screen avec la caméra', 'lumen screen avec la cam', 'lumen screen cam', 'lumen screencam', 27 | 'lumen screen with camera', 'lumen cam screen'] 28 | for keyword in screen_keywords: 29 | if keyword in self.listen: 30 | if self.language == 'Fr': 31 | self.talk("Lancement du screen avec caméra") 32 | else: 33 | self.talk("Launch of the screen with camera") 34 | 35 | cam_index = read_cam_index() 36 | if cam_index is not None: 37 | capture_camera(cam_index) 38 | if self.language == 'Fr': 39 | self.talk("Screen de la caméra effectué") 40 | else: 41 | self.talk("Camera screen taken") 42 | return True 43 | else: 44 | if self.language == 'Fr': 45 | st.warning("Veuillez aller dans la page de configuration pour configurer une caméra.") 46 | else: 47 | st.warning("Please go to the configuration page to set up a camera.") 48 | return False 49 | -------------------------------------------------------------------------------- 
/functions/menu_page/commands/cam/video_capture.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import os 3 | import cv2 4 | from pathlib import Path 5 | 6 | from functions.config_page.speech_to_text.record import record_audio 7 | from functions.config_page.speech_to_text.speech import SpeechToText 8 | from functions.menu_page.commands.cam.cam_index import read_cam_index 9 | 10 | 11 | def video_capture(listen, device, lang, talk): 12 | speech_to_text = SpeechToText() 13 | filename_temp_audio = 'functions/config_page/speech_to_text/temp_audio/audio.wav' 14 | 15 | if lang == 'Fr': 16 | talk("Voulez-vous définir un titre pour votre vidéo ?") 17 | else: 18 | talk("Do you want to set a title for your video ?") 19 | while True: 20 | record_audio(language=lang, device_index=device) 21 | listen = speech_to_text.transcribe(filename_temp_audio) 22 | user_response = listen 23 | st.write(user_response) 24 | 25 | if 'oui' in user_response or 'yes' in user_response: 26 | user_response = 'yes' 27 | break 28 | elif 'non' in user_response or 'no' in user_response: 29 | user_response = 'no' 30 | break 31 | else: 32 | if lang == 'Fr': 33 | talk("Veuillez répondre Oui ou Non.") 34 | else: 35 | talk("Please answer yes or no.") 36 | 37 | if user_response == 'yes': 38 | if lang == 'Fr': 39 | talk("Veuillez indiquer le titre de la vidéo.") 40 | else: 41 | talk("Please indicate the title of the video.") 42 | record_audio(language=lang, device_index=device) 43 | listen = speech_to_text.transcribe(filename_temp_audio) 44 | title_video_input = listen 45 | st.write(title_video_input) 46 | 47 | title_video_command = title_video_input.strip() or "video_save.mp4" 48 | title_video = title_video_command if title_video_command.endswith(".mp4") else title_video_command + ".mp4" 49 | else: 50 | title_video = "video_save.mp4" 51 | 52 | num_cam = read_cam_index() 53 | fps = 60 54 | desktop_path = str(Path.home() / "Downloads") 55 | 
video_path = os.path.join(desktop_path, title_video) 56 | cap = cv2.VideoCapture(num_cam) 57 | if not cap.isOpened(): 58 | if lang == 'Fr': 59 | talk("Erreur : Impossible d'ouvrir la caméra.") 60 | else: 61 | talk("Error: Unable to open the camera.") 62 | return 63 | 64 | width = int(cap.get(3)) 65 | height = int(cap.get(4)) 66 | 67 | # Set video codec and creator 68 | fourcc = cv2.VideoWriter_fourcc(*'mp4v') 69 | video_writer = cv2.VideoWriter(video_path, fourcc, fps, (width, height)) 70 | if lang == 'Fr': 71 | talk("Lancement de la vidéo") 72 | else: 73 | talk("Video launch") 74 | while True: 75 | ret, frame = cap.read() 76 | if not ret: 77 | if lang == 'Fr': 78 | st.error("Erreur lecture de la caméra.") 79 | else: 80 | st.error("Error reading camera.") 81 | break 82 | 83 | cv2.imshow("Live video (press 'space' to exit)", frame) 84 | video_writer.write(frame) 85 | # Stop video capture when 'space' key is pressed 86 | if cv2.waitKey(1) == 32: 87 | break 88 | listen = "" 89 | cap.release() 90 | video_writer.release() 91 | cv2.destroyAllWindows() -------------------------------------------------------------------------------- /functions/menu_page/commands/cam/video_command.py: -------------------------------------------------------------------------------- 1 | from functions.menu_page.commands.cam.video_capture import video_capture 2 | 3 | 4 | class VideoCommand: 5 | def __init__(self, listen, device, language, talk): 6 | self.listen = listen 7 | self.device = device 8 | self.language = language 9 | self.talk = talk 10 | 11 | def start_video(self): 12 | video_keywords = ['lumen commence à filmer', 'lumen lance la vidéo', 'lumen film', 'lumen filme', 13 | 'lumen start filming', 'lumen start video capture', 'lumen start a video'] 14 | for keyword in video_keywords: 15 | if keyword in self.listen: 16 | if self.language == 'Fr': 17 | self.talk("Lancement de la vidéo") 18 | else: 19 | self.talk("Starting the video") 20 | video_capture(self.listen, self.device, 
self.language, self.talk) -------------------------------------------------------------------------------- /functions/menu_page/commands/cybersec_info_system.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import platform 3 | import subprocess 4 | 5 | 6 | def run_command(command): 7 | """ 8 | Runs a command in a new terminal. 9 | """ 10 | if platform.system() == 'Windows': 11 | subprocess.Popen(['start', 'cmd', '/k', command], shell=True) 12 | elif platform.system() == 'Linux': 13 | subprocess.Popen(['gnome-terminal', '-e', command]) 14 | elif platform.system() == 'Darwin': 15 | subprocess.Popen(['open', '-a', 'Terminal.app', command]) 16 | 17 | class InfoCommands: 18 | def __init__(self, listen): 19 | self.listen = listen 20 | 21 | def ip_config(self): 22 | """ 23 | Displays the computer's IP configuration. 24 | """ 25 | ip_keywords = ['lumen quel est mon ip', 'lumen quelle est mon ip', 'lumen mon ip', 'lumen my ip', 26 | 'lumen what is my ip', 'lumen what\'s my ip'] 27 | if any(keyword in self.listen for keyword in ip_keywords): 28 | if platform.system() == 'Windows': 29 | run_command('ipconfig') 30 | elif platform.system() == 'Linux': 31 | run_command('ifconfig') 32 | elif platform.system() == 'Darwin': 33 | run_command('ifconfig') 34 | 35 | def system_info(self): 36 | """ 37 | Displays computer system information. 
38 | """ 39 | sys_info_keywords = ['lumen informations sur mon système', 'lumen information sur mon système', 40 | 'lumen informations système', 'lumen information système', 41 | 'lumen informations of my system', 'lumen information of my system', 42 | 'lumen informations system', 'lumen information system'] 43 | if any(keyword in self.listen for keyword in sys_info_keywords): 44 | if platform.system() == 'Windows': 45 | run_command('systeminfo') 46 | elif platform.system() == 'Linux': 47 | run_command('uname -a') 48 | elif platform.system() == 'Darwin': 49 | run_command('system_profiler SPSoftwareDataType') 50 | 51 | def net_info(self): 52 | """ 53 | Displays netstat network information. 54 | """ 55 | netstat_info_keywords = ['lumen informations sur le netstat', 'lumen informations netstat', 56 | 'lumen informations of my netstat', 'lumen informations netstat', 57 | 'lumen information sur le netstat', 'lumen information netstat', 58 | 'lumen information of my netstat', 'lumen information netstat'] 59 | if any(keyword in self.listen for keyword in netstat_info_keywords): 60 | if platform.system() == 'Windows': 61 | run_command('netstat -a') 62 | elif platform.system() == 'Linux': 63 | run_command('netstat -a') 64 | elif platform.system() == 'Darwin': 65 | run_command('netstat -a') 66 | 67 | def arp_info(self): 68 | """ 69 | Displays arp network information. 
70 | """ 71 | arp_info_keywords = ['lumen informations sur l\'arp', 'lumen informations arp', 72 | 'lumen informations of my arp', 'lumen informations arp', 73 | 'lumen information sur l\'arp', 'lumen information arp', 74 | 'lumen information of my arp', 'lumen information arp'] 75 | if any(keyword in self.listen for keyword in arp_info_keywords): 76 | if platform.system() == 'Windows': 77 | run_command('arp -a') 78 | elif platform.system() == 'Linux': 79 | run_command('arp -a') 80 | elif platform.system() == 'Darwin': 81 | run_command('arp -a') 82 | 83 | def route_info(self): 84 | """ 85 | Displays route information. 86 | """ 87 | route_info_keywords = ['lumen informations route', 'lumen informations routes', 88 | 'lumen information route', 'lumen information routes'] 89 | if any(keyword in self.listen for keyword in route_info_keywords): 90 | if platform.system() == 'Windows': 91 | run_command('route print') 92 | elif platform.system() == 'Linux' or platform.system() == 'Darwin': 93 | run_command('netstat -r') 94 | else: 95 | st.sidebar.error("The route print command is not available on this OS.") 96 | 97 | def schtasks_info(self): 98 | """ 99 | Displays scheduled tasks information. 100 | """ 101 | tasks_info_keywords = ['lumen informations task', 'lumen informations tasks', 102 | 'lumen information task', 'lumen information tasks'] 103 | if any(keyword in self.listen for keyword in tasks_info_keywords): 104 | if platform.system() == 'Windows': 105 | run_command('schtasks /query') 106 | else: 107 | st.sidebar.error("The schtasks /query command is not available on this OS.") 108 | 109 | def driver_info(self): 110 | """ 111 | Displays driver information. 
112 | """ 113 | driver_info_keywords = ['lumen informations driver', 'lumen informations drivers', 114 | 'lumen driver informations', 'lumen drivers informations', 115 | 'lumen information driver', 'lumen information drivers', 116 | 'lumen driver information', 'lumen drivers information'] 117 | if any(keyword in self.listen for keyword in driver_info_keywords): 118 | if platform.system() == 'Windows': 119 | run_command('driverquery') 120 | else: 121 | st.sidebar.error("The driverquery command is not available on this operating system.") 122 | 123 | def msinfo32_info(self): 124 | """ 125 | Displays msinfo32 information. 126 | """ 127 | msinfo_info_keywords = ['lumen informations ms', 'lumen informations machine système', 128 | 'lumen informations système machine', 'lumen informations machine system', 129 | 'lumen information ms', 'lumen information machine système', 130 | 'lumen information système machine', 'lumen information machine system'] 131 | if any(keyword in self.listen for keyword in msinfo_info_keywords): 132 | if platform.system() == 'Windows': 133 | run_command('msinfo32') 134 | else: 135 | st.sidebar.error("The msinfo32 command is not available on this operating system.") -------------------------------------------------------------------------------- /functions/menu_page/commands/data_time.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import locale 3 | 4 | 5 | def time_in_locale(language): 6 | if language == 'Fr': 7 | locale.setlocale(locale.LC_TIME, 'fr_FR.UTF-8') # Set the locale to French 8 | formatted_time = datetime.datetime.now().strftime('%H:%M:%S') 9 | return formatted_time 10 | 11 | elif language == 'En': 12 | locale.setlocale(locale.LC_TIME, 'en_US.UTF-8') # Set the locale to English 13 | formatted_time = datetime.datetime.now().strftime('%H:%M:%S') 14 | return formatted_time 15 | 16 | def date_in_locale(language): 17 | if language == 'Fr': 18 | locale.setlocale(locale.LC_TIME, 
'fr_FR') # Set the locale to French 19 | current_datetime = datetime.datetime.now() 20 | formatted_datetime = current_datetime.strftime('%A %d %B %Y') 21 | return formatted_datetime 22 | 23 | elif language == 'En': 24 | locale.setlocale(locale.LC_TIME, 'en_US') # Set the locale to English 25 | current_datetime = datetime.datetime.now() 26 | formatted_datetime = current_datetime.strftime('%A %d %B %Y') 27 | return formatted_datetime 28 | 29 | class TimeCommands: 30 | def __init__(self, listen, language, talk): 31 | self.listen = listen 32 | self.language = language 33 | self.talk = talk 34 | 35 | def time_command(self): 36 | # Current Time 37 | detect_time_keywords = ['lumen il est quelle heure', 'lumen quelle heure est-il', 'lumen l\'heure actuelle', 38 | 'lumen what time is it', 'lumen actual time'] 39 | if any(keyword in self.listen for keyword in detect_time_keywords): 40 | formatted_time = time_in_locale(self.language) 41 | self.talk(formatted_time) 42 | 43 | def date_command(self): 44 | # Current Date 45 | detect_datetime_keywords = ['lumen date actuelle', 'lumen date d\'aujourd\'hui', 46 | 'lumen current date', 'lumen today\'s date', 'lumen date of today'] 47 | if any(keyword in self.listen for keyword in detect_datetime_keywords): 48 | formatted_datetime = date_in_locale(self.language) 49 | self.talk(formatted_datetime) -------------------------------------------------------------------------------- /functions/menu_page/commands/json/load_json.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | 5 | def load_sentences(): 6 | sentences = {} 7 | json_dir = os.path.join('config_json') 8 | target_file = 'text.json' 9 | 10 | if not os.path.exists(json_dir): 11 | os.makedirs(json_dir) 12 | 13 | if target_file in os.listdir(json_dir): 14 | with open(os.path.join(json_dir, target_file), 'r', encoding='utf-8') as f: 15 | if f.read().strip(): 16 | f.seek(0) 17 | sentences.update(json.load(f)) 18 | 
return sentences 19 | 20 | def load_app_paths(): 21 | app_paths = {} 22 | json_dir = os.path.join('config_json') 23 | target_file = 'app.json' 24 | 25 | if target_file in os.listdir(json_dir): 26 | with open(os.path.join(json_dir, target_file), 'r', encoding='utf-8') as f: 27 | if f.read().strip(): 28 | f.seek(0) 29 | app_paths.update(json.load(f)) 30 | return app_paths 31 | -------------------------------------------------------------------------------- /functions/menu_page/commands/json/text_add_json.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import os 3 | import json 4 | from functions.config_page.speech_to_text.record import record_audio 5 | from functions.config_page.speech_to_text.speech import SpeechToText 6 | 7 | 8 | class TextAddCommands: 9 | def __init__(self, listen, device, language, talk): 10 | self.listen = listen 11 | self.device = device 12 | self.language = language 13 | self.talk = talk 14 | 15 | def add_text(self): 16 | filename_temp_audio = 'functions/config_page/speech_to_text/temp_audio/audio.wav' 17 | add_text_keywords = ['lumen ajout de texte', 'lumen ajout de text', 'lumen ajoute texte', 'lumen ajoute texte', 18 | 'lumen ajoute du texte', 'lumen text add', 'lumen add text', 'lumen add of text', 'lumen add some text'] 19 | if any(keyword in self.listen for keyword in add_text_keywords): 20 | dir_path = 'config_json' 21 | json_app = 'text.json' 22 | app_path = os.path.join(dir_path, json_app) 23 | speech_to_text = SpeechToText() 24 | if not os.path.exists(app_path): 25 | with open(app_path, 'w', encoding='utf-8') as f: 26 | json.dump({}, f) 27 | if self.language == 'Fr': 28 | st.sidebar.warning("Veuillez indiquer le texte déclencheur souhaité") 29 | self.talk("Veuillez indiquer le texte déclencheur souhaité") 30 | else: 31 | st.sidebar.warning("Please indicate the trigger text you want") 32 | self.talk("Please indicate the trigger text you want") 33 | 34 | # Record the 
trigger phrase 35 | record_audio(language=self.language, device_index=self.device) 36 | listen = speech_to_text.transcribe(filename_temp_audio).strip() 37 | st.write(listen) 38 | trigger_phrase = listen 39 | 40 | # Save the trigger phrase to the JSON file 41 | with open(app_path, 'r+', encoding='utf-8') as f: 42 | try: 43 | data = json.load(f) 44 | except json.JSONDecodeError: # If the file is empty 45 | data = {} 46 | data[trigger_phrase] = None # Use the trigger phrase as the key 47 | f.seek(0) # Move the cursor to the beginning of the file 48 | f.truncate() # Remove the rest of the file's content 49 | json.dump(data, f, ensure_ascii=False) 50 | 51 | # Ask the user for the response phrase 52 | if self.language == 'Fr': 53 | st.sidebar.warning("Quelle phrase de réponse voulez-vous avoir ?") 54 | self.talk("Quelle phrase de réponse voulez-vous avoir ?") 55 | else: 56 | st.sidebar.warning("What response sentence do you want to have ?") 57 | self.talk("What response sentence do you want to have ?") 58 | 59 | # Record the response phrase 60 | record_audio(device_index=self.device) 61 | listen = speech_to_text.transcribe(filename_temp_audio).strip() 62 | st.write(listen) 63 | response_phrase = listen 64 | 65 | # Save the response phrase to the JSON file 66 | with open(app_path, 'r+', encoding='utf-8') as f: 67 | data = json.load(f) 68 | data[trigger_phrase] = response_phrase 69 | f.seek(0) 70 | f.truncate() 71 | json.dump(data, f, indent=4, ensure_ascii=False) 72 | if self.language == 'Fr': 73 | st.sidebar.success("Texte ajouté") 74 | self.talk("Texte ajouté") 75 | else: 76 | st.sidebar.success("Text added") 77 | self.talk("Text added") -------------------------------------------------------------------------------- /functions/menu_page/commands/math_calcul.py: -------------------------------------------------------------------------------- 1 | import sympy 2 | import re 3 | 4 | 5 | def calculate(calc_command, language): 6 | # Define the words and their replacements 7 
| words = { 8 | 'Fr': ['moins', 'plus', 'divisé', 'x', 'fois', 'multiplié', 'au carré', 'puissance deux',\ 9 | 'puissance 2', 'sinusoide', 'cosinus', 'tangente', 'racine', 'intégrale', 'intégral', 'dérivée'], 10 | 'En': ['minus', 'plus', 'divided', 'x', 'times', 'multiplied', 'squared', 'power of two', 'power 2', 'sine',\ 11 | 'cosine', 'tangent', 'root', 'integral', 'derivative'] 12 | } 13 | replacements = { 14 | 'Fr': ['-', '+', '/', '*', '*', '*', '**2', '**2', '**2', 'sin', 'cos', 'tan', 'sqrt', 'integrate', 'integrate', 'diff'], 15 | 'En': ['-', '+', '/', '*', '*', '*', '**2', '**2', '**2', 'sin', 'cos', 'tan', 'sqrt', 'integrate', 'diff'] 16 | } 17 | 18 | # Extraction of numbers, mathematical operators and mathematical functions 19 | calc_command = re.findall(r"[\d]+|[\+\-\*\/]|" + "|".join(words[language]), calc_command) 20 | calc_command = ' '.join(calc_command) 21 | 22 | # Replacing words with their respective mathematical symbols 23 | for word, replacement in zip(words[language], replacements[language]): 24 | calc_command = calc_command.replace(word, replacement) 25 | 26 | # Added parentheses around arguments of mathematical functions 27 | calc_command = re.sub(r"(sin|cos|tan|sqrt|integrate|diff) (\d+)", r"\1(\2)", calc_command) 28 | 29 | try: 30 | result = sympy.sympify(calc_command) 31 | result = round(result, 2) 32 | return f"Cela fait {result}" if language == 'Fr' else f"This makes {result}" 33 | except Exception as e: 34 | return "Désolé, je n'ai pas pu effectuer le calcul." if language == 'Fr' else "Sorry, I couldn't do the calculation." 
35 | 36 | class CalculCommands: 37 | def __init__(self, listen, language, talk): 38 | self.listen = listen 39 | self.language = language 40 | self.talk = talk 41 | 42 | def calcul_command(self): 43 | # Calcul 44 | calc_keywords = ['lumen calcule', 'lumen calcul', 'lumen compute', 'lumen calculate'] 45 | if any(keyword in self.listen for keyword in calc_keywords): 46 | calc_command = self.listen.replace('lumen calcule', '').replace('lumen calcul', '').replace('lumen compute', '').replace('lumen calculate', '') 47 | self.talk(calculate(calc_command, self.language)) -------------------------------------------------------------------------------- /functions/menu_page/commands/screenshot.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pyautogui 3 | 4 | 5 | class ScreenCommand: 6 | def __init__(self, listen, language, talk): 7 | self.listen = listen 8 | self.language = language 9 | self.talk = talk 10 | 11 | def screen(self): 12 | screen_keywords = ['lumen prends un screen', 'lumen prends une capture d\'écran', 'lumen capture d\'écran', 13 | 'lumen prend un screen', 'lumen prend une capture d\'écran', 14 | 'lumen take a screen', 'lumen take a screenshot', 'lumen screenshot'] 15 | for keyword in screen_keywords: 16 | if keyword in self.listen: 17 | download_dir = str('photos/') 18 | if not os.path.exists(download_dir): 19 | os.makedirs(download_dir) 20 | base_filename = 'screenshot' 21 | extension = '.png' 22 | filename = f"{base_filename}{extension}" 23 | screenshot = pyautogui.screenshot() 24 | screenshot.save(os.path.join(download_dir, filename)) 25 | if self.language == 'Fr': 26 | self.talk("Capture d'écran effectuée") 27 | else: 28 | self.talk("Screenshot taken") 29 | return True -------------------------------------------------------------------------------- /functions/menu_page/commands/speech_to_en.py: -------------------------------------------------------------------------------- 1 | import streamlit 
class SpeechToEnCommand:
    """Voice command: live translation mode (detected language -> English)."""

    def __init__(self, listen, device, language, talk):
        self.listen = listen      # transcribed user speech
        self.device = device      # microphone device index
        self.language = language  # 'Fr' or 'En'
        self.talk = talk          # text-to-speech callback

    def translate(self):
        """Enter a loop that records speech, translates it to English and
        displays it, until the user says 'stop'.

        Saying 'activate the voice' makes the assistant also read each
        translation aloud; 'desactivate the voice' turns that off again.
        """
        filename_temp_audio = 'functions/config_page/speech_to_text/temp_audio/audio.wav'
        translate_keywords = ['lumen mode traduction', 'lumen passe en mode traduction',
                              'lumen translation mode', 'lumen switch to translation mode']
        for keyword in translate_keywords:
            if keyword in self.listen:
                if self.language == 'Fr':
                    st.sidebar.success("Mode Traduction (Langue détecté vers l'Anglais) [dire 'stop' pour arrêter de traduire]")
                    self.talk("Mode Traduction Activé")
                else:
                    st.sidebar.success("Translation Mode (Language detected to English) [Say 'stop' to stop the translation")
                    self.talk("Traduction Mode Activated")

                speech_to_text = SpeechToText()
                talking = False  # whether translations are spoken aloud
                while True:
                    record_audio(language=self.language, device_index=self.device)
                    listen = speech_to_text.translate_to_en(filename_temp_audio)
                    st.write(listen)

                    if talking:
                        self.talk(listen)

                    talking_on_keywords = ['activate the voice', 'activate voice', 'voice activate']
                    if any(keyword in listen for keyword in talking_on_keywords):
                        if self.language == 'Fr':
                            self.talk("Voix activée")
                        else:
                            self.talk("Voice activated")
                        talking = True

                    talking_off_keywords = ['desactivate the voice', 'desactivate voice', 'voice desactivate']
                    if any(keyword in listen for keyword in talking_off_keywords):
                        # Bug fix: this branch was a copy-paste of the activation
                        # branch — it announced "Voix activée"/"Voice activated"
                        # and set talking = True, so the voice could never be
                        # turned off once enabled.
                        if self.language == 'Fr':
                            self.talk("Voix désactivée")
                        else:
                            self.talk("Voice deactivated")
                        talking = False

                    stop_keywords = ['stop', 'stopp']
                    if any(keyword in listen for keyword in stop_keywords):
                        # NOTE(review): the English strings below say
                        # "Traduction" — probably meant "Translation"; left
                        # unchanged pending confirmation.
                        if self.language == 'Fr':
                            st.sidebar.success("Mode Traduction Désactivé")
                            self.talk("Mode Traduction Désactivé")
                        else:
                            st.sidebar.success("Traduction Mode Desactivated")
                            self.talk("Traduction Mode Desactivated")
                        break
", unsafe_allow_html=True) # Display on the interface the note taken 31 | 32 | # Talk to the user 33 | if self.language == 'Fr': 34 | self.talk("C'est noté") 35 | else: 36 | self.talk("Noted") -------------------------------------------------------------------------------- /functions/menu_page/commands/web/check_connection.py: -------------------------------------------------------------------------------- 1 | import socket 2 | 3 | 4 | def is_connected(): 5 | try: 6 | # if connected it's reachable 7 | socket.create_connection(("www.google.com", 80)) 8 | return True 9 | except OSError: 10 | pass 11 | return False -------------------------------------------------------------------------------- /functions/menu_page/commands/web/search_web.py: -------------------------------------------------------------------------------- 1 | import webbrowser 2 | from functions.menu_page.commands.web.check_connection import is_connected 3 | 4 | if is_connected(): 5 | import pywhatkit 6 | 7 | 8 | class WebCommands: 9 | def __init__(self, listen, language, talk): 10 | self.listen = listen 11 | self.language = language 12 | self.talk = talk 13 | 14 | def search_ytb(self): 15 | # Search on YouTube 16 | youtube_keywords = ['lumen cherche sur youtube', 'lumen recherche sur youtube', 'lumen rechercher sur youtube', 17 | 'lumen find on youtube', 'lumen find in youtube'] 18 | if any(keyword in self.listen for keyword in youtube_keywords): 19 | ytb_command = self.listen.replace('Open YouTube and find', '') 20 | self.talk(f"Je cherche cela de suite !") if self.language == 'Fr' else self.talk(f"I'm looking for that right away !") 21 | pywhatkit.playonyt(ytb_command) 22 | 23 | def search_google(self): 24 | # Google 25 | google_keywords = ['lumen cherche sur google', 'lumen recherche sur google', 'lumen find on google', 'lumen find in google'] 26 | for keyword in google_keywords: 27 | if keyword in self.listen: 28 | search = self.listen.replace(keyword, '').strip() 29 | if search.startswith('re 
class WebCommands:
    """Voice commands that open web searches (YouTube, Google, Wikipedia, Bing, ChatGPT)."""

    def __init__(self, listen, language, talk):
        self.listen = listen      # transcribed user speech
        self.language = language  # 'Fr' or 'En'
        self.talk = talk          # text-to-speech callback

    def search_ytb(self):
        """Play the requested query on YouTube via pywhatkit."""
        youtube_keywords = ['lumen cherche sur youtube', 'lumen recherche sur youtube', 'lumen rechercher sur youtube',
                            'lumen find on youtube', 'lumen find in youtube']
        # Bug fix: the original stripped the literal phrase
        # 'Open YouTube and find', which never matches any trigger above,
        # so the whole trigger text was sent to YouTube as the query.
        for keyword in youtube_keywords:
            if keyword in self.listen:
                ytb_command = self.listen.replace(keyword, '').strip()
                self.talk("Je cherche cela de suite !") if self.language == 'Fr' else self.talk("I'm looking for that right away !")
                pywhatkit.playonyt(ytb_command)
                break

    def search_google(self):
        """Open a Google search for the text following the trigger phrase."""
        google_keywords = ['lumen cherche sur google', 'lumen recherche sur google', 'lumen find on google', 'lumen find in google']
        for keyword in google_keywords:
            if keyword in self.listen:
                search = self.listen.replace(keyword, '').strip()
                # Drop a stray 're ' left over when the transcription split
                # 'recherche' around the matched keyword.
                if search.startswith('re '):
                    search = search[3:]
                url = "https://www.google.com/search?q=" + search
                self.talk(f"Recherche sur Google...{search}") if self.language == 'Fr' else self.talk(f"Search on Google...{search}")
                webbrowser.open(url)
                break

    def search_wikipedia(self):
        """Open a (French) Wikipedia page for the text following the trigger."""
        wikipedia_keywords = ['lumen cherche sur wikipédia', 'lumen recherche sur wikipédia', 'lumen recherche wikipédia',
                              'lumen cherche wikipédia', 'lumen find on wikipedia', 'lumen find in wikipedia']
        for keyword in wikipedia_keywords:
            if keyword in self.listen:
                search = self.listen.replace(keyword, '').strip()
                if search.startswith('re '):
                    search = search[3:]
                url = "https://fr.wikipedia.org/wiki/" + search
                self.talk(f"Recherche sur Wikipédia...{search}") if self.language == 'Fr' else self.talk(f"Search on Wikipedia...{search}")
                webbrowser.open(url)
                break

    def search_bing(self):
        """Open a Bing search for the text following the trigger phrase."""
        bing_keywords = ['lumen cherche sur bing', 'lumen recherche sur bing', 'lumen find on bing', 'lumen find in bing']
        for keyword in bing_keywords:
            if keyword in self.listen:
                search = self.listen.replace(keyword, '').strip()
                if search.startswith('re '):
                    search = search[3:]
                url = "https://www.bing.com/search?q=" + search
                self.talk(f"Recherche sur Bing...{search}") if self.language == 'Fr' else self.talk(f"Search on Bing...{search}")
                webbrowser.open(url)
                break

    def search_gpt(self):
        """Open the ChatGPT web UI."""
        openai_keywords = ['lumen ouvre chat ia', 'lumen ouvre chat gpt', 'lumen ouvre le chat gpt', 'lumen recherche sur chat ia',
                           'lumen search on ai chat', 'lumen cherche sur chat ia', 'lumen search in ai chat', 'lumen start ai chat']
        if any(keyword in self.listen for keyword in openai_keywords):
            url = "https://chat.openai.com/"
            self.talk("Ouverture de Chat GPT...") if self.language == 'Fr' else self.talk("Open Chat GPT...")
            webbrowser.open(url)
def define_lang():
    """Show the sidebar language selector, persist the choice, and return it.

    The previously selected language (if any) is read from
    save_config_txt/lang.txt and pre-selected; the new choice is written
    back to the same file on every render.
    """
    # Make sure the config directory is present before touching the file.
    if not os.path.exists('save_config_txt'):
        os.makedirs('save_config_txt')

    lang_file = 'save_config_txt/lang.txt'
    saved_lang = 'Fr'  # default when no preference has been saved yet
    if os.path.exists(lang_file):
        with open(lang_file, 'r') as fh:
            saved_lang = fh.read().strip()

    options = ['Fr', 'En']
    lang = st.sidebar.selectbox('🔤 Language', options, index=options.index(saved_lang))

    # Persist the (possibly unchanged) selection for the next run.
    with open(lang_file, 'w') as fh:
        fh.write(lang)
    return lang
def get_lang():
    """Read the persisted UI language from save_config_txt/lang.txt.

    Returns:
        The saved language code ('Fr' or 'En'). When the file does not
        exist, a sidebar error is shown and 'Fr' is returned as a safe
        default.
    """
    lang_file = 'save_config_txt/lang.txt'
    if os.path.exists(lang_file):
        with open(lang_file, 'r') as file:
            return file.read().strip()
    st.sidebar.error("The language file doesn't exist. Please go to the menu to set a language.")
    # Bug fix: the original fell through to `return lang` with `lang`
    # unbound, raising UnboundLocalError; return the default instead.
    return 'Fr'
def run_assistant(lang):
    """Main assistant loop: record speech, transcribe it, and dispatch the
    transcript to every registered command handler until shutdown.

    Args:
        lang: UI/speech language code, 'Fr' or 'En'.
    """
    speech_to_text = SpeechToText()
    device = get_microphone(lang)
    voice = NarratorVoice(lang)
    record_working = True  # False while the assistant is paused

    def talk(text):
        voice.speak(text)

    sentences = load_sentences()
    app_paths = load_app_paths()
    # Longest keys first so the most specific trigger wins.
    sorted_keys_sentences = sorted(sentences.keys(), key=len, reverse=True)
    sorted_keys_app_paths = sorted(app_paths.keys(), key=len, reverse=True)
    while True:
        if not record_working:
            # Paused: show a resume button instead of listening.
            btn_unpause_conv = st.sidebar.button("Reprendre" if lang == 'Fr' else "Take back")
            if btn_unpause_conv:
                record_working = True

        if record_working:
            filename_temp_audio = 'functions/config_page/speech_to_text/temp_audio/audio.wav'
            record_audio(language=lang, device_index=device)
            listen = speech_to_text.transcribe(filename_temp_audio)
            st.write(listen)

            # Custom trigger -> spoken sentence mappings.
            for key in sorted_keys_sentences:
                key_words = key.split()
                command_words = listen.split()
                # All trigger words present, in order, at or after their position.
                if all(word in command_words[i:] for i, word in enumerate(key_words)):
                    talk(sentences[key])
                    break

            # Custom trigger -> application launch mappings.
            for app_path in sorted_keys_app_paths:
                trigger_phrase = app_paths[app_path]
                trigger_words = trigger_phrase.split()
                command_words = listen.split()
                if all(word in command_words[i:] for i, word in enumerate(trigger_words)):
                    if lang == 'Fr':
                        talk("Lancement de l'application " + trigger_phrase)
                    else:
                        talk("Starting the application " + trigger_phrase)
                    # .bat files must go through the shell 'start' builtin.
                    if app_path.endswith('.bat'):
                        subprocess.Popen(['start', app_path], shell=True)
                    else:
                        subprocess.Popen(app_path, shell=True)
                    break

            if is_connected():
                # Web search commands (need connectivity).
                web_commands = WebCommands(listen, lang, talk)
                web_commands.search_ytb()
                web_commands.search_google()
                web_commands.search_wikipedia()
                web_commands.search_bing()
                web_commands.search_gpt()

            # Add JSON Text
            text_add = TextAddCommands(listen, device, lang, talk)
            text_add.add_text()

            # Translate detected language to English
            speech_to_en = SpeechToEnCommand(listen, device, lang, talk)
            speech_to_en.translate()

            # Volume commands
            volume_commands = VolumeCommands(listen, lang, talk)
            volume_commands.mute()
            volume_commands.demute()
            volume_commands.volume_increase()
            volume_commands.volume_decrease()

            # Start chrono app
            start_chrono_app = ChronoCommand(listen, lang, talk)
            start_chrono_app.start_chrono()

            # Take note (.txt) command
            take_note_commands = NoteCommands(listen, lang, talk)
            take_note_commands.vocal_note()

            # Cyber commands (system info)
            cyber_commands = InfoCommands(listen)
            cyber_commands.ip_config()
            cyber_commands.system_info()
            cyber_commands.net_info()
            cyber_commands.arp_info()
            cyber_commands.route_info()
            cyber_commands.schtasks_info()
            cyber_commands.driver_info()
            cyber_commands.msinfo32_info()

            # Current time/date commands
            time_commands = TimeCommands(listen, lang, talk)
            time_commands.time_command()
            time_commands.date_command()

            # Math commands
            calcul_commands = CalculCommands(listen, lang, talk)
            calcul_commands.calcul_command()

            # Screenshot of the current screen
            screenshot_command = ScreenCommand(listen, lang, talk)
            screenshot_command.screen()

            # Camera snapshot
            screen_cam_command = CamCommand(listen, device, lang, talk)
            screen_cam_command.screen_cam()

            # Video capture
            video_capture = VideoCommand(listen, device, lang, talk)
            video_capture.start_video()

            # Discussion mode (Ollama)
            model_discu = os.path.join('save_config_txt', 'discussion_model.txt')
            hist_discu = 'functions/discussion_page/discussion_history'
            start_discussion = DiscussionCommand(listen, device, lang, talk, model_discu, hist_discu)
            start_discussion.launch_discussion_mode()

            # Code mode (Ollama)
            model_code = os.path.join('save_config_txt', 'code_model.txt')
            hist_code = 'functions/code_page/code_history'
            start_code = CodeCommand(listen, device, lang, talk, model_code, hist_code)
            start_code.launch_code_mode()

            # Precision mode (Ollama)
            model_precision = os.path.join('save_config_txt', 'precision_model.txt')
            hist_precision = 'functions/precision_page/precision_history'
            start_precision = PrecisionCommand(listen, device, lang, talk, model_precision, hist_precision)
            start_precision.launch_precision_mode()

            # Vision mode (Ollama)
            model_vision = os.path.join('save_config_txt', 'vision_model.txt')
            hist_vision = 'functions/vision_page/vision_history'
            start_llm_vision = VisionCommand(listen, device, lang, talk, model_vision, hist_vision)
            start_llm_vision.launch_vision_mode()

            # Pause conversation
            pause_keywords = ['lumen mode pause', 'lumen mets-toi en pause', 'lumen mets toi en pause', 'lumen pause mode']
            if any(keyword in listen for keyword in pause_keywords):
                record_working = False
                continue

            # Stop the system
            stop_keywords = ['lumen éteins-toi', 'lumen arrête-toi', 'lumen stop tout', 'lumen mode arrêt',
                             # Bug fix: a missing comma merged 'lumen mode arrêt' and
                             # 'lumen shutdown mode' into one unmatched keyword.
                             'lumen shutdown mode', 'lumen shoot down mode', 'lumen shootdown mode']
            if any(keyword in listen for keyword in stop_keywords):
                if lang == 'Fr':
                    talk('Bien Monsieur, arrêt des systèmes en cours..')
                else:
                    talk('Yes sir, systems shutdown in progress..')
                record_working = False
                break
def start_precision(device, lang, talk, model_path, hist_dir):
    """Precision LLM mode: transcribe speech, optionally attach pasted
    elements from a Notepad scratch file, and converse with the configured
    Ollama model until the user asks to stop.

    Args:
        device: microphone device index.
        lang: 'Fr' or 'En'.
        talk: text-to-speech callback.
        model_path: txt file holding the Ollama model name to use.
        hist_dir: directory where saved conversations are written.
    """
    llm = LLMGenerate(talk)
    conversation_history = []
    chat_history = []  # Bug fix: was unbound if the user saved before any exchange.
    speech_to_text = SpeechToText()
    working = True
    filename_temp_audio = 'functions/config_page/speech_to_text/temp_audio/audio.wav'

    if os.path.isfile(model_path):
        with open(model_path, 'r') as file:
            llm_model = file.read().strip()
    else:
        st.sidebar.error("Veuillez configurer le modèle à utiliser dans la page précision." if lang == 'Fr' else
                         "Please configure the template to be used in the precision page.")
        # Bug fix: the original kept running and crashed later on an
        # unbound llm_model; bail out instead.
        return

    detect_element_keywords = ['je veux rentrer des données', 'aide moi sur ça', 'je veux rentrer du code',
                               # Bug fix: a missing comma merged the next two keywords into one string.
                               'insérer des éléments', 'insérer un élément',
                               'I want to enter code', 'help me on this', 'insert elements']
    st.success(f"Si vous souhaitez rentrer des éléments pour parler dessus dire {detect_element_keywords}" if lang == 'Fr' else
               f"If you want to enter some elements to talk about it, say {detect_element_keywords}")

    def ask_user_intent():
        """Open a Notepad scratch file for the user to paste elements, wait
        for a spoken confirmation phrase, then ask what to do with them.

        Returns:
            (elements, user_intent): the pasted text and the transcribed
            instruction for the model.
        """
        trigger_phrases = ["c'est bon", "j'ai déposé mon élément", "j'ai déposé mes éléments", "code déposé", "éléments déposé", "élément déposé",
                           "it's okay", "I submitted my code", 'code submitted', "it is okay", "okai", "ok", "okay",
                           "I submitted my element", "I submitted my elements"]
        # Bug fix: the English branch printed to the console instead of the UI.
        st.warning(f"Pour continuer, dire:\n {trigger_phrases}\n" if lang == 'Fr' else
                   f"To continue, say:\n{trigger_phrases}\n")

        # Create the scratch file's directory if needed.
        file_path = 'functions/precision_page/temp_element/temp_element.txt'
        os.makedirs(os.path.dirname(file_path), exist_ok=True)

        # Truncate any leftover content from a previous session.
        with open(file_path, 'w') as file:
            file.write('')

        os.system(f'notepad {file_path}')
        while True:
            record_audio(language=lang, device_index=device)
            listen = speech_to_text.transcribe(filename_temp_audio)
            if any(keyword in listen for keyword in trigger_phrases):
                break
            time.sleep(1)

        with open(file_path, 'r') as file:
            elements = file.read()

        if lang == 'Fr':
            talk("Que voulez-vous faire avec le/les élément/s fourni/s ?")
        else:
            talk("What do you want to do with the element/s provided ?")
        record_audio(language=lang, device_index=device)
        user_intent = speech_to_text.transcribe(filename_temp_audio)
        return elements, user_intent

    while True:
        if working:
            st.markdown("Mode Précision LLM.." if lang == 'Fr' else
                        "Precision LLM mode..", unsafe_allow_html=True)
            record_audio(language=lang, device_index=device)
            listen = speech_to_text.transcribe(filename_temp_audio)

            # User wants to paste elements (code/data) to discuss.
            if any(keyword in listen for keyword in detect_element_keywords):
                elements, user_intent = ask_user_intent()
                if lang == 'Fr':
                    lang_preprompt = "Parle en Français et réponds en français, "
                else:
                    lang_preprompt = "Speak in English and respond in English, "
                full_prompt_user = lang_preprompt + user_intent
                _, chat_history = llm.generate_response(llm_model, full_prompt_user + " " + elements, conversation_history)
                continue

            # Save the conversation on request.
            detect_save_keyords = ['sauvegarde notre discussion', 'sauvegarde notre conversation', 'sauvegarde la discussion', 'sauvegarde la conversation',
                                   'save our discussion', 'save our conversation', 'save the discussion', 'save the conversation']
            if any(keyword in listen for keyword in detect_save_keyords):
                talk("La conversation a été sauvegardé" if lang == 'Fr' else "The conversation has been saved")
                st.success("Conversation sauvegardé" if lang == 'Fr' else "Conversation saved.")
                save_conversation(chat_history, hist_dir)
                continue

            # Leave precision mode and return to basic voice detection.
            detect_stop_llm_precision_keyords = ['c\'est bon tu peux arrêter', 'tu peux arrêter', 'désactive llm', 'passe en mode classique',
                                                 'passage en mode classique', 'désactive précision llm', 'passe classique', 'disable llm',
                                                 'switch to classic mode', 'switch classic mode', 'disable precision llm',
                                                 'it\'s okay you can stop', 'it is okay you can stop', 'you can stop']
            if any(keyword in listen for keyword in detect_stop_llm_precision_keyords):
                talk("Passage en mode exécution de commandes" if lang == 'Fr' else "Switching to commands execution mode")
                st.error("Arrêt du mode précision." if lang == 'Fr' else "Stopping the precision mode.")
                working = False
                break

            # Default: generate a reply to what was said.
            lang_preprompt = "Réponds en français, " if lang == 'Fr' else "Respond in English, "
            full_prompt_user = lang_preprompt + str(listen)
            _, chat_history = llm.generate_response(llm_model, full_prompt_user, conversation_history)
précision." if lang == 'Fr' else "Stopping the precision mode.") 98 | working = False 99 | break 100 | 101 | # Generate a response 102 | lang_preprompt = "Réponds en français, " if lang == 'Fr' else "Respond in English, " 103 | user_input_str = str(listen) 104 | full_prompt_user = lang_preprompt + user_input_str 105 | _, chat_history = llm.generate_response(llm_model, full_prompt_user, conversation_history) -------------------------------------------------------------------------------- /functions/vision_page/encode_img.py: -------------------------------------------------------------------------------- 1 | import base64 2 | 3 | 4 | def encode_image_to_base64(image_path): 5 | with open(image_path, 'rb') as image_file: 6 | return base64.b64encode(image_file.read()).decode('utf-8') -------------------------------------------------------------------------------- /functions/vision_page/vision_command.py: -------------------------------------------------------------------------------- 1 | from functions.vision_page.vision_mode import start_vision 2 | 3 | 4 | class VisionCommand: 5 | def __init__(self, listen, device, language, talk, model_path, hist_dir): 6 | self.listen = listen 7 | self.device = device 8 | self.language = language 9 | self.talk = talk 10 | self.model_path = model_path 11 | self.hist_dir = hist_dir 12 | 13 | def launch_vision_mode(self): 14 | llm_vision_start_keywords = ['lumen passe en mode analyse', 'lumen passe en analyse', 'lumen passage en mode analyse', 'lumen passage en mode vision', 15 | 'lumen mode vision', 'lumen passage en vision', 'lumen passe en vision', 'lumen passe en mode vision', 16 | 'lumen switch to analysis mode', 'lumen switch to analyse', 'lumen switch to vision', 'lumen switch to vision mode', 17 | 'lumen switch to analysis', 'lumen switch to analyse mode', 'lumen vision mode'] 18 | if any(keyword in self.listen for keyword in llm_vision_start_keywords): 19 | if self.language == 'Fr': 20 | self.talk("Mode Vision Activé") 21 | 
def start_vision(device, lang, talk, model_path, hist_dir):
    """Vision LLM mode: listen for screenshot/camera commands, send the
    captured image with a spoken prompt to the local Ollama API, and speak
    the model's answer.

    Args:
        device: microphone device index.
        lang: 'Fr' or 'En'.
        talk: text-to-speech callback.
        model_path: txt file holding the Ollama vision model name.
        hist_dir: directory where saved conversations are written.
    """
    url = "http://localhost:11434/api/generate"
    headers = {'Content-Type': "application/json"}
    vision_history = []
    chat_history = []  # Bug fix: was unbound if the user saved before any analysis.
    speech_to_text = SpeechToText()
    working = True
    filename_temp_audio = 'functions/config_page/speech_to_text/temp_audio/audio.wav'

    if os.path.isfile(model_path):
        with open(model_path, 'r') as file:
            vision_model = file.read().strip()
    else:
        st.sidebar.error("Veuillez configurer le modèle à utiliser dans la page vision." if lang == 'Fr' else
                         "Please configure the template to be used in the vision page.")
        # Bug fix: the original kept running and crashed later on an
        # unbound vision_model; bail out instead.
        return

    def analyze_image(image_path, custom_prompt):
        """POST the image + prompt to Ollama and speak the streamed answer.

        Returns:
            (full_response, chat_history) — on error, an error string and
            an empty history.
        """
        vision_history.append(custom_prompt)
        image_base64 = encode_image_to_base64(image_path)

        payload = {
            "model": vision_model,
            "prompt": custom_prompt,
            "images": [image_base64]
        }

        response = requests.post(url, headers=headers, json=payload)
        try:
            # Ollama streams one JSON object per line; concatenate the
            # 'response' fragments into the full answer.
            response_lines = response.text.strip().split('\n')
            full_response = ''.join(json.loads(line)['response'] for line in response_lines if 'response' in json.loads(line))

            st.write(custom_prompt)
            st.write(full_response)
            chat_history = [
                [custom_prompt, full_response]
            ]

            # Read response to user
            talk(full_response)

            return full_response, chat_history
        except Exception as e:
            # Bug fix: the original returned a bare string here while every
            # caller unpacks two values, raising ValueError on the error path.
            return f"Error: {e}", []

    while True:
        if working:
            st.markdown("Mode Vision LLM.." if lang == 'Fr' else
                        "Vision LLM mode..", unsafe_allow_html=True)
            record_audio(language=lang, device_index=device)
            listen = speech_to_text.transcribe(filename_temp_audio)
            st.write(listen)

            # Screenshot of the screen, then analyse it.
            screenshot = ScreenCommand(listen, lang, talk)
            if screenshot.screen():
                image_path = "photos/screenshot.png"
                if lang == 'Fr':
                    talk("Dites moi ce que je dois faire avec cette image ?")
                    lang_preprompt = "Parle en Français et réponds en français, "
                else:
                    talk("Tell me what I should do with this image ?")
                    lang_preprompt = "Speak in English and respond in English, "
                record_audio(language=lang, device_index=device)
                listen = speech_to_text.transcribe(filename_temp_audio)
                full_prompt = lang_preprompt + listen
                full_response, chat_history = analyze_image(image_path, full_prompt)

            # Snapshot from the camera, then analyse it.
            screen_cam = CamCommand(listen, device, lang, talk)
            if screen_cam.screen_cam():
                image_path = "photos/camera.png"
                if lang == 'Fr':
                    talk("Dites moi ce que je dois faire avec cette image ?")
                    lang_preprompt = "Parle en Français et réponds en français,"
                else:
                    talk("Tell me what I should do with this image ?")
                    lang_preprompt = "Speak in English and respond in English,"

                record_audio(language=lang, device_index=device)
                listen = speech_to_text.transcribe(filename_temp_audio)
                full_prompt = lang_preprompt + listen
                full_response, chat_history = analyze_image(image_path, full_prompt)

            # Save the conversation on request.
            detect_save_keyords = ['sauvegarde notre discussion', 'sauvegarde notre conversation', 'sauvegarde la discussion', 'sauvegarde la conversation',
                                   'save our discussion', 'save our conversation', 'save the discussion', 'save the conversation']
            if any(keyword in listen for keyword in detect_save_keyords):
                talk("La conversation a été sauvegardé" if lang == 'Fr' else "The conversation has been saved")
                st.success("Conversation sauvegardé" if lang == 'Fr' else "Conversation saved.")
                save_conversation(chat_history, hist_dir)
                continue

            # Leave vision mode and return to basic voice detection.
            detect_stop_llm_keyords = ['désactive llm', 'désactive vision', 'passe en mode classique', 'passage en mode classique',
                                       'disable llm', 'switch to classic mode', 'switch classic mode']
            if any(keyword in listen for keyword in detect_stop_llm_keyords):
                if lang == 'Fr':
                    talk("Passage en mode exécution de commandes")
                else:
                    talk("Switching to commands execution mode")
                st.error("Stopping the vision with the model.")
                break
48 | Lumen est un assistant intelligent capable de voir 👀 écouter 👂 discuter 💬 et coder 💻 \
49 | vous permettant de naviguer sur le web, contrôler votre multimédia et automatiser des tâches \
50 | en toute simplicité.
Avec ses capacités de reconnaissance visuelle et vocale, Lumen s'adapte \
51 | à vos besoins pour optimiser votre productivité et enrichir votre expérience numérique.
\
52 | Découvrez une nouvelle façon d'interagir avec la technologie grâce à Lumen, \
53 | votre partenaire technologique au quotidien. ✨ \
54 |
\
58 | Lumen is an intelligent assistant capable of seeing 👀 listening 👂 conversing 💬 and coding 💻 \
59 | allowing you to browse the web, control your multimedia, and automate tasks with ease.
\
60 | With its visual and voice recognition capabilities, Lumen adapts to your needs to enhance \
61 | productivity and enrich your digital experience.
Discover a new way to interact with technology \
62 | with Lumen, your everyday tech partner. ✨ \
63 |