├── .gitignore ├── .streamlit └── config.toml ├── README.md ├── [Documentation Lumen] └── Lumen Documentation.pdf ├── functions ├── _functions_global_ │ ├── app_button.py │ ├── config_model.py │ ├── get_model.py │ └── llm_model_functions │ │ ├── llm_generate.py │ │ ├── llm_load_hist.py │ │ └── llm_save_hist.py ├── _ui_global │ ├── custom_ui.py │ └── page_title.py ├── code_page │ ├── code_command.py │ └── code_mode.py ├── config_page │ ├── cam_index.py │ ├── custom_model │ │ ├── colors.py │ │ └── modelfile.py │ ├── json │ │ ├── create_trigger_app_json.py │ │ └── create_trigger_response_json.py │ ├── mic_sensitivity.py │ ├── narrator_voice.py │ ├── selection_microphone.py │ ├── speech_to_text │ │ ├── record.py │ │ └── speech.py │ └── visual_system_voice.py ├── discussion_page │ ├── discussion_command.py │ └── discussion_mode.py ├── menu_page │ ├── commands │ │ ├── app_chrono.py │ │ ├── audio_gestion.py │ │ ├── cam │ │ │ ├── cam_index.py │ │ │ ├── screen_cam.py │ │ │ ├── video_capture.py │ │ │ └── video_command.py │ │ ├── cybersec_info_system.py │ │ ├── data_time.py │ │ ├── json │ │ │ ├── load_json.py │ │ │ └── text_add_json.py │ │ ├── math_calcul.py │ │ ├── screenshot.py │ │ ├── speech_to_en.py │ │ ├── take_note.py │ │ └── web │ │ │ ├── check_connection.py │ │ │ └── search_web.py │ ├── get_language.py │ └── start_assistant.py ├── precision_page │ ├── precision_command.py │ └── precision_mode.py └── vision_page │ ├── encode_img.py │ ├── vision_command.py │ └── vision_mode.py ├── menu.py ├── pages ├── 1_🛠️Configuration.py ├── 2_💬Discussion.py ├── 3_🎯Precision.py ├── 4_👨‍💻Code.py └── 5_👁️Vision.py ├── requirements.txt ├── ressources ├── lumen_color_palette.png └── lumen_logo.png └── start-assistant.bat /.gitignore: -------------------------------------------------------------------------------- 1 | # env files 2 | __pycache__ 3 | .env 4 | 5 | # save config 6 | save_config_txt 7 | 8 | # Json files 9 | config_json 10 | 11 | # Temp audio files 12 | functions/config_page/speech_to_text/temp_audio 13 | temp_output_voice 14 | 15 | # Chat History 16 | functions/discussion_page/discussion_history 17 | functions/code_page/code_history 18 | functions/precision_page/precision_history 19 | functions/vision_page/vision_history 20 | 21 | # Temp code & elements 22 | temp_code 23 | temp_element 24 | 25 | # Model parameters 26 | temp_modelfile 27 | 28 | # Photos cam 29 | photos 30 | 31 | -------------------------------------------------------------------------------- /.streamlit/config.toml: -------------------------------------------------------------------------------- 1 | [theme] 2 | primaryColor="#c05bb6" 3 | backgroundColor="#121b3b" 4 | secondaryBackgroundColor="#2264b5" 5 | textColor="#f0d6ec" 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # **Lumen** (for Windows 11) 3 | 4 | **Assistant to support you in your projects and tasks.** 5 | 6 | 7 | ## Installation 8 | 9 | Run the **start-assistant.bat** and follow the instructions on the shell. 10 | 11 | **ATTENTION :** 12 | This application is not suitable for **Mac** and **Linux** and may not work depending on the hardware of your Windows computer. 13 | ## Synthetic Voices 14 | 15 | If you want to have more synthetic voices available, on Windows you have to go to the narrator settings and you can download the voices you want. 16 | 17 | If this doesn't work and doesn't recognize the voices you have installed on the narrator settings, follow this steps : 18 | 1. Open the **Registry Editor** by pressing the **“Windows” and “R”** keys simultaneously, then type **“regedit”** and press Enter. 19 | 20 | 2. Navigate to the registry key : **HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech_OneCore\Voices\Tokens**. 21 | 22 | 3. Export this key to a **REG file** (with a right click on the file). 23 | 24 | 4. Open this file with a text editor and replace all occurrences of **HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech_OneCore\Voices\Tokens** 25 | with **HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\SPEECH\Voices\Tokens**. 26 | 27 | 5. Save the modified file and double-click it to import the changes to the registry. 28 | 29 | 30 | ## Tech Stack 31 | 32 | **Language :** Python 3.11.7 33 | 34 | **CUDA Compiler/Toolkit** : Cuda 11.8 35 | 36 | **Local AI Models:** Ollama (version 0.3.2) 37 | 38 | **Voice Recognition:** openai/whisper-large-v3 39 | 40 | **Audio Device Scanning:** pyaudio 41 | 42 | **Synthetic Voices:** pyttsx3 43 | 44 | **Interface:** streamlit 45 | 46 | **Computer Commands:** webbrowser (search on web) / pywhatkit (search on youtube) / pycaw (change volume computer) / sympy (calcul) 47 | 48 | 49 | ## Author 50 | 51 | - [@nixiz0](https://github.com/nixiz0) 52 | -------------------------------------------------------------------------------- /[Documentation Lumen]/Lumen Documentation.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nixiz0/Lumen/68e6f872704ddd32e384a3da5fbc59ea71bbde4c/[Documentation Lumen]/Lumen Documentation.pdf -------------------------------------------------------------------------------- /functions/_functions_global_/app_button.py: -------------------------------------------------------------------------------- 1 | import os 2 | import string 3 | import base64 4 | import streamlit as st 5 | import pandas as pd 6 | import json 7 | 8 | 9 | class AppButton: 10 | def __init__(self, lang, history_dir, selected_file): 11 | self.lang = lang 12 | self.history_dir = history_dir 13 | self.selected_file = selected_file 14 | 15 | def rename_file(self): 16 | new_file_name = st.sidebar.text_input('Nouveau nom' if self.lang == 'Fr' else 'New name') 17 | if new_file_name: 18 | new_file_name = new_file_name.translate(str.maketrans('', '', string.punctuation)) 19 | if st.sidebar.button('Renommer' if self.lang == 'Fr' else 'Rename'): 20 | if not new_file_name.endswith('.json'): 21 | new_file_name += '.json' 22 | os.rename(os.path.join(self.history_dir, self.selected_file), os.path.join(self.history_dir, new_file_name)) 23 | st.rerun() 24 | else: 25 | st.sidebar.warning('Veuillez entrer un nom de fichier.' if self.lang == 'Fr' else 'Please enter a file name.') 26 | 27 | def delete_file(self): 28 | if st.sidebar.button('Supprimer' if self.lang == 'Fr' else 'Delete'): 29 | os.remove(os.path.join(self.history_dir, self.selected_file)) 30 | st.rerun() 31 | 32 | def download_as_csv(self): 33 | if st.sidebar.button('Télécharger CSV' if self.lang == 'Fr' else 'Download CSV'): 34 | with open(os.path.join(self.history_dir, self.selected_file), 'r', encoding='utf-8') as f: 35 | data = json.load(f) 36 | df = pd.DataFrame(data) 37 | csv = df.to_csv(index=False, encoding='utf-8') 38 | b64 = base64.b64encode(csv.encode()).decode() 39 | csv_file_name = self.selected_file.replace('.json', '.csv') 40 | if self.lang == 'Fr': 41 | href = f'Cliquez pour télécharger' 42 | else: 43 | href = f'Click to download' 44 | st.sidebar.markdown(href, unsafe_allow_html=True) -------------------------------------------------------------------------------- /functions/_functions_global_/config_model.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import os 3 | 4 | from functions._functions_global_.get_model import get_model_names 5 | 6 | 7 | def configuration_model(lang, filename_model): 8 | # Get the names of the models 9 | model_names = get_model_names() 10 | model_names.insert(0, "") 11 | 12 | # Select model 13 | model_use = st.sidebar.selectbox('🔬 Modèles' if lang == "Fr" else '🔬 Models', model_names) 14 | 15 | # Create the folder if it doesn't exist 16 | if not os.path.exists('save_config_txt'): 17 | os.makedirs('save_config_txt') 18 | 19 | # Write the name of the model in the .txt only if a model is selected 20 | if model_use != "": 21 | with open(filename_model, 'w') as f: 22 | f.write(model_use) 23 | -------------------------------------------------------------------------------- /functions/_functions_global_/get_model.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | 4 | def get_model_names(): 5 | # Run 'ollama list' command and get the output 6 | output = subprocess.check_output("ollama list", shell=True).decode() 7 | 8 | # Split the output into lines and ignore the first line 9 | lines = output.split('\n')[2:] 10 | 11 | # Retrieve only model names 12 | model_names = [line.split()[0] for line in lines if line] 13 | 14 | return model_names -------------------------------------------------------------------------------- /functions/_functions_global_/llm_model_functions/llm_generate.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import requests 3 | import json 4 | 5 | 6 | class LLMGenerate: 7 | def __init__(self, talk): 8 | self.url = "http://localhost:11434/api/chat" 9 | self.headers = {'Content-Type': "application/json",} 10 | self.talk = talk 11 | 12 | def beforeSay(self, response): 13 | return response 14 | 15 | def say(self, response): 16 | if len(response) == 0: 17 | return 18 | self.talk(self.beforeSay(response)) 19 | 20 | def generate_response(self, llm_model, prompt, chat_history): 21 | if len(prompt) == 0: 22 | return "", chat_history 23 | 24 | full_prompt = [] 25 | for i in chat_history: 26 | full_prompt.append({ 27 | "role": "user", 28 | "content": i[0] 29 | }) 30 | full_prompt.append({ 31 | "role": "assistant", 32 | "content": i[1] 33 | }) 34 | full_prompt.append({ 35 | "role": "user", 36 | "content": prompt 37 | }) 38 | 39 | data = { 40 | "model": llm_model, 41 | "stream": True, 42 | "messages": full_prompt, 43 | } 44 | 45 | response = requests.post(self.url, headers=self.headers, data=json.dumps(data), stream=True) 46 | if response.status_code == 200: 47 | all_response = '' 48 | this_response = '' 49 | for line in response.iter_lines(): 50 | if line: 51 | jsonData = json.loads(line) 52 | this_response += jsonData["message"]['content'] 53 | if '.' in this_response or '?' in this_response or '!' in this_response: 54 | self.say(this_response) 55 | all_response += this_response 56 | this_response = '' 57 | if len(this_response) > 0: 58 | self.say(this_response) 59 | all_response += this_response 60 | this_response = '' 61 | chat_history.append((prompt, all_response)) 62 | 63 | # Display the last user message and assistant response on Streamlit 64 | if chat_history[-1][0]: # Last user's message 65 | st.write(f"User: {chat_history[-1][0]}") 66 | if chat_history[-1][1]: # Last assistant's message 67 | st.write(f"Assistant: {chat_history[-1][1]}") 68 | 69 | return "", chat_history 70 | else: 71 | return "Error: Unable to fetch response", chat_history 72 | -------------------------------------------------------------------------------- /functions/_functions_global_/llm_model_functions/llm_load_hist.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import json 3 | 4 | 5 | def load_and_display_chat(filename): 6 | # Load the chat history from the json file 7 | with open(filename, 'r', encoding='utf-8') as f: 8 | chat_history = json.load(f) 9 | 10 | # Store the chat history in the session state 11 | st.session_state['session_state'] = chat_history 12 | 13 | # Display the chat history on Streamlit 14 | for message in st.session_state['session_state']: 15 | with st.chat_message(message['role']): 16 | st.write(message['content']) 17 | -------------------------------------------------------------------------------- /functions/_functions_global_/llm_model_functions/llm_save_hist.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import nltk 4 | import string 5 | nltk.download('punkt') 6 | 7 | 8 | def save_conversation(chat_history, directory): 9 | # Convert chat history to the desired format 10 | formatted_history = [] 11 | user_text = "" 12 | for i in chat_history: 13 | formatted_history.append({ 14 | "role": "user", 15 | "content": i[0] 16 | }) 17 | formatted_history.append({ 18 | "role": "assistant", 19 | "content": i[1] 20 | }) 21 | # Concatenate user text 22 | user_text += " " + i[0] 23 | 24 | # Tokenize the text and extract the desired words 25 | words = nltk.word_tokenize(user_text) 26 | 27 | # Remove punctuation from the words 28 | words = [word for word in words if word not in string.punctuation] 29 | filename_words = words[3:13] # Take words after the first 3 words, up to 10 words 30 | filename = '_'.join(filename_words) + '.json' 31 | 32 | # Create the directory if it doesn't exist 33 | if not os.path.exists(directory): 34 | os.makedirs(directory) 35 | 36 | # Save to a json file in the specified directory 37 | with open(os.path.join(directory, filename), 'w', encoding='utf-8') as f: 38 | json.dump(formatted_history, f, ensure_ascii=False, indent=4) 39 | -------------------------------------------------------------------------------- /functions/_ui_global/custom_ui.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | 4 | def custom_ui(): 5 | # Hide deploybutton in streamlit ui 6 | st.markdown(""" 7 | 10 | """, unsafe_allow_html=True) -------------------------------------------------------------------------------- /functions/_ui_global/page_title.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | 4 | def set_page_title(title, user_title): 5 | if title != user_title: 6 | title = user_title 7 | st.markdown(unsafe_allow_html=True, body=f""" 8 |