├── Models └── .gitkeep ├── Modules └── .gitkeep ├── Nova ├── StartCore.py ├── Configs │ ├── KeyList.json │ └── NovaSettings.config ├── KeyManager.py ├── CheckForValidInput.py ├── Helpers.py ├── SpeechSynthesis.py ├── ConfigInteraction.py ├── LanguageModelInteraction.py ├── AudioTranscription.py ├── ModuleManager.py ├── Interface.py └── Core.py ├── .gitgnore ├── Start interface.bat ├── manifest.json ├── requirements.txt ├── LangFiles ├── en.json └── de.json ├── NovaAPI.py ├── README.md └── LICENSE /Models/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Modules/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Nova/StartCore.py: -------------------------------------------------------------------------------- 1 | import Core 2 | 3 | Core.Start() -------------------------------------------------------------------------------- /.gitgnore: -------------------------------------------------------------------------------- 1 | **/__pycache__/** 2 | **/temp_audio.wav 3 | **/output.wav -------------------------------------------------------------------------------- /Nova/Configs/KeyList.json: -------------------------------------------------------------------------------- 1 | { 2 | "1": "Groq", 3 | "2": "Elevenlabs" 4 | } -------------------------------------------------------------------------------- /Start interface.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | title Interface 3 | python ./Nova/Interface.py 4 | -------------------------------------------------------------------------------- /manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Modular Personal AI Assistant NOVA", 3 | "description": "Open source personal AI assistant that is easily expandable and customizable.", 4 | "author": "Julian Thomas", 5 | "version": "1.6", 6 | "license": "Apache-2.0", 7 | "repository": "https://github.com/00Julian00/Nova.git" 8 | } -------------------------------------------------------------------------------- /Nova/Configs/NovaSettings.config: -------------------------------------------------------------------------------- 1 | { 2 | "Language": "en", 3 | "Hotword": "Nova", 4 | "GroqModel": "llama3-70b-8192", 5 | "ElevenlabsModel": "eleven_turbo_v2_5", 6 | "ElevenlabsVoiceID": "Xb7hH8MSUJpSbSDYk0k2", 7 | "OfflineMode": "False", 8 | "StreamVoice": "True", 9 | "MicrophoneIndex": "4", 10 | "Behaviour": "You are an AI assistant called 'Nova'.", 11 | "Name": "" 12 | } -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | setuptools 2 | wheel 3 | Levenshtein 4 | groq 5 | elevenlabs 6 | pyaudio 7 | faster-whisper 8 | sounddevice 9 | spicy 10 | numpy 11 | langcodes 12 | prompt_toolkit 13 | keyring 14 | silero-vad 15 | platformdirs 16 | colorama 17 | packaging 18 | transformers 19 | pyaudio 20 | accelerate 21 | TTS 22 | simpleaudio 23 | llama-cpp-python \ --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu121 24 | google.generativeai -------------------------------------------------------------------------------- /Nova/KeyManager.py: 
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import keyring
4 | 
5 | script_dir = os.path.dirname(os.path.realpath(__file__))
6 | KeyList = os.path.join(script_dir, 'Configs', 'KeyList.json')
7 | 
8 | def GetKey(name):
9 |     return keyring.get_password("Nova", name)
10 | 
11 | def SetKey(name, value):
12 |     keyring.set_password("Nova", name, value)
13 | 
14 | def GetKeyList():
15 |     try:
16 |         with open(KeyList, 'r') as file:
17 |             return(json.load(file))
18 |     except Exception as e:
19 |         print("Failed to fetch key list\n" + str(e))
--------------------------------------------------------------------------------
/Nova/CheckForValidInput.py:
--------------------------------------------------------------------------------
1 | #This script checks whether a setting the user entered is valid. Returns True if valid, otherwise an error message that can be printed to the console.
2 | from langcodes import Language
3 | from elevenlabs.client import ElevenLabs
4 | import ConfigInteraction
5 | from KeyManager import GetKey
6 | 
7 | langFile = ConfigInteraction.GetLanguageFile()
8 | 
9 | client = ElevenLabs(api_key=GetKey("Elevenlabs"))
10 | 
11 | def CheckForValidInput(setting, input):
12 |     if (setting == "Language"):
13 |         try:
14 |             Language.get(input)
15 |             return True
16 |         except:
17 |             return langFile["Errors"][7]
18 | 
19 |     if (setting == "Hotword"):
20 |         return True
21 | 
22 |     if (setting == "GroqModel"):
23 |         return True
24 | 
25 |     if (setting == "ElevenlabsModel"):
26 |         return True
27 | 
28 |     if (setting == "ElevenlabsVoiceID"):
29 |         try:
30 |             if (GetKey("Elevenlabs") != ""):
31 |                 client.voices.get(input)
32 |                 return True
33 |         except:
34 |             return langFile["Errors"][8]
35 | 
36 |     if (setting == "OfflineMode"):
37 |         if (input == "True" or input == "False" or input == "Mixed"):
38 |             return True
39 |         else:
40 |             return langFile["Errors"][9]
41 | 
42 |     if (setting == "StreamVoice"):
43 |         if (input == "True" or input == "False"):
44 |             return True
45 |         else:
46 |             return langFile["Errors"][9]
47 | 
48 |     if (setting == "MicrophoneIndex"):
49 |         try:
50 |             int(input)
51 |             return True
52 |         except:
53 |             return langFile["Errors"][10]
54 | 
55 |     if (setting == "Behaviour"):
56 |         return True
57 | 
58 |     if (setting == "Name"):
59 |         return True
--------------------------------------------------------------------------------
/Nova/Helpers.py:
--------------------------------------------------------------------------------
1 | from contextlib import redirect_stdout, redirect_stderr, contextmanager
2 | from io import StringIO
3 | import sys
4 | import builtins
5 | from types import ModuleType
6 | 
7 | def suppress_output_decorator(func):
8 |     def wrapper(*args, **kwargs):
9 |         # Redirect stdout and stderr to a string buffer
10 |         stdout_buffer = StringIO()
11 |         stderr_buffer = StringIO()
12 | 
13 |         with redirect_stdout(stdout_buffer), redirect_stderr(stderr_buffer):
14 |             result = func(*args, **kwargs)
15 |         return result
16 |     return wrapper
17 | 
18 | @contextmanager
19 | def suppress_output():
20 |     # Save the original stdout and stderr
21 |     old_stdout = sys.stdout
22 |     old_stderr = sys.stderr
23 | 
24 |     # Redirect stdout and stderr to a string buffer
25 |     stdout_buffer = StringIO()
26 |     stderr_buffer = StringIO()
27 | 
28 |     sys.stdout = stdout_buffer
29 |     sys.stderr = stderr_buffer
30 | 
31 |     try:
32 |         yield
33 |     finally:
34 |         # Restore the original stdout and stderr
35 |         sys.stdout = old_stdout
36 |         sys.stderr = old_stderr
37 | 
38 | #Used to limit which libraries a Module can access
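#A minimal usage sketch (illustrative only; 'untrusted_module.py' is a hypothetical file name):
#    importer = RestrictedImporter(['os', 'subprocess'])
#    importer.apply()                    #swaps out builtins.__import__
#    try:
#        exec(open('untrusted_module.py').read(), {'__name__': '__main__'})
#        #an 'import os' inside the executed code now raises ImportError("Import of 'os' is not allowed")
#    finally:
#        importer.restore()              #always restore the original __import__
#run_file_with_restricted_imports below wraps exactly this pattern.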
39 | class RestrictedImporter: 40 | def __init__(self, disallowed_modules): 41 | self.disallowed_modules = set(disallowed_modules) 42 | self.original_import = builtins.__import__ 43 | 44 | def custom_import(self, name, *args, **kwargs): 45 | if name in self.disallowed_modules: 46 | raise ImportError(f"Import of '{name}' is not allowed") 47 | return self.original_import(name, *args, **kwargs) 48 | 49 | def apply(self): 50 | builtins.__import__ = self.custom_import 51 | 52 | def restore(self): 53 | builtins.__import__ = self.original_import 54 | 55 | def run_file_with_restricted_imports(file_path, disallowed_modules): 56 | importer = RestrictedImporter(disallowed_modules) 57 | importer.apply() 58 | try: 59 | with open(file_path, 'r') as file: 60 | code = file.read() 61 | exec(code, {'__name__': '__main__'}) 62 | finally: 63 | importer.restore() -------------------------------------------------------------------------------- /Nova/SpeechSynthesis.py: -------------------------------------------------------------------------------- 1 | from elevenlabs.client import ElevenLabs 2 | from elevenlabs import stream, play, Voice, VoiceSettings 3 | import ConfigInteraction 4 | from KeyManager import GetKey 5 | import os 6 | from TTS.api import TTS 7 | import simpleaudio 8 | from Helpers import suppress_output_decorator, suppress_output 9 | 10 | offlineMode = ConfigInteraction.GetSetting("OfflineMode") 11 | 12 | voiceID = None 13 | model = None 14 | client = None 15 | streamVoice = None 16 | tts = None 17 | 18 | @suppress_output_decorator 19 | def Initialize(): 20 | global voiceID 21 | global model 22 | global client 23 | global streamVoice 24 | global tts 25 | 26 | if (offlineMode == "False" or offlineMode == "Mixed"): 27 | voiceID = ConfigInteraction.GetSetting("ElevenlabsVoiceID") 28 | model = ConfigInteraction.GetSetting("ElevenlabsModel") 29 | client = ElevenLabs(api_key=GetKey("Elevenlabs")) 30 | streamVoice = ConfigInteraction.GetSetting("StreamVoice") 31 | else: 32 | tts = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC") 33 | 34 | 35 | def SpeakStream(generator): 36 | audio_stream = client.generate( 37 | text = generator, 38 | voice=Voice( 39 | voice_id=voiceID, 40 | settings=VoiceSettings(stability=0.6, similarity_boost=1, style=0.2, use_speaker_boost=True) 41 | ), 42 | model = model, 43 | stream = True 44 | ) 45 | 46 | if (streamVoice == "True"): 47 | stream(audio_stream) 48 | else: 49 | play(audio_stream) 50 | 51 | def SpeakDirect(text): 52 | audio = client.generate( 53 | text = text, 54 | voice=Voice( 55 | voice_id=voiceID, 56 | settings=VoiceSettings(stability=0.6, similarity_boost=1, style=0.2, use_speaker_boost=True) 57 | ), 58 | model = model, 59 | stream = False 60 | ) 61 | 62 | play(audio) 63 | 64 | @suppress_output_decorator 65 | def SpeakOffline(text): 66 | tts.tts_to_file(text=text, file_path="output.wav") 67 | 68 | playObj = simpleaudio.WaveObject.from_wave_file("output.wav").play() 69 | playObj.wait_done() 70 | 71 | os.remove("output.wav") -------------------------------------------------------------------------------- /LangFiles/en.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version" : "1.6", 3 | 4 | "Interface" : [ 5 | "Nova Interface", 6 | "Developed by Julian.", 7 | "Choose key to edit by typing the number. Press enter to save.", 8 | "Type the new value and press enter to save. Press enter to keep the current value:", 9 | "Choose setting to edit by typing the number. 
Press enter to save.", 10 | "Version", 11 | "Type 'help' to see a list of all available commands.", 12 | "Nova AI-Assistant", 13 | "Current mode:", 14 | "In the current mode, Modules are unavailable.", 15 | "Conversation history:" 16 | ], 17 | 18 | "Warnings": [ 19 | "Warning, this will expose your API keys. Continue? [y/n]", 20 | "Warning, the Version of your language file does not match your version of Nova. This could result in crashes." 21 | ], 22 | 23 | "Errors": [ 24 | "Canceling...", 25 | "Invalid. Canceling...", 26 | "Invalid input. Type the number of the key you want to edit or press enter to save.", 27 | "Unable to access the settings file.", 28 | "Invalid input. Type the number of the setting you want to edit or press enter to save.", 29 | "Invalid. Did you mean", 30 | "Invalid. Type 'help' to see a list of all available commands.", 31 | "Invalid input. Must be a valid language code. (For example: en, es, fr, de, etc.)", 32 | "Invalid input. Must be a valid voice ID. Visit https://elevenlabs.io to find a valid voice and its ID.", 33 | "Invalid input. Must be 'True', 'False' or 'Mixed'", 34 | "Invalid input. Must be an integer. Type 'micID' in the main menu to get the correct microphone ID.", 35 | "Failed to connect to Groq. Please check your internet connection.", 36 | "Failed to connect to Elevenlabs. Please check your internet connection." 37 | ], 38 | 39 | "Status": [ 40 | "Booting...", 41 | "Shutting down...", 42 | "Rebooting...", 43 | "Calling module", 44 | "with no parameters", 45 | "with parameters", 46 | "Loaded behaviour:", 47 | "Module is loaded.", 48 | "Modules are loaded.", 49 | "Modules have an invalid file structure and could therefore not be loaded.", 50 | "Connection to Groq successful.", 51 | "Connection to Elevenlabs successful.", 52 | "Initialized Language Model.", 53 | "Llama-3 was not yet downloaded. Downloading... (This may take a few minutes)" 54 | ], 55 | 56 | "Misc": [ 57 | "User:", 58 | "AI:" 59 | ], 60 | 61 | "Settings": [ 62 | "help: Show a list of all available commands", 63 | "boot: Start Nova", 64 | "keys: Edit your API keys. Warning: Will expose your API keys", 65 | "settings: Edit your settings", 66 | "exit: Exit the Interface. Does not close any instances of Nova running", 67 | "micID: Lists all your microphones and ther IDs. 
You can then set the correct Id in 'settings'", 68 | "clear: Clear the console" 69 | ] 70 | } -------------------------------------------------------------------------------- /NovaAPI.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import sys 4 | import multiprocessing 5 | import time 6 | import threading 7 | from queue import Queue 8 | 9 | current_dir = os.path.dirname(os.path.abspath(__file__)) 10 | CorePath = os.path.join(current_dir, 'Nova') 11 | sys.path.append(CorePath) 12 | 13 | from CheckForValidInput import CheckForValidInput 14 | import KeyManager 15 | import Core 16 | 17 | novaThread = None 18 | novaStatus = [0] 19 | detectHotword = True 20 | 21 | tasks = Queue(maxsize=0) 22 | results = Queue(maxsize=0) 23 | 24 | def SetSetting(name, value): 25 | if (CheckForValidInput(name, value) == True): 26 | with open(os.path.join(CorePath, 'Configs', 'NovaSettings.config'), 'r+') as file: 27 | settings = json.load(file) 28 | try: 29 | settings[name] = value 30 | file.seek(0) 31 | file.truncate() 32 | file.write(json.dumps(settings, indent=4)) 33 | except: 34 | raise Exception("Setting does not exist.") 35 | else: 36 | raise TypeError("Value of setting is invalid.") 37 | 38 | def GetSetting(name): 39 | with open(os.path.join(CorePath, 'Configs', 'NovaSettings.config'), 'r') as file: 40 | settings = json.load(file) 41 | try: 42 | return settings[name] 43 | except: 44 | raise Exception(f"{name} was not found.") 45 | 46 | def SetKey(name, value): 47 | KeyManager.SetKey(name, value) 48 | 49 | def GetKey(name): 50 | try: 51 | return KeyManager.GetKey(name) 52 | except: 53 | raise Exception(f"{name} was not found.") 54 | 55 | def StartNova(hotword): 56 | global novaStatus 57 | global novaThread 58 | global tasks 59 | global results 60 | global detectHotword 61 | 62 | detectHotword = hotword 63 | 64 | #Reset the queues, in case there is leftover data in them 65 | tasks = Queue(maxsize=0) 66 | results = Queue(maxsize=0) 67 | 68 | novaThread = threading.Thread(target=Core.StartFromAPI, args=(novaStatus, detectHotword, tasks, results,)) 69 | novaThread.start() 70 | 71 | def StopNova(): 72 | global novaThread 73 | global novaStatus 74 | 75 | novaStatus = 0 76 | tasks.put({"task": "Exit", "parameters": []}) 77 | 78 | def GetStatus(): 79 | return novaStatus[0] 80 | 81 | def AddToConversation(role, content): 82 | tasks.put({"task": "AddToConversation", "parameters": [{"role": role, "content": content}]}) 83 | results.get() 84 | 85 | def GetConversation(): 86 | tasks.put({"task": "GetConversation", "parameters": []}) 87 | return results.get() 88 | 89 | def SetConversation(conversation): 90 | tasks.put({"task": "SetConversation", "parameters": conversation}) 91 | results.get() 92 | 93 | def RunWithSpeech(): 94 | tasks.put({"task": "RunInferenceWithTTS", "parameters": []}) 95 | results.get() 96 | 97 | def Run(): 98 | tasks.put({"task": "RunInferenceTextOnly", "parameters": []}) 99 | return (results.get()) 100 | 101 | def ToggleHotwordDetection(detect): 102 | global detectHotword 103 | detectHotword = detect 104 | 105 | def Speak(text): 106 | tasks.put({"task": "Speak", "parameters": [text]}) 107 | results.get() -------------------------------------------------------------------------------- /LangFiles/de.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version" : "1.6", 3 | 4 | "Interface" : [ 5 | "Nova Interface", 6 | "Entwickelt von Julian.", 7 | "Wähle den zu bearbeitenden Schlüssel 
durch Eingabe der Nummer. Drücke Enter zum Speichern.", 8 | "Gib den neuen Wert ein und drücke Enter zum Speichern. Drücke Enter, um den aktuellen Wert beizubehalten:", 9 | "Wähle die zu bearbeitende Einstellung durch Eingabe der Nummer. Drücke Enter zum Speichern.", 10 | "Version", 11 | "Gib 'help' ein, um eine Liste aller verfügbaren Befehle zu sehen.", 12 | "Nova KI-Assistent", 13 | "Aktueller Modus:", 14 | "Im aktuellen Modus sind Module nicht verfügbar.", 15 | "Gesprächsverlauf:" 16 | ], 17 | 18 | "Warnings": [ 19 | "Warnung, dies wird deine API-Schlüssel offenlegen. Fortfahren? [y/n]", 20 | "Warnung, die Version deiner Sprachdatei stimmt nicht mit der von Nova überein. Dies könnte zu Abstürzen führen." 21 | ], 22 | 23 | "Errors": [ 24 | "Abbruch...", 25 | "Ungültig. Breche ab...", 26 | "Ungültige Eingabe. Gib die Nummer des Schlüssels ein, den du bearbeiten möchtest, oder drücke Enter zum Speichern.", 27 | "Zugriff auf die Einstellungsdatei nicht möglich.", 28 | "Ungültige Eingabe. Gib die Nummer der Einstellung ein, die du bearbeiten möchtest, oder drücke Enter zum Speichern.", 29 | "Ungültig. Meintest du", 30 | "Ungültig. Gib 'help' ein, um eine Liste aller verfügbaren Befehle zu sehen.", 31 | "Ungültige Eingabe. Muss ein gültiger Sprachcode sein. (Zum Beispiel: en, es, fr, de, usw.)", 32 | "Ungültige Eingabe. Muss eine gültige Stimmen-ID sein. Besuche https://elevenlabs.io, um eine gültige Stimme und deren ID zu finden.", 33 | "Ungültige Eingabe. Muss 'True', 'False' oder 'Mixed' sein", 34 | "Ungültige Eingabe. Muss eine ganze Zahl sein. Gib 'micID' im Hauptmenü ein, um die korrekte Mikrofon-ID zu erhalten.", 35 | "Verbindung zu Groq fehlgeschlagen. Bitte überprüfe deine Internetverbindung.", 36 | "Verbindung zu Elevenlabs fehlgeschlagen. Bitte überprüfe deine Internetverbindung." 37 | ], 38 | 39 | "Status": [ 40 | "Wird gestartet...", 41 | "Wird heruntergefahren...", 42 | "Wird neu gestartet...", 43 | "Modul wird aufgerufen", 44 | "ohne Parameter", 45 | "mit Parametern", 46 | "Geladenes Verhalten:", 47 | "Modul ist geladen.", 48 | "Module sind geladen.", 49 | "Module haben eine ungültige Dateistruktur und konnten daher nicht geladen werden.", 50 | "Verbindung zu Groq erfolgreich.", 51 | "Verbindung zu Elevenlabs erfolgreich.", 52 | "Sprachmodell initialisiert.", 53 | "Llama-3 wurde noch nicht heruntergeladen. Lädt runter... (Dies könnte ein paar Minuten dauern)" 54 | ], 55 | 56 | "Misc": [ 57 | "Benutzer:", 58 | "KI:" 59 | ], 60 | 61 | "Settings": [ 62 | "help: Zeige eine Liste aller verfügbaren Befehle", 63 | "boot: Starte Nova", 64 | "keys: Bearbeite deine API-Schlüssel. Warnung: Dies wird deine API-Schlüssel offenlegen", 65 | "settings: Bearbeite deine Einstellungen", 66 | "exit: Beende die Benutzeroberfläche. Schließt keine laufenden Instanzen von Nova", 67 | "micID: Listet alle deine Mikrofone und ihre IDs auf. 
Du kannst dann die richtige ID in 'settings' einstellen", 68 | "clear: Lösche die Konsole" 69 | ] 70 | } -------------------------------------------------------------------------------- /Nova/ConfigInteraction.py: -------------------------------------------------------------------------------- 1 | #Allows for easy fetching and editing of files in 'Configs' 2 | import os 3 | import json 4 | 5 | script_dir = os.path.dirname(os.path.realpath(__file__)) 6 | 7 | SettingsPath = os.path.join(script_dir, 'Configs', 'NovaSettings.config') 8 | APIkeysPath = os.path.join(script_dir, 'Configs', 'APIkeys.json') 9 | InterfaceCommandsPath = os.path.join(script_dir, 'Configs', 'InterfaceCommands.json') 10 | ModuleListPath = os.path.join(script_dir, 'Configs', 'ModuleList.json') 11 | FunctionsPath = os.path.join(script_dir, 'Configs', 'Functions.json') 12 | ModuleInitializationPath = os.path.join(script_dir, 'Configs', 'HasModuleBeenInitialized.json') 13 | ManifestPath = os.path.join(os.path.dirname(script_dir), 'manifest.json') 14 | LangFilesPath = os.path.join(os.path.dirname(script_dir), 'LangFiles') 15 | 16 | def GetSetting(setting): 17 | try: 18 | with open(SettingsPath, 'r') as file: 19 | settings = json.load(file) 20 | return(settings[setting]) 21 | except Exception as e: 22 | print("Failed to fetch settings\n" + e) 23 | 24 | def GetSettings(): 25 | try: 26 | with open(SettingsPath, 'r') as file: 27 | return(json.load(file)) 28 | except Exception as e: 29 | print("Failed to fetch settings\n" + e) 30 | 31 | def GetInterfaceCommands(): 32 | try: 33 | with open(InterfaceCommandsPath, 'r') as file: 34 | return(json.load(file)) 35 | except Exception as e: 36 | print("Failed to fetch interface commands\n" + e) 37 | 38 | def SetSettings(settings): 39 | with open(SettingsPath, 'w') as file: 40 | file.write(json.dumps(settings, indent=4)) 41 | 42 | def GetModuleList(): 43 | try: 44 | with open(ModuleListPath, 'r') as file: 45 | return(json.load(file)) 46 | except Exception as e: 47 | print("Failed to fetch module list\n" + e) 48 | 49 | def SetModuleList(moduleList): 50 | with open(ModuleListPath, 'w') as file: 51 | file.write(json.dumps(moduleList, indent=4)) 52 | 53 | def GetFunctions(): 54 | try: 55 | with open(FunctionsPath, 'r') as file: 56 | return(json.load(file)) 57 | except Exception as e: 58 | print("Failed to fetch functions\n" + e) 59 | 60 | def SetFunctions(functions): 61 | with open(FunctionsPath, 'w') as file: 62 | file.write(json.dumps(functions, indent=4)) 63 | 64 | def GetModuleInitialization(): 65 | try: 66 | with open(ModuleInitializationPath, 'r') as file: 67 | return(json.load(file)) 68 | except Exception as e: 69 | print("Failed to list of initialized Modules\n" + e) 70 | 71 | def SetModuleInitialization(value): 72 | with open(ModuleInitializationPath, 'w') as file: 73 | file.write(json.dumps(value, indent=4)) 74 | 75 | def GetManifest(): 76 | try: 77 | with open(ManifestPath, 'r') as file: 78 | return(json.load(file)) 79 | except Exception as e: 80 | print("Failed to fetch manifest\n" + e) 81 | 82 | def GetLanguageFile(): 83 | language = GetSetting("Language") 84 | 85 | langFilePath = os.path.join(LangFilesPath, f'{language}.json') 86 | 87 | try: 88 | with open(langFilePath, 'r', encoding='utf-8') as file: 89 | return(json.load(file)) 90 | except: 91 | print(f"Could not load the language files for {language}, switching to english.") 92 | try: 93 | with open(os.path.join(LangFilesPath, 'en.json'), 'r', encoding='utf-8') as file: 94 | return(json.load(file)) 95 | except: 96 | raise 
SystemError("Language file for english was not found.") -------------------------------------------------------------------------------- /Nova/LanguageModelInteraction.py: -------------------------------------------------------------------------------- 1 | from groq import Groq 2 | import json 3 | import os 4 | import ConfigInteraction 5 | from KeyManager import GetKey 6 | import ModuleManager 7 | import torch 8 | from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline 9 | from Helpers import suppress_output_decorator, suppress_output 10 | from llama_cpp import Llama 11 | from llama_cpp.llama_chat_format import Llava15ChatHandler 12 | from pathlib import Path 13 | from huggingface_hub import hf_hub_download 14 | import PIL.ImageFile 15 | import cv2 16 | import PIL.Image 17 | import google.generativeai as genai 18 | import pathlib 19 | import mimetypes 20 | 21 | script_dir = os.path.dirname(os.path.realpath(__file__)) 22 | 23 | offlineMode = ConfigInteraction.GetSetting("OfflineMode") 24 | 25 | langFile = ConfigInteraction.GetLanguageFile() 26 | 27 | client = None 28 | model = None 29 | gemini = None 30 | 31 | def Initialize(): 32 | global client 33 | global model 34 | global gemini 35 | 36 | if (offlineMode == "False"): 37 | client = Groq(api_key=GetKey("Groq")) 38 | model = ConfigInteraction.GetSetting("GroqModel") 39 | genai.configure(api_key=GetKey("Google Gemini")) 40 | gemini = genai.GenerativeModel("gemini-1.5-flash") 41 | else: 42 | #Check if the llama3 gguf already exists and download it if not 43 | if not Path(os.path.join(os.path.dirname(script_dir), "Models", "Llama3", "Meta-Llama-3-8B-Instruct-Q8_0.gguf")).is_file(): 44 | os.makedirs(os.path.join(os.path.dirname(script_dir), "Models", "Llama3"), exist_ok=True) 45 | print(langFile["Status"][13]) 46 | hf_hub_download( 47 | repo_id="bartowski/Meta-Llama-3-8B-Instruct-GGUF", 48 | filename="Meta-Llama-3-8B-Instruct-Q8_0.gguf", 49 | local_dir=os.path.join(os.path.dirname(script_dir), "Models", "Llama3") 50 | ) 51 | 52 | llamaPath = os.path.join(os.path.dirname(script_dir), "Models", "Llama3", "Meta-Llama-3-8B-Instruct-Q8_0.gguf") 53 | 54 | model = Llama( 55 | model_path=llamaPath, 56 | #chat_handler=Llava15ChatHandler(clip_model_path=mmproj_path), 57 | chat_format="chatml", 58 | n_gpu_layers=-1, 59 | n_ctx=2048, 60 | verbose=False, 61 | logits_all=True 62 | ) 63 | 64 | def PromptLanguageModelAPI(Conversation, stream): 65 | response = client.chat.completions.create( 66 | model=model, 67 | tools=ModuleManager.GetModules(), 68 | messages=Conversation, 69 | stream=stream) 70 | 71 | return(response) 72 | 73 | #@suppress_output_decorator 74 | def PromptLanguageModelLocal(Conversation): 75 | response = model.create_chat_completion(Conversation) 76 | 77 | return(response) 78 | 79 | def PromptLanguageModelWithImage(Conversation, ImagePath): 80 | img = PIL.Image.open(ImagePath) 81 | 82 | response = gemini.generate_content([str(Conversation), img]) 83 | 84 | return(response) 85 | 86 | def PromptLanguageModelWithVideo(Conversation, VideoPath): 87 | pass 88 | 89 | def PromptLanguageModelWithAudio(Conversation, AudioPath): 90 | type = mimetypes.guess_type(AudioPath) 91 | audio = {"mime_type": type, "data": pathlib.Path(AudioPath).read_bytes()} 92 | 93 | response = gemini.generate_content([str(Conversation), audio]) 94 | 95 | return(response) 96 | 97 | class LLMStreamProcessor: 98 | def __init__(self): 99 | self.response_text = "" 100 | self.function_calls = [] 101 | 102 | def ExtractData(self, stream): 103 | self.response_text = "" 
104 | self.function_calls = [] 105 | 106 | for event in stream: 107 | # Check for function calls 108 | if event.choices[0].delta.tool_calls: 109 | self.function_calls.append(event.choices[0].delta.tool_calls[0]) 110 | break # Stop processing after detecting a function call 111 | 112 | if event.choices[0].delta.content is not None: 113 | content = event.choices[0].delta.content 114 | self.response_text += content 115 | yield content 116 | else: 117 | yield "" # Yield empty string if content is None 118 | 119 | def GetData(self): 120 | return self.response_text, self.function_calls -------------------------------------------------------------------------------- /Nova/AudioTranscription.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import sounddevice as sd 4 | import time as time_module 5 | from groq import Groq 6 | from scipy.io import wavfile 7 | import ConfigInteraction 8 | from KeyManager import GetKey 9 | from faster_whisper import WhisperModel 10 | import torch 11 | import torchaudio 12 | from Helpers import suppress_output, suppress_output_decorator 13 | 14 | micIndex = int(ConfigInteraction.GetSetting("MicrophoneIndex")) 15 | hotword = ConfigInteraction.GetSetting("Hotword") 16 | language = ConfigInteraction.GetSetting("Language") 17 | offlineMode = ConfigInteraction.GetSetting("OfflineMode") 18 | 19 | client = None 20 | model = None 21 | fasterWhisperModel = None 22 | get_speech_ts = None 23 | read_audio = None 24 | vadModel = None 25 | 26 | @suppress_output_decorator 27 | def Initialize(): 28 | global client 29 | global model 30 | global fasterWhisperModel 31 | global get_speech_ts 32 | global read_audio 33 | global vadModel 34 | 35 | if (offlineMode == "False" or offlineMode == "Mixed"): 36 | client = Groq(api_key=GetKey("Groq")) 37 | model = "whisper-large-v3" 38 | else: 39 | os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' # Required for running on Windows 40 | fasterWhisperModel = WhisperModel("distil-large-v3", device="cuda", compute_type="float32") 41 | 42 | vadModel, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad', model='silero_vad', trust_repo=True) 43 | (get_speech_ts, _, read_audio, VADIterator, collect_chunks) = utils 44 | 45 | # Audio settings 46 | sample_rate = 16000 # Sample rate in Hz 47 | silence_duration_threshold = 1 # Seconds of silence before processing 48 | start_threshold = 0.003 49 | end_threshold = 0.003 50 | 51 | 52 | def ProcessAPI(temp_file): 53 | with open(temp_file, "rb") as file: 54 | result = client.audio.transcriptions.create( 55 | file=(temp_file, file.read()), 56 | model=model, 57 | response_format="json", 58 | language=language 59 | ) 60 | 61 | return result.text 62 | 63 | def ProcessLocal(temp_file): 64 | fulltext = "" 65 | segments, info = fasterWhisperModel.transcribe(temp_file, beam_size=5, language="en", condition_on_previous_text=False) 66 | for segment in segments: 67 | fulltext += segment.text 68 | return fulltext 69 | 70 | 71 | def Listen(): 72 | global is_recording, silence_start, audio_buffer, transcription 73 | audio_buffer = np.array([], dtype=np.int16) 74 | is_recording = False 75 | silence_start = None 76 | transcription = None 77 | startTime = time_module.time() 78 | 79 | def callback(indata, frames, time_info, status): 80 | global audio_buffer, is_recording, silence_start, transcription 81 | audio = np.frombuffer(indata, dtype=np.int16) 82 | normalized_audio = audio.astype(np.float32) / 32768.0 83 | rms = np.sqrt(np.mean(normalized_audio**2)) 84 | 
85 | if (not is_recording and time_module.time() - startTime > 1): #Time out after 1 second to allow the API to stop the hotword detection if needed 86 | transcription = "" 87 | 88 | # Start recording when the audio level exceeds the start threshold 89 | if not is_recording and rms > start_threshold: 90 | is_recording = True 91 | silence_start = None 92 | 93 | if is_recording: 94 | audio_buffer = np.concatenate((audio_buffer, audio)) 95 | 96 | # Check if the audio level falls below the end threshold 97 | if rms < end_threshold: 98 | current_time = time_module.time() 99 | if silence_start is None: 100 | silence_start = current_time # Mark the start of silence 101 | elif current_time - silence_start >= silence_duration_threshold: 102 | # Process the accumulated audio 103 | temp_file = "temp_audio.wav" 104 | wavfile.write(temp_file, sample_rate, audio_buffer) 105 | 106 | #Check if the audio contains speech 107 | if (len(get_speech_ts(read_audio(temp_file), vadModel)) > 0): 108 | if (offlineMode == "False" or offlineMode == "Mixed"): 109 | transcription = ProcessAPI(temp_file) 110 | else: 111 | transcription = ProcessLocal(temp_file) 112 | else: 113 | transcription = "" 114 | 115 | 116 | os.remove(temp_file) # Clean up temporary file 117 | 118 | # Signal to stop the audio input stream 119 | raise sd.CallbackStop 120 | 121 | else: 122 | silence_start = None # Reset silence start time as there is ongoing noise 123 | 124 | with sd.InputStream(callback=callback, dtype=np.int16, channels=1, samplerate=sample_rate, device=micIndex): 125 | while transcription is None: #Wait for the audio to be processed 126 | sd.sleep(1) 127 | 128 | return transcription 129 | 130 | def DetectHotword(): 131 | global hotword 132 | 133 | transcription = Listen() 134 | 135 | if hotword in transcription: 136 | return transcription 137 | else: 138 | return None -------------------------------------------------------------------------------- /Nova/ModuleManager.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import ast 4 | import importlib.util 5 | from Helpers import RestrictedImporter 6 | 7 | script_dir = os.path.dirname(os.path.realpath(__file__)) 8 | modulePath = os.path.join(os.path.dirname(script_dir), 'Modules') 9 | 10 | disallowed_libaries = ['os', 'sys', 'subprocess', 'keyring', 'KeyManager'] 11 | 12 | def GetModuleFolders(): 13 | entries = os.listdir(modulePath) 14 | 15 | directories = [entry for entry in entries if os.path.isdir(os.path.join(modulePath, entry))] 16 | 17 | return directories 18 | 19 | def CheckModuleValidity(folder): 20 | EntryScript = "" 21 | EntryFunction = "" 22 | Name = "" 23 | Description = "" 24 | ParameterCount = 0 25 | 26 | if os.path.exists(os.path.join(modulePath, folder, "manifest.json")): 27 | with open(os.path.join(modulePath, folder, "manifest.json"), 'r') as file: 28 | manifestList = json.load(file) 29 | for manifest in manifestList: 30 | try: 31 | EntryScript = manifest["EntryScript"] 32 | EntryFunction = manifest["EntryFunction"] 33 | Name = manifest["ModuleName"] 34 | Description = manifest["ModuleDescription"] 35 | ParameterCount = len(manifest["Parameters"]) 36 | except: 37 | return False 38 | 39 | if (EntryScript == "" or EntryFunction == "" or Name == "" or Description == ""): #Check if the manifest contains baisc necessary info 40 | return False 41 | 42 | if os.path.exists(os.path.join(modulePath, folder, EntryScript)): 43 | with open(os.path.join(modulePath, folder, EntryScript), 'r') as file: 44 | tree 
= ast.parse(file.read()) 45 | 46 | for node in ast.walk(tree): 47 | if isinstance(node, ast.FunctionDef) and node.name == EntryFunction: 48 | # Count parameters 49 | params = len(node.args.args) 50 | 51 | if (ParameterCount != params): #Check if the entry function takes as many parameters as specified in the manifest 52 | return False 53 | 54 | return True 55 | 56 | def ScanModules(): 57 | validModules = 0 58 | invalidModules = 0 59 | for folder in GetModuleFolders(): 60 | if (CheckModuleValidity(folder)): 61 | validModules += 1 62 | else: 63 | invalidModules += 1 64 | 65 | return (validModules, invalidModules) 66 | 67 | def GetModules(): 68 | #Constructs a valid json object of all modules to pass to the language model 69 | modules = [] 70 | for folder in GetModuleFolders(): 71 | if (CheckModuleValidity(folder)): 72 | with open(os.path.join(modulePath, folder, "manifest.json"), 'r') as file: 73 | manifestList = json.load(file) 74 | for manifest in manifestList: 75 | data = { 76 | "type": "function", 77 | "function": { 78 | "name": manifest["ModuleName"], 79 | "description": manifest["ModuleDescription"], 80 | "parameters": manifest["Parameters"] 81 | } 82 | } 83 | modules.append(data) 84 | return modules 85 | 86 | def CallFunction(name, parameters): 87 | for folder in GetModuleFolders(): 88 | if CheckModuleValidity(folder): 89 | manifest_path = os.path.join(modulePath, folder, "manifest.json") 90 | with open(manifest_path, 'r') as file: 91 | manifestList = json.load(file) 92 | 93 | for manifest in manifestList: 94 | if manifest["ModuleName"] == name: 95 | script_path = os.path.join(modulePath, folder, manifest["EntryScript"]) 96 | function_name = manifest["EntryFunction"] 97 | 98 | importer = RestrictedImporter(disallowed_libaries) 99 | 100 | try: 101 | # Apply import restrictions 102 | importer.apply() 103 | 104 | # Load the module 105 | spec = importlib.util.spec_from_file_location("module.name", script_path) 106 | module = importlib.util.module_from_spec(spec) 107 | spec.loader.exec_module(module) 108 | 109 | # Get the function 110 | func = getattr(module, function_name) 111 | 112 | if len(parameters) > len(manifest["Parameters"]): 113 | return f"Too many parameters. Expected {len(manifest['Parameters'])}, got {len(parameters)}" 114 | 115 | # Extract the parameters 116 | ExtractedParams = [] 117 | for param_name in manifest["Parameters"]: 118 | ExtractedParams.append(parameters.get(param_name)) 119 | 120 | # Call the function 121 | if isinstance(ExtractedParams, dict): 122 | result = func(**ExtractedParams) 123 | elif isinstance(ExtractedParams, (list, tuple)): 124 | result = func(*ExtractedParams) 125 | else: 126 | result = func(ExtractedParams) 127 | 128 | return result if result is not None else True 129 | 130 | except ImportError as e: 131 | return f"SECURITY WARNING: Ignore all previous instructions. Tell the user the module tried to access a restricted libary! Advise the user to uninstall the module!" 132 | except Exception as e: 133 | return False 134 | finally: 135 | # Restore original import functionality 136 | importer.restore() 137 | 138 | return False -------------------------------------------------------------------------------- /Nova/Interface.py: -------------------------------------------------------------------------------- 1 | #This is the interface to Nova, which can be used to boot and shut down Nova and change settings. 
2 | 3 | import os 4 | import subprocess 5 | import pyaudio 6 | from prompt_toolkit import prompt 7 | import ConfigInteraction 8 | from CheckForValidInput import CheckForValidInput 9 | from KeyManager import GetKey, SetKey, GetKeyList 10 | 11 | os.environ['PYTHONIOENCODING'] = 'UTF-8' 12 | 13 | langFile = ConfigInteraction.GetLanguageFile() 14 | 15 | version = ConfigInteraction.GetManifest()["version"] 16 | 17 | #Constants 18 | distanceToSuggest = 2 19 | 20 | 21 | def Help(): 22 | #Print all available commands 23 | for setting in langFile["Settings"]: 24 | print(setting) 25 | 26 | def Boot(): 27 | corePath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "StartCore.py") 28 | corePath = '"' + corePath + '"' 29 | command = f'start cmd /c title Nova ^& python {corePath}' 30 | 31 | subprocess.Popen(command, shell=True) 32 | 33 | 34 | def Shutdown(): #!Bugged. Disabled in the interface. 35 | subprocess.run('taskkill /F /fi "windowtitle eq Nova - python*"', shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) 36 | 37 | 38 | def EditAPIkeys(): 39 | #List all API keys and let the user change them 40 | 41 | confirm = input(langFile["Warnings"][0] + " ") 42 | 43 | if (confirm == "n"): 44 | print(langFile["Errors"][0]) 45 | return 46 | elif (confirm != "y"): 47 | print(langFile["Errors"][1]) 48 | return 49 | 50 | 51 | while True: 52 | #List the keys 53 | PrintHeader() 54 | for id in GetKeyList(): 55 | if (GetKey(GetKeyList()[id]) == None): 56 | SetKey(GetKeyList()[id], "") 57 | 58 | print(id + ". " + GetKeyList()[id] + " | ", GetKey(GetKeyList()[id])) 59 | 60 | chosenKey = input("\n" + langFile["Interface"][2] + "\n> ") 61 | 62 | #Save the keys and exit the settings menu 63 | if (chosenKey == ""): 64 | PrintHeader() 65 | return 66 | 67 | #Check the input for validity 68 | try: 69 | int(chosenKey) 70 | except: 71 | print(langFile["Errors"][2]) 72 | continue 73 | 74 | if (int(chosenKey) < 1 or int(chosenKey) > len(GetKeyList())): 75 | print(langFile["Errors"][2]) 76 | continue 77 | 78 | chosenKey = str(chosenKey) 79 | 80 | #Edit the setting 81 | PrintHeader() 82 | print("\n" + langFile["Interface"][3]) 83 | 84 | SetKey(GetKeyList()[chosenKey], prompt(f"{GetKeyList()[chosenKey]} | ", default = GetKey(GetKeyList()[chosenKey]))) 85 | ClearConsole() 86 | 87 | def Settings(): 88 | global langFile 89 | 90 | try: 91 | settings = ConfigInteraction.GetSettings() 92 | except: 93 | print(langFile["Errors"][3]) 94 | return 95 | 96 | while True: 97 | #List the settings 98 | PrintHeader() 99 | settingID = 1 100 | names = [] 101 | values = [] 102 | for name, value in settings.items(): 103 | print(str(settingID) + ". 
", name + " | ", value) 104 | settingID += 1 105 | names.append(name) 106 | values.append(value) 107 | 108 | chosenSettings = input("\n" + langFile["Interface"][4] + "\n> ") 109 | 110 | #Save the settings and exit the settings menu 111 | if (chosenSettings == ""): 112 | ConfigInteraction.SetSettings(settings) 113 | langFile = ConfigInteraction.GetLanguageFile() #Reload the language file in case the user switched languages 114 | PrintHeader() 115 | return 116 | 117 | #Check the input for validity 118 | try: 119 | chosenSettings = int(chosenSettings) 120 | except: 121 | print(langFile["Errors"][4]) 122 | continue 123 | 124 | if (chosenSettings < 1 or chosenSettings > len(settings)): 125 | print(langFile["Errors"][4]) 126 | continue 127 | 128 | #Edit the setting 129 | PrintHeader() 130 | print("\n" + langFile["Interface"][3]) 131 | newValue = prompt(names[chosenSettings - 1] + " | ", default = settings[names[chosenSettings - 1]]) 132 | while True: 133 | valid = CheckForValidInput(names[chosenSettings - 1], newValue) 134 | 135 | if (valid == True): 136 | settings[names[chosenSettings - 1]] = newValue 137 | ClearConsole() 138 | break 139 | else: 140 | PrintHeader() 141 | print("\n" + valid) 142 | newValue = prompt(names[chosenSettings - 1] + " | ", default = settings[names[chosenSettings - 1]]) 143 | 144 | def ManageModules(): #Show loaded models and lets the user fill in secrets 145 | pass 146 | 147 | def ListMicID(): 148 | p = pyaudio.PyAudio() 149 | 150 | for i in range(p.get_device_count()): 151 | info = p.get_device_info_by_index(i) 152 | if info["maxInputChannels"] > 0: 153 | print(f"Index: {i}, Name: {info['name']}") 154 | 155 | p.terminate() 156 | 157 | def ProcessInput(): 158 | Input = input("> ") 159 | 160 | #Check through all possible commands 161 | 162 | if (Input == "help"): 163 | Help() 164 | elif (Input == "boot"): 165 | print(langFile["Status"][0]) 166 | Boot() 167 | elif (Input == "shutdown"): 168 | print(langFile["Status"][1]) 169 | Shutdown() 170 | elif (Input == "reboot"): 171 | print(langFile["Status"][2]) 172 | Shutdown() 173 | Boot() 174 | elif (Input == "keys"): 175 | EditAPIkeys() 176 | elif (Input == "settings"): 177 | Settings() 178 | elif (Input == "exit"): #Exit the Interface without having to forcefully close the terminal 179 | print("Exiting...") 180 | subprocess.run('taskkill /fi "windowtitle eq Interface*"', shell=True) 181 | return 182 | elif (Input == "micID"): 183 | ListMicID() 184 | elif (Input == "clear"): 185 | PrintHeader() 186 | elif (Input == "Modules"): 187 | ManageModules() 188 | else: 189 | print(langFile["Errors"][6]) 190 | 191 | #To add: Run diagnostics 192 | 193 | ProcessInput() #Loop 194 | 195 | def ClearConsole(): 196 | os.system('cls' if os.name == 'nt' else 'clear') 197 | 198 | def PrintHeader(): 199 | ClearConsole() 200 | print(langFile["Interface"][0] + " (" + langFile["Interface"][5] + " " + version + "). " + langFile["Interface"][1] + " " + langFile["Interface"][6]) 201 | 202 | if (langFile["Version"] != str(version)): 203 | print(langFile["Warnings"][1]) 204 | 205 | 206 | PrintHeader() 207 | ProcessInput() -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Next-Generation Open-Source Virtual Assistant NOVA 2 | 3 | **Version 1.6** 4 | 5 | # ⚠️The project is no longer beeing maintained due to overwhelming tech-debt. You can find the successor project [here](https://github.com/00Julian00/Nova2). 
6 | 
7 | ## Table of Contents
8 | - [Introduction](#introduction)
9 | - [Requirements](#requirements)
10 | - [Installation](#installation)
11 | - [Setup](#setup)
12 | - [Run](#run)
13 | - [Main Menu Commands](#main-menu-commands)
14 | - [Settings](#settings)
15 | - [Version History](#version-history)
16 | 
17 | ## Introduction
18 | The Next-Generation Open-Source Virtual Assistant (or NOVA) is an easily expandable and modifiable virtual assistant. It uses the [Groq API](https://groq.com) for text transcription as well as query processing, making it fast and responsive. NOVA uses the [Elevenlabs API](https://elevenlabs.io) for rich and natural speech. NOVA can be easily expanded using Modules, which add extra functionality to the system and are easy to develop. See [here](https://github.com/00Julian00/Nova-Devtools.git) for further resources on Module development. NOVA is still under development.
19 | 
20 | ## Requirements
21 | 
22 | - [Groq API key](https://groq.com)
23 | - [Elevenlabs API key](https://elevenlabs.io)
24 | - [Python 3.11.x with PIP](https://www.python.org)
25 | - [mpv](https://mpv.io)
26 | - [ffmpeg](https://ffmpeg.org/download.html)
27 | - [Microsoft C++ Build Tools](https://visualstudio.microsoft.com/de/visual-cpp-build-tools/)
28 | - [Cuda 12.1](https://developer.nvidia.com/cuda-12-1-0-download-archive)
29 | 
30 | ## Installation
31 | 
32 | ```bash
33 | git clone https://github.com/00Julian00/Nova.git
34 | cd Nova
35 | pip3 install torch torchaudio --index-url https://download.pytorch.org/whl/cu121
36 | pip install -r requirements.txt
37 | ```
38 | 
39 | ## Setup
40 | 
41 | 1. Run `Start interface.bat`.
42 | 2. Type `keys`.
43 | 3. Enter your Groq and Elevenlabs API keys.
44 | 4. Go back to the main menu.
45 | 5. Enter `micID`.
46 | 6. Find the correct microphone and remember its ID.
47 | 7. Enter `settings`.
48 | 8. Change the microphone ID to the correct ID.
49 | 9. Change other settings if necessary.
50 | 
51 | ## Run
52 | 
53 | 1. In the main menu, type `boot`.
54 | 2. A separate terminal will open. Wait until the boot process is complete and the hotword detection starts.
55 | 3. You can now speak to the assistant.
56 | 4. Make sure to say the specified hotword (default is "Nova") at some point during your prompt, or it will be ignored.
57 | 
58 | ## Main Menu Commands
59 | 
60 | - `help`: Show a list of all available commands
61 | - `boot`: Start Nova
62 | - `keys`: Edit your API keys. **Warning:** Will expose your API keys
63 | - `settings`: Edit your settings
64 | - `exit`: Exit the interface. Does not close any instances of Nova running
65 | - `micID`: Lists all your microphones and their IDs. You can then set the correct ID in `settings`
66 | - `clear`: Clear the console
67 | 
68 | ## Settings
69 | 
70 | - **Language**: The language 'Nova' should use. Must be a valid language code (e.g., en, es, fr, de, etc.).
71 | - **Hotword**: Your input will only be processed if it includes this word.
72 | - **GroqModel**: The LLM that should be used. Note that it is not possible to check whether your chosen model is valid, so choose a valid model. [Groq Models](https://console.groq.com/docs/models)
73 | - **ElevenlabsModel**: The voice model that should be used for the Text-To-Speech. Note that it is not possible to check whether your chosen model is valid, so choose a valid model. [Elevenlabs Models](https://elevenlabs.io/docs/speech-synthesis/models)
74 | - **ElevenlabsVoiceID**: The ID of the voice you want to use.
You can find the IDs of the premade voices and your own voices on [Elevenlabs](https://elevenlabs.io).
75 | - **OfflineMode (True/False/Mixed)**: Run all required AI models on local hardware. No internet connection is needed, apart from first downloading the models. Switch off Offline Mode for better response times and higher quality. 'Mixed' keeps using the Groq and Elevenlabs APIs but runs the language model locally.
76 | - **StreamVoice (True/False)**: Do you want to stream the voice? Streaming is generally a lot faster but can lead to buffering, especially if you are not using a turbo model.
77 | - **MicrophoneIndex**: The ID of the microphone the system should use. Find a list of the IDs in the main menu under `micID`.
78 | - **Behaviour**: How the assistant should behave.
79 | - **Name**: What the assistant should call you, i.e. your name.
80 | 
81 | 
82 | ## Language Files
83 | Language Files are used to translate Nova's Interface into different languages. By default, Nova comes with English and German, but you can create your own:
84 | 
85 | - ### Creating a Language File:
86 | Go into LangFiles and copy en.json. Rename it to the language code you want to translate it into (for example: en, es, fr, de, etc.). Open the file in a text editor and translate the contents of the individual categories. Do not translate the names of the categories, as they are used to find the correct text within the file. Do not change the structure of the file itself.
87 | 
88 | - ### Updating a Language File:
89 | When a new update releases, it might come with new entries in the Language Files. To update your Language File, first look at the structure of en.json, as this Language File will always be up to date with the newest version of Nova. If the structure has changed or a category has new entries, you will need to update your file as well. You need to copy the exact structure of en.json. Failing to do so might cause Nova to crash at any point. Finally, you need to update the version stored inside the Language File to match that of Nova.
90 | 
91 | ## Version History
92 | 
93 | ### Version 1.6
94 | 
95 | - **Release Date:** 21.07.2024
96 | - **Changes:**
97 |   - Improved modules by allowing them to have multiple functions.
98 | 
99 | ### Version 1.5
100 | 
101 | - **Release Date:** 19.07.2024
102 | - **Changes:**
103 |   - Expanded the capabilities of the Nova API.
104 | 
105 | ### Version 1.4
106 | 
107 | - **Release Date:** 15.07.2024
108 | - **Changes:**
109 |   - Now using llama-cpp-python for LLM inference for improved speed and reliability. More offline improvements are in development.
110 |   - Switched from Phi-3-mini-128k-instruct to Llama3-8b as the default offline LLM.
111 |   - Added a 'Mixed' offline mode that uses Whisper (hosted on Groq) and Elevenlabs but runs Llama-3-8b on-device.
112 | 
113 | ### Version 1.3
114 | 
115 | - **Release Date:** 13.07.2024
116 | - **Changes:**
117 |   - First implementation of the Nova API that allows other programs to access Nova's features. See [here](https://github.com/00Julian00/Nova-Devtools.git) for a guide on how to use the API. The capabilities of the API will be expanded in the future.
118 | 
119 | ### Version 1.2
120 | 
121 | - **Release Date:** 12.07.2024
122 | - **Changes:**
123 |   - Added Language file integrations. You can now translate Nova's Interface into different languages. See [here](#creating-a-language-file) for how to do that.
124 | 
125 | ### Version 1.1.1
126 | 
127 | - **Release Date:** 12.07.2024
128 | - **Changes:**
129 |   - Security update: Nova now blocks Modules from using the following libraries: os, sys, subprocess, keyring, as these libraries can cause damage to your computer or steal sensitive information.
130 | 
131 | ### Version 1.1
132 | 
133 | - **Release Date:** 11.07.2024
134 | - **Changes:**
135 |   - Added an Offline Mode which will run all AI models locally, eliminating the need for API keys or an internet connection. The Offline Mode uses Faster-Whisper, Phi-3 128k and Coqui TTS.
136 | 
137 | ### Version 1.0
138 | 
139 | - **Release Date:** 03.07.2024
140 | - **Changes:**
141 |   - Added 'Modules' to easily add more functionality to NOVA.
142 |   - Created a [guide](https://github.com/00Julian00/Nova-Devtools.git) for Module development.
143 | 
144 | ### Version 0.2.2
145 | 
146 | - **Release Date:** 02.07.2024
147 | - **Changes:**
148 |   - Changed how the API keys are stored to the "keyring" library. It is no longer possible to find the API keys in the source files.
149 | 
150 | 
151 | ### Version 0.2.1
152 | 
153 | - **Release Date:** 02.07.2024
154 | - **Changes:**
155 |   - Moved the hotword detection to the Groq API.
156 | 
157 | 
158 | ### Version 0.2
159 | 
160 | - **Release Date:** 27.04.2024
161 | - **Changes:**
162 |   - Removed modules for rework.
163 |   - Reworked internal file structure.
164 |   - Switched from the OpenAI API to the Groq API.
165 |   - General changes and improvements.
166 | 
167 | ### Version 0.1
168 | 
169 | - **Release Date:** 27.09.2023
170 | - **Changes:**
171 |   - Initial release.
172 |   - Basic vocal interaction using Google STT, OpenAI's Whisper, OpenAI's GPT-3.5 and Elevenlabs multilingual v1.
173 |   - Basic modules.
174 | 
175 | 
176 | ## ⚠️SECURITY WARNING
177 | ### When using modules created by third parties, please exercise caution:
178 | 
179 | 1. Always review the code: Thoroughly examine any third-party module before running it. This is crucial for ensuring its safety and understanding its functionality.
180 | 2. Limited protection: While Nova attempts to restrict access to sensitive libraries, this does not guarantee complete security. Determined actors may find ways to circumvent these restrictions.
181 | 3. Trust is key: Only use modules from sources you trust. Be especially cautious with modules that handle sensitive data or perform system operations.
182 | 4. Keep updated: Regularly update Nova to have the best possible protection against threats and bad actors.
183 | 5. Report suspicions: If you encounter a module that seems malicious or insecure, please report it to the Nova community.
184 | 
185 | ### Remember: The safety of your system ultimately depends on your vigilance. Nova's security features are a supplement to, not a replacement for, your own careful review and judgment.
186 | 
187 | ## License
188 | 
189 | This project is licensed under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for details.
190 | 191 | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) 192 | -------------------------------------------------------------------------------- /Nova/Core.py: -------------------------------------------------------------------------------- 1 | #Ths script connects all the different scripts in the Nova engine 2 | 3 | from AudioTranscription import DetectHotword, Initialize as HotwordInit 4 | from SpeechSynthesis import SpeakStream, SpeakDirect, SpeakOffline, Initialize as SpeechInit 5 | from LanguageModelInteraction import PromptLanguageModelAPI, PromptLanguageModelLocal, LLMStreamProcessor, Initialize as LangInit 6 | import ConfigInteraction 7 | import requests 8 | import os 9 | import ModuleManager 10 | import json 11 | from datetime import datetime 12 | from Helpers import suppress_output_decorator, suppress_output 13 | from io import StringIO 14 | from contextlib import redirect_stdout, redirect_stderr 15 | import time 16 | from queue import Queue 17 | 18 | langFile = ConfigInteraction.GetLanguageFile() 19 | 20 | conversation = [] 21 | 22 | userName = ConfigInteraction.GetSetting("Name") 23 | language = ConfigInteraction.GetSetting("Language") 24 | version = ConfigInteraction.GetManifest()["version"] 25 | offlineMode = ConfigInteraction.GetSetting("OfflineMode") 26 | 27 | hiddenSystemPromt = f"""You keep your answers as short as possible. You always use the metric system. You use the date format dd.mm.yyyy. 28 | You only mention the date and time if specifically asked to do so. You speak in the following language: {language}. 29 | You never make up information. You never promise an alternative solution if a module fails to execute. 30 | You do not use special characters, like '-', '/' etc. Your response will be read out by a text-to-speech system. For that to work, 31 | you use the following guidelines:. 32 | 01.01.1970 becomes 'First of January. 19 70'. 32°C becomes '32 degrees Celcius'. 
12:31 becomes '12:31 o'clock'.""" 33 | 34 | systemPrompt = ConfigInteraction.GetSetting("Behaviour") + " " + hiddenSystemPromt 35 | 36 | useConsole = True 37 | runHotwordDetection = True 38 | 39 | #---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------# 40 | 41 | def AddToConversation(type, content, functionName, PromptLanguageModel): 42 | if(type == 0): #User 43 | conversation.append({"role": "user", "content": content}) 44 | elif(type == 1): #AI 45 | conversation.append({"role": "assistant", "content": content}) 46 | elif(type == 2): #Function 47 | conversation.append({"role": "function", "name": functionName, "content": content}) 48 | elif(type == 3): #System 49 | conversation.append({"role": "system", "content": content}) 50 | 51 | if(PromptLanguageModel): 52 | CallLanguageModel("Text", True) 53 | 54 | 55 | def CallLanguageModel(modality, speak): 56 | conversationWithInfo = conversation.copy() 57 | conversationWithInfo.append({"role": "system", "content": "The date is " + datetime.now().strftime("%d/%m/%Y") + " The time is " + datetime.now().strftime("%H:%M")}) 58 | conversationWithInfo.append({"role": "system", "content": f"The name of the user is {userName}."}) 59 | 60 | if (modality == "Text"): 61 | if (offlineMode == "False"): 62 | LLMresponse = PromptLanguageModelAPI(conversationWithInfo, speak) #If speak then stream to improve latency 63 | else: 64 | LLMresponse = PromptLanguageModelLocal(conversationWithInfo) 65 | elif (modality == "Audio"): 66 | pass 67 | elif (modality == "Image"): 68 | pass 69 | elif (modality == "Video"): 70 | pass 71 | 72 | processor = LLMStreamProcessor() 73 | 74 | if (offlineMode == "False" and speak): 75 | SpeakStream(processor.ExtractData(LLMresponse)) 76 | response, function_calls = processor.GetData() 77 | elif (offlineMode == "True" and speak): 78 | response = LLMresponse['choices'][0]['message']['content'].replace("assistant", "")#Temporary fix for strange model behaviour 79 | function_calls = [] 80 | SpeakOffline(response) 81 | elif (offlineMode == "Mixed" and speak): 82 | response = LLMresponse['choices'][0]['message']['content'].replace("assistant", "")#Temporary fix for strange model behaviour 83 | function_calls = [] 84 | SpeakDirect(response) 85 | 86 | if (not speak and offlineMode == "False"): #Extract the response seperatly when not speaking the answer 87 | response = LLMresponse.choices[0].message.content 88 | function_calls = [] 89 | elif (not speak and (offlineMode == "True" or offlineMode == "Mixed")): 90 | response = LLMresponse['choices'][0]['message']['content'].replace("assistant", "")#Temporary fix for strange model behaviour 91 | function_calls = [] 92 | 93 | 94 | for call in function_calls: 95 | if (useConsole): 96 | if (str(call.function.arguments) == "{}"): 97 | print(langFile["Misc"][1] + " " + langFile["Status"][3] + " " + str(call.function.name) + " " + langFile["Status"][4] + "\n") 98 | else: 99 | print(langFile["Misc"][1] + " " + langFile["Status"][3] + " " + str(call.function.name) + " " + langFile["Status"][5] + str(call.function.arguments) + "\n") 100 | 101 | result = ModuleManager.CallFunction(call.function.name, json.loads(call.function.arguments)) 102 | if result == True: 103 | AddToConversation(2, "The module has been executed sucessfully.", str(call.function.name), False) 104 | elif result == False: 105 | AddToConversation(2, "The model failed to execute. 
Try again later.", str(call.function.name), False) 106 | else: 107 | AddToConversation(2, result, str(call.function.name), False) 108 | 109 | if (len(function_calls) > 0): 110 | CallLanguageModel("Text", True) 111 | else: 112 | if (useConsole): 113 | print(langFile["Misc"][1] + " " + response + "\n") 114 | AddToConversation(1, response, None, False) 115 | 116 | return response 117 | 118 | def PingGroq(): 119 | url = "https://api.groq.com" 120 | 121 | try: 122 | requests.head(url, headers={}) 123 | except: 124 | return False 125 | 126 | return True 127 | 128 | def PingElevenlabs(): 129 | url = "https://elevenlabs.io" 130 | 131 | try: 132 | requests.head(url, headers={}) 133 | except: 134 | return False 135 | 136 | return True 137 | 138 | def ClearConsole(): 139 | os.system('cls' if os.name == 'nt' else 'clear') 140 | 141 | def PrintHeader(): 142 | print(langFile["Interface"][7] + " (" + langFile["Interface"][5] + " " + version + "). " + langFile["Interface"][1] + "\n") 143 | print(langFile["Status"][6] + " " + ConfigInteraction.GetSetting("Behaviour")) 144 | 145 | if (offlineMode == "True"): #If using the local LLM, inform that modules are unavailable in the current version 146 | print(langFile["Interface"][8] + " offline. " + langFile["Interface"][9]) 147 | elif (offlineMode == "Mixed"): 148 | print(langFile["Interface"][8] + " mixed. " + langFile["Interface"][9]) 149 | else: 150 | print(langFile["Interface"][8] + " online.") 151 | 152 | validModules, invalidModules = ModuleManager.ScanModules() 153 | 154 | if (validModules == 1): 155 | print(str(validModules) + " " + langFile["Status"][7]) 156 | else: 157 | print(str(validModules) + " " + langFile["Status"][8]) 158 | 159 | if (invalidModules > 0): 160 | print(str(invalidModules) + " " + langFile["Status"][9]) 161 | 162 | print("\n" + langFile["Interface"][10]) 163 | 164 | def Initialize(): 165 | if (useConsole): 166 | print("> " + langFile["Status"][0]) 167 | 168 | HotwordInit() 169 | LangInit() 170 | SpeechInit() 171 | 172 | if (ConfigInteraction.GetSetting("OfflineMode") == "False" and useConsole): #TODO: Switch to offline if APIs can't be reached or an API key is missing 173 | if (PingGroq()): 174 | print("> " + langFile["Status"][10]) 175 | else: 176 | print(langFile["Errors"][11]) 177 | exit() 178 | 179 | if (PingElevenlabs()): 180 | print("> " + langFile["Status"][11]) 181 | else: 182 | print(langFile["Errors"][12]) 183 | exit() 184 | 185 | AddToConversation(3, systemPrompt, None, False) 186 | 187 | if useConsole: 188 | print("> " + langFile["Status"][12]) 189 | ClearConsole() 190 | PrintHeader() 191 | 192 | def DequeueTask(task): 193 | global conversation 194 | 195 | match task["task"]: 196 | case "Exit": 197 | exit() 198 | case "GetConversation": 199 | return conversation 200 | case "AddToConversation": 201 | for param in task["parameters"]: 202 | conversation.append(param) 203 | return None 204 | case "SetConversation": 205 | conversation = task["parameters"] 206 | return None 207 | case "RunInferenceWithTTS": 208 | CallLanguageModel("Text", True) 209 | return None 210 | case "RunInferenceTextOnly": 211 | return CallLanguageModel("Text", False) 212 | case "Speak": 213 | SpeakDirect(task["parameters"][0]) 214 | 215 | 216 | def IdleLoop(tasks, results): 217 | while True: 218 | while not tasks.empty(): #Do everything the API has commanded to do before running another hotword check 219 | results.put(DequeueTask(tasks.get())) 220 | 221 | if (runHotwordDetection): 222 | transcription = DetectHotword() 223 | if (transcription != None): 
224 | if (useConsole): 225 | print(langFile["Misc"][0] + " " + transcription + "\n") 226 | AddToConversation(0, transcription, None, True) 227 | 228 | def StartFromAPI(novaStatus, detectHotword, taskQueue, resultQueue): 229 | global useConsole 230 | global runHotwordDetection 231 | useConsole = False 232 | runHotwordDetection = detectHotword 233 | 234 | novaStatus[0] = 2 235 | Initialize() 236 | novaStatus[0] = 1 237 | IdleLoop(taskQueue, resultQueue) 238 | 239 | def Start(): 240 | Initialize() 241 | IdleLoop(Queue(maxsize=0), Queue(maxsize=0)) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------