├── .gitattributes ├── .gitignore ├── .idea ├── .gitignore ├── Jarvis_AI.iml ├── inspectionProfiles │ └── profiles_settings.xml ├── misc.xml ├── modules.xml └── vcs.xml ├── JarvisAI ├── JarvisAI │ ├── __init__.py │ ├── actions.json │ ├── brain │ │ ├── __init__.py │ │ ├── auth.py │ │ ├── chatbot_premium.py │ │ ├── intent_classification.py │ │ └── ner.py │ ├── features │ │ ├── __init__.py │ │ ├── click_photo.py │ │ ├── covid_cases.py │ │ ├── date_time.py │ │ ├── games.py │ │ ├── greet.py │ │ ├── iambored.py │ │ ├── internet_speed_test.py │ │ ├── joke.py │ │ ├── news.py │ │ ├── places_near_me.py │ │ ├── screenshot.py │ │ ├── send_email.py │ │ ├── tell_me_about.py │ │ ├── volume_controller.py │ │ ├── weather.py │ │ ├── website_open.py │ │ ├── whatsapp_message.py │ │ ├── youtube_play.py │ │ └── youtube_video_downloader.py │ ├── features_manager.py │ └── utils │ │ ├── __init__.py │ │ ├── input_output.py │ │ ├── speech_to_text │ │ ├── __init__.py │ │ ├── speech_to_text.py │ │ └── speech_to_text_whisper.py │ │ └── text_to_speech │ │ ├── __init__.py │ │ └── text_to_speech.py ├── License.txt ├── MANIFEST.in ├── README for JarvisAI 4.3 and below.md ├── README.md ├── TODO ├── __init__.py └── setup.py ├── License.txt ├── README.md ├── cmd_twine.txt └── requirements.txt /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | manager.py 30 | manager_code.py 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .nox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *.cover 52 | *.py,cover 53 | .hypothesis/ 54 | .pytest_cache/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | db.sqlite3-journal 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # IPython 83 | profile_default/ 84 | ipython_config.py 85 | 86 | # pyenv 87 | .python-version 88 | 89 | # pipenv 90 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 91 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 92 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 93 | # install all needed dependencies. 
94 | #Pipfile.lock 95 | 96 | # celery beat schedule file 97 | celerybeat-schedule 98 | 99 | # SageMath parsed files 100 | *.sage.py 101 | 102 | # Environments 103 | .env 104 | .venv 105 | env/ 106 | venv/ 107 | ENV/ 108 | env.bak/ 109 | venv.bak/ 110 | 111 | # Spyder project settings 112 | .spyderproject 113 | .spyproject 114 | 115 | # Rope project settings 116 | .ropeproject 117 | 118 | # mkdocs documentation 119 | /site 120 | 121 | # mypy 122 | .mypy_cache/ 123 | .dmypy.json 124 | dmypy.json 125 | 126 | # Pyre type checker 127 | .pyre/ 128 | JarvisAI/JarvisAI/user_configs/speech_engine.txt 129 | JarvisAI/JarvisAI/user_configs/api_key.txt 130 | JarvisAI/.idea/misc.xml 131 | JarvisAI/.idea/JarvisAI.iml 132 | *.iml 133 | *.iml 134 | *.xml 135 | *.iml 136 | *.iml 137 | -------------------------------------------------------------------------------- /.idea/.gitignore: -------------------------------------------------------------------------------- 1 | # Default ignored files 2 | /shelf/ 3 | /workspace.xml 4 | -------------------------------------------------------------------------------- /.idea/Jarvis_AI.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 14 | -------------------------------------------------------------------------------- /.idea/inspectionProfiles/profiles_settings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 6 | -------------------------------------------------------------------------------- /.idea/misc.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 7 | -------------------------------------------------------------------------------- /.idea/modules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /.idea/vcs.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import logging 4 | import requests 5 | 6 | try: 7 | from utils import input_output 8 | from brain import intent_classification 9 | from brain import ner 10 | from features_manager import action_map 11 | from brain.auth import verify_user 12 | except: 13 | from JarvisAI.utils import input_output 14 | from JarvisAI.brain import intent_classification 15 | from JarvisAI.brain import ner 16 | from JarvisAI.features_manager import action_map 17 | from JarvisAI.brain.auth import verify_user 18 | 19 | 20 | def action_handler(intent, query, api_key): 21 | intent = intent.replace("_", " ") 22 | if intent in action_map: 23 | logging.info(f"Intent {intent} matched. Calling action {action_map[intent]}") 24 | entities = ner.perform_ner(query=query) 25 | try: 26 | ans = action_map[intent](query=query, intent=intent, entities=entities, input_output_fun=input_output, 27 | api_key=api_key) 28 | if ans == "Entity not found": 29 | intent = "others" 30 | print( 31 | "Entity not found, calling action for intent 'others'. 
If you are not upgraded to our premium plan, " 32 | "please upgrade to use this feature.") 33 | ans = action_map[intent](query=query, intent=intent, entities=entities, input_output_fun=input_output, 34 | api_key=api_key) 35 | return ans 36 | return ans 37 | except Exception as e: 38 | ans = action_map[intent](query=query, intent=intent, entities=entities, input_output_fun=input_output, 39 | api_key=api_key) 40 | if ans == "Entity not found": 41 | intent = "others" 42 | print( 43 | "Entity not found, calling action for intent 'others'. If you are not upgraded to our premium plan, " 44 | "please upgrade to use this feature.") 45 | ans = action_map[intent](query=query, intent=intent, entities=entities, input_output_fun=input_output, 46 | api_key=api_key) 47 | return ans 48 | return ans 49 | else: 50 | logging.info(f"Intent {intent} not found in action map.") 51 | return "Sorry, I don't know how to handle this intent." 52 | 53 | 54 | # function to add new actions to the actions_map dictionary 55 | def add_action(intent: str, action: object): 56 | """Add a new action to the action map. 57 | @param intent: (String) The intent to be mapped to the action. 58 | @param action: (Object) The function to call when the intent is matched. 59 | @return: (String) The message to be displayed to the user. 60 | """ 61 | try: 62 | intent = intent.replace("_", " ") 63 | action_map[intent] = action 64 | 65 | if not os.path.exists('actions.json'): 66 | url = "https://raw.githubusercontent.com/Dipeshpal/Jarvis_AI/master/JarvisAI/JarvisAI/actions.json" 67 | data = requests.get(url).json() 68 | with open('actions.json', 'w') as f: 69 | json.dump(data, f) 70 | 71 | with open("actions.json", "r") as f: 72 | actions = json.load(f) 73 | 74 | # Check for duplicates 75 | duplicates = [i for i, act in enumerate(actions) if act["intent"] == intent] 76 | if len(duplicates) > 1: 77 | for i in duplicates[1:]: 78 | actions.pop(i) 79 | logging.warning(f"Duplicate actions found for intent {intent}. Only the first action will be used.") 80 | print(f"Found {len(duplicates)} duplicates for intent '{intent}'. Deleted all but the first.") 81 | 82 | # Check if the action already exists 83 | action_exists = False 84 | for i, act in enumerate(actions): 85 | if act["intent"] == intent: 86 | action_exists = True 87 | action_index = i 88 | break 89 | 90 | if action_exists: 91 | print(f"Previous examples for intent '{intent}': {actions[action_index]['example']}") 92 | overwrite = input(f"Intent '{intent}' already exists. Do you want to overwrite? (y/n)") 93 | if overwrite.lower() == "y": 94 | num_examples = int(input("How many examples do you want to add? (maximum 3): ")) 95 | examples = [] 96 | for i in range(num_examples): 97 | examples.append(input(f"Enter example {i + 1} for intent {intent}: ")) 98 | actions[action_index] = { 99 | "intent": intent, 100 | "example": examples 101 | } 102 | else: 103 | return "Intent not overwritten." 104 | else: 105 | num_examples = int(input("How many examples do you want to add? (maximum 10): ")) 106 | examples = [] 107 | for i in range(num_examples): 108 | examples.append(input(f"Enter example {i + 1} for intent {intent}: ")) 109 | actions.append({ 110 | "intent": intent, 111 | "example": examples 112 | }) 113 | 114 | # Write the updated actions back to the file 115 | with open("actions.json", "w") as f: 116 | json.dump(actions, f, indent=4) 117 | 118 | logging.info(f"Action for intent {intent} has been added/updated. 
Train new model to use this action.") 119 | print(f"Action for intent {intent} has been added/updated. Train new model to use this action.") 120 | return f"Action for intent {intent} has been added/updated. Train new model to use this action." 121 | except Exception as e: 122 | logging.error(f"Error adding action: {e}") 123 | raise f"Error adding action: {e}" 124 | 125 | 126 | class JarvisAI(input_output.JarvisInputOutput): 127 | def __init__(self, input_mechanism='text', output_mechanism='text', 128 | google_speech_api_key=None, backend_tts_api='pyttsx3', 129 | use_whisper_asr=False, display_logs=False, 130 | api_key=None): 131 | super().__init__(input_mechanism=input_mechanism, output_mechanism=output_mechanism, 132 | google_speech_api_key=google_speech_api_key, backend_tts_api=backend_tts_api, 133 | use_whisper_asr=use_whisper_asr, duration_listening=5, display_logs=display_logs, 134 | api_key=api_key) 135 | 136 | if os.path.exists('jarvis.log'): 137 | os.remove('jarvis.log') 138 | 139 | self.display_logs = display_logs 140 | if not self.display_logs: 141 | logging.basicConfig(filename='jarvis.log', level=logging.DEBUG) 142 | else: 143 | logging.basicConfig(level=logging.DEBUG) 144 | 145 | if not os.path.exists('actions.json'): 146 | url = "https://raw.githubusercontent.com/Dipeshpal/Jarvis_AI/master/JarvisAI/JarvisAI/actions.json" 147 | data = requests.get(url).json() 148 | with open('actions.json', 'w') as f: 149 | json.dump(data, f) 150 | 151 | def handle_input(self): 152 | try: 153 | if self.input_mechanism == 'text': 154 | return self.text_input() 155 | elif self.input_mechanism == 'voice': 156 | return self.voice_input() 157 | else: 158 | available_input_mechanisms = ['text', 'voice'] 159 | logging.error(f"Invalid input mechanism: {self.input_mechanism}. Available input mechanisms are: " 160 | f"{available_input_mechanisms}") 161 | raise Exception("Invalid input mechanism. Available input mechanisms are: {available_input_mechanisms}") 162 | except Exception as e: 163 | logging.exception(f"An error occurred while handling input. Error: {e}") 164 | return f"An error occurred while handling input. Error: {e}" 165 | 166 | def handle_output(self, text): 167 | try: 168 | if self.output_mechanism == 'text': 169 | self.text_output(text) 170 | elif self.output_mechanism == 'voice': 171 | self.voice_output(text=text) 172 | elif self.output_mechanism == 'both': 173 | self.text_output(text) 174 | self.voice_output(text=text) 175 | else: 176 | available_output_mechanisms = ['text', 'voice', 'both'] 177 | logging.error(f"Invalid output mechanism: {self.output_mechanism}. Available output mechanisms are: " 178 | f"{available_output_mechanisms}") 179 | raise f"Invalid output mechanism: {self.output_mechanism}. Available output mechanisms are: " \ 180 | f"{available_output_mechanisms}" 181 | except Exception as e: 182 | logging.exception(f"An error occurred while handling output. Error: {e}") 183 | self.handle_output(f"An error occurred while handling output. Error: {e}") 184 | 185 | def take_action(self, intent, query): 186 | try: 187 | # if not os.path.exists('actions.json'): 188 | # # TODO: Add a default actions.json file 189 | # logging.error("actions.json file not found.") 190 | # return "actions.json file not found." 
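# Dispatch flow in brief: classify_intent() maps the raw query to an intent string,
# action_handler() looks that intent up in action_map, and a feature returning
# "Entity not found" triggers one retry under the "others" intent (the premium chatbot).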
191 | # 192 | # # load the JSON file containing the list of available actions and their respective commands 193 | # with open('actions.json', 'r') as f: 194 | # actions = json.load(f) 195 | 196 | # check if the intent matches any of the available actions 197 | # for action in actions: 198 | # intent, _ = intent_classification.classify_intent(secret_key=self.api_key, text=query) 199 | # if the intent matches, do the action 200 | try: 201 | return action_handler(intent, query, self.api_key) 202 | except Exception as e: 203 | logging.exception(f"An error occurred while performing action. Error: {e}") 204 | self.handle_output(f"An error occurred while performing action. Error: {e}") 205 | 206 | print(f"Intent {intent} not found in actions.json.") 207 | # if no action is matched, return a default message 208 | return "Sorry, I don't know how to handle this intent." 209 | except Exception as e: 210 | logging.exception(f"An error occurred while taking action. Error: {e}") 211 | return f"An error occurred while taking action. Error: {e}" 212 | 213 | # don't change this function, do not try to remove verify_user otherwise Jarvis will not work 214 | @verify_user 215 | def start(self): 216 | while True: 217 | try: 218 | query = self.handle_input() 219 | if query == "" or query is None: 220 | continue 221 | if query == 'exit': 222 | self.handle_output("Exiting...") 223 | break 224 | else: 225 | # NOTE: The query is passed to the intent classification function to get the intent 226 | intent, _ = intent_classification.classify_intent(secret_key=self.api_key, text=query) 227 | # print(f"Intent: {intent}") 228 | # intent = intent.replace("_", " ") 229 | # PATCH BELOW for date and time if-else 230 | if 'time' in intent.replace("_", " "): 231 | print(f"Intent: date / {intent}") 232 | else: 233 | print(f"Intent: {intent}") 234 | logging.debug(f"Input: {query}, Intent: {intent}") 235 | response = self.take_action(intent, query) 236 | self.handle_output(response) 237 | except Exception as e: 238 | logging.exception(f"An error occurred while running Jarvis. Error: {e}") 239 | self.handle_output(f"An error occurred while running Jarvis. Error: {e}") 240 | raise f"An error occurred while running Jarvis. 
Error: {e}" 241 | 242 | 243 | if __name__ == "__main__": 244 | def custom_function(*args, **kwargs): 245 | command = kwargs.get('query') 246 | entities = kwargs.get('entities') 247 | print(entities) 248 | # write your code here to do something with the command 249 | # perform some tasks # return is optional 250 | return command + ' Executed' 251 | 252 | 253 | jarvis = JarvisAI(input_mechanism='text', output_mechanism='text', 254 | google_speech_api_key=None, backend_tts_api='pyttsx3', 255 | use_whisper_asr=False, display_logs=False, 256 | api_key='527557f2-0b67-4500-8ca0-03766ade589a') 257 | add_action("general", custom_function) 258 | jarvis.start() 259 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/actions.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "intent": "greet and hello hi kind of things", 4 | "example": [] 5 | }, 6 | { 7 | "intent": "goodbye", 8 | "example": [] 9 | }, 10 | { 11 | "intent": "asking date", 12 | "example": [] 13 | }, 14 | { 15 | "intent": "tell me joke", 16 | "example": [] 17 | }, 18 | { 19 | "intent": "asking time", 20 | "example": [] 21 | }, 22 | { 23 | "intent": "tell me about", 24 | "example": [] 25 | }, 26 | { 27 | "intent": "i am bored", 28 | "example": [] 29 | }, 30 | { 31 | "intent": "volume control", 32 | "example": [] 33 | }, 34 | { 35 | "intent": "tell me news", 36 | "example": [] 37 | }, 38 | { 39 | "intent": "click photo", 40 | "example": [] 41 | }, 42 | { 43 | "intent": "places near me", 44 | "example": [] 45 | }, 46 | { 47 | "intent": "play on youtube", 48 | "example": [] 49 | }, 50 | { 51 | "intent": "play games", 52 | "example": [] 53 | }, 54 | { 55 | "intent": "what can you do", 56 | "example": [] 57 | }, 58 | { 59 | "intent": "send email", 60 | "example": [] 61 | }, 62 | { 63 | "intent": "download youtube video", 64 | "example": [] 65 | }, 66 | { 67 | "intent": "asking weather", 68 | "example": [] 69 | }, 70 | { 71 | "intent": "take screenshot", 72 | "example": [] 73 | }, 74 | { 75 | "intent": "open website", 76 | "example": [] 77 | }, 78 | { 79 | "intent": "send whatsapp message", 80 | "example": [] 81 | }, 82 | { 83 | "intent": "covid cases", 84 | "example": [] 85 | }, 86 | { 87 | "intent": "asking weather", 88 | "example": [] 89 | }, 90 | { 91 | "intent": "others", 92 | "example": [] 93 | } 94 | ] -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/brain/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dipeshpal/Jarvis_AI/d5fdc8c96fe4a4fd154ec02aa053d15a480de809/JarvisAI/JarvisAI/brain/__init__.py -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/brain/auth.py: -------------------------------------------------------------------------------- 1 | import shutup 2 | import requests 3 | import json 4 | 5 | shutup.please() 6 | 7 | 8 | def verify_user(func): 9 | def inner(self, *args, **kwargs): 10 | with open('api_key.txt', 'r') as f: 11 | api_key = f.read() 12 | url = f'https://jarvisai.in/check_secret_key?secret_key={api_key}' 13 | response = requests.get(url) 14 | if response.status_code == 200: 15 | status = json.loads(response.content) 16 | if status: 17 | print("Authentication Successful") 18 | return func(self, *args, **kwargs) # Return the result of calling func 19 | else: 20 | print("Authentication Failed. 
Please check your API key.")
21 |                 exit()
22 |         else:
23 |             print(f'Error: {response.status_code}')
24 |             exit()
25 | 
26 |     return inner
27 | 
28 | 
29 | if __name__ == "__main__":
30 |     # verify_user is a decorator that reads the key from api_key.txt
31 |     with open('api_key.txt', 'w') as f:
32 |         f.write("527557f2-0b67-4500-8ca0-03766ade589a")
33 |     data = verify_user(lambda self: "Authenticated call succeeded")(None)
34 |     print(data, type(data))
35 | 
-------------------------------------------------------------------------------- /JarvisAI/JarvisAI/brain/chatbot_premium.py: --------------------------------------------------------------------------------
1 | import requests
2 | from googlesearch import search
3 | import wikipedia
4 | import html2text
5 | from bs4 import BeautifulSoup
6 | from markdown import markdown
7 | import re
8 | 
9 | 
10 | def search_google_description(query):
11 |     try:
12 |         ans = search(query, advanced=True, num_results=5)
13 | 
14 |         desc = ''
15 |         for i in ans:
16 |             desc += i.description
17 | 
18 |         return desc
19 |     except:
20 |         return ''
21 | 
22 | 
23 | def query_pages(query):
24 |     return list(search(query))
25 | 
26 | 
27 | def markdown_to_text(markdown_string):
28 |     """ Converts a markdown string to plaintext """
29 | 
30 |     # md -> html -> text since BeautifulSoup can extract text cleanly
31 |     html = markdown(markdown_string)
32 | 
33 |     # remove code snippets
34 |     html = re.sub(r'<pre>(.*?)</pre>', ' ', html)
35 |     html = re.sub(r'<code>(.*?)</code>', ' ', html)
36 | 
37 |     # extract text
38 |     soup = BeautifulSoup(html, "html.parser")
39 |     for e in soup.find_all():
40 |         if e.name not in ['p']:
41 |             e.unwrap()
42 |     text = ''.join([i.strip() for i in soup.findAll(text=True)])
43 |     return text
44 | 
45 | 
46 | def format_text(text):
47 |     text = markdown_to_text(text)
48 |     text = text.replace('\n', ' ')
49 |     return text
50 | 
51 | 
52 | def search_google(query):
53 |     try:
54 |         def query_to_text(query):
55 |             html_conv = html2text.HTML2Text()
56 |             html_conv.ignore_links = True
57 |             html_conv.escape_all = True
58 |             text = []
59 |             for link in query_pages(query)[0:3]:
60 |                 try:
61 |                     headers = {
62 |                         'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}
63 | 
64 |                     req = requests.get(link, headers=headers)
65 |                     text.append(html_conv.handle(req.text))
66 |                     text[-1] = format_text(text[-1])
67 |                 except:
68 |                     pass
69 |             return text
70 | 
71 |         return query_to_text(query)
72 |     except:
73 |         return ''
74 | 
75 | 
76 | def search_wiki(query):
77 |     try:
78 |         ans = wikipedia.summary(query, sentences=2)
79 |         return ans
80 |     except:
81 |         return ''
82 | 
83 | 
84 | def search_all(query, advance_search=False):
85 |     combined = search_google_description(query) + "\n" + search_wiki(query)
86 |     if advance_search:
87 |         combined += "\n" + ' '.join(search_google(query))
88 |     return combined
89 | 
90 | 
91 | def try_to_get_response(*args, **kwargs):
92 |     api_key = kwargs.get("api_key")
93 |     query = kwargs.get('query')
94 |     context = search_all(query, advance_search=False)
95 |     headers = {
96 |         'accept': 'application/json',
97 |         'content-type': 'application/x-www-form-urlencoded',
98 |     }
99 |     params = {
100 |         'secret_key': api_key,
101 |         'text': query,
102 |         'context': context,
103 |     }
104 |     response = requests.post('https://www.jarvisai.in/chatbot_premium_api', params=params, headers=headers)
105 |     if response.status_code == 200:
106 |         return response.json()['data']
107 |     else:
108 |         return response.json().get("message", "Server is facing some issues. Please try again later.")
109 | 
110 | 
111 | def premium_chat(*args, **kwargs):
112 |     # try up to 3 times to get a response; fall back to an error message if every attempt fails
113 |     for i in range(3):
114 |         try:
115 |             return try_to_get_response(*args, **kwargs)
116 |         except Exception as e:
117 |             pass
118 |     return "Server is facing some issues. Please try again later."
119 |     # try:
120 |     #     try_to_get_response(*args, **kwargs)
121 |     # except Exception as e:
122 |     #     return f"An error occurred while performing premium_chat, connect with developer. Error: {e}"
123 | 
124 | 
125 | if __name__ == "__main__":
126 |     print(premium_chat(query="who is narendra modi", api_key='ae44cc6e-0d5c-45c1-b8a3-fe412469510f'))
127 | 
-------------------------------------------------------------------------------- /JarvisAI/JarvisAI/brain/intent_classification.py: --------------------------------------------------------------------------------
1 | import os
2 | import requests
3 | import json
4 | from selenium import webdriver
5 | from selenium.webdriver.common.by import By
6 | from selenium.webdriver.chrome.options import Options
7 | from element_manager import *
8 | import time
9 | 
10 | 
11 | 
12 | def check_local_intent(text):
13 |     """Check if the intent of a text string is available locally.
14 | 
15 |     Args:
16 |         text (str): The text string to check.
17 | 
18 |     Returns:
19 |         str: The intent of the text string.
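            Returns None when actions.json is missing or no example/intent matches.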
20 | """ 21 | try: 22 | if not os.path.exists('actions.json'): 23 | return None 24 | else: 25 | with open('actions.json', 'r') as f: 26 | actions = json.load(f) 27 | # Below is sample actions.json file 28 | # [ 29 | # { 30 | # "intent": "greet and hello hi kind of things", 31 | # "example": [] 32 | # }, 33 | # { 34 | # "intent": "goodbye", 35 | # "example": ['bye', 'goodbye', 'see you later'] 36 | # } 37 | # ] 38 | # if text in example then return intent 39 | for action in actions: 40 | if text in action['example']: 41 | return action['intent'] 42 | if action['intent'] in text: 43 | return action['intent'] 44 | if action['example'] == "": 45 | continue 46 | except Exception as e: 47 | return None 48 | 49 | 50 | def try_to_classify_intent(secret_key, text): 51 | """Classify the intent of a text string using the JarvisAI API. 52 | 53 | Args: 54 | text (str): The text string to classify. 55 | 56 | Returns: 57 | str: The intent of the text string. 58 | """ 59 | try: 60 | intent = check_local_intent(text) 61 | if intent is not None: 62 | return intent, 1.0 63 | except Exception as e: 64 | pass 65 | 66 | try: 67 | url = f'https://jarvisai.in/intent_classifier?secret_key={secret_key}&text={text}' 68 | response = requests.get(url) 69 | data = response.json() 70 | if data['status'] == 'success': 71 | return data['data'][0], data['data'][1] 72 | except Exception as e: 73 | raise Exception('Something went wrong while classifying the intent.') 74 | 75 | 76 | def restart_server(): 77 | try: 78 | chrome_options = Options() 79 | chrome_options.add_argument("--headless") 80 | driver = webdriver.Chrome(options=chrome_options) 81 | 82 | driver.get('https://huggingface.co/spaces/dipesh/dipesh-Intent-Classification-large') 83 | 84 | time.sleep(4) 85 | # to click on the element(Restart this Space) found 86 | driver.find_element(By.XPATH, get_xpath(driver, 'd7n5Js111lAwV_U')).click() 87 | return True, 'Server restarted successfully' 88 | except Exception as e: 89 | print(e) 90 | print("Make sure chromedriver.exe is in the same folder as your script") 91 | return False, 'Server restart failed' 92 | 93 | 94 | def classify_intent(secret_key, text): 95 | for i in range(3): 96 | try: 97 | return try_to_classify_intent(secret_key, text) 98 | except Exception as e: 99 | restart_server() 100 | return try_to_classify_intent(secret_key, text) 101 | 102 | 103 | if __name__ == "__main__": 104 | intent, _ = classify_intent('99f605ce-5bf9-4e80-93a3-f367df65aa27', "custom function") 105 | print(intent, _) 106 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/brain/ner.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | import os 3 | 4 | try: 5 | nlp = spacy.load("en_core_web_trf") 6 | except: 7 | print("Downloading spaCy NLP model...") 8 | print("This may take a few minutes and it's one time process...") 9 | os.system("pip install https://huggingface.co/spacy/en_core_web_trf/resolve/main/en_core_web_trf-any-py3-none-any.whl") 10 | nlp = spacy.load("en_core_web_trf") 11 | 12 | 13 | def perform_ner(*args, **kwargs): 14 | query = kwargs['query'] 15 | # Process the input text with spaCy NLP model 16 | doc = nlp(query) 17 | 18 | # Extract named entities and categorize them 19 | entities = [(entity.text, entity.label_) for entity in doc.ents] 20 | 21 | return entities 22 | 23 | 24 | if __name__ == "__main__": 25 | # Example input text 26 | input_text = "I want to buy a new iPhone 12 Pro Max from Apple." 
27 | 28 | # Perform NER on input text 29 | entities = perform_ner(query=input_text) 30 | 31 | # Print the extracted entities 32 | print(entities) 33 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dipeshpal/Jarvis_AI/d5fdc8c96fe4a4fd154ec02aa053d15a480de809/JarvisAI/JarvisAI/features/__init__.py -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/click_photo.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import cv2 3 | import os 4 | 5 | 6 | def click_pic(*args, **kwargs): 7 | try: 8 | t = datetime.datetime.now() 9 | # Taking a video from the webcam 10 | camera = cv2.VideoCapture(0) 11 | # Taking first 20 frames of the video 12 | for i in range(20): 13 | return_value, image = camera.read() 14 | if not os.path.exists("photos"): 15 | os.mkdir("photos") 16 | # Using 20th frame as the picture and now saving the image as the time in seconds,minute,hour,day and month of the year 17 | # Giving the camera around 20 frames to adjust to the surroundings for better picture quality 18 | cv2.imwrite(f"photos/{t.second, t.minute, t.hour, t.day, t.month}_photo.png", image) 19 | # As soon as the image is saved we will stop recording 20 | del camera 21 | print(f"Photo taken: photos/{t.second, t.minute, t.hour, t.day, t.month}_photo.png") 22 | return "Photo taken" 23 | except Exception as e: 24 | return "Error: " + str(e) + "\n Unable to take photo" 25 | 26 | 27 | # Calling the photo_with_python function 28 | if __name__ == "__main__": 29 | click_pic() 30 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/covid_cases.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import pycountry 3 | 4 | 5 | def check_command_is_for_covid_cases(*args, **kwargs): 6 | command = kwargs.get('query') 7 | try: 8 | command = command.title() 9 | entities = kwargs.get('entities') 10 | if len(entities) > 0: 11 | country = [entity[0] for entity in entities if entity[1] == 'GPE'][0] 12 | else: 13 | country = get_country(command) 14 | cases = get_covid_cases(country) 15 | return f"The current active cases in {country} are {cases}" 16 | except Exception as e: 17 | print("Error: ", e) 18 | return "Sorry, I couldn't find the country you are looking for. Or server is down." 
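# Example flow for a query like "covid cases in India" (illustrative): NER yields
# [("India", "GPE")], so country = "India"; get_covid_cases() then sums the 'Active'
# field across the records returned by the covid19api.com live endpoint.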
19 | 20 | 21 | def get_country(command): # For getting only the country name for the whole query 22 | for country in pycountry.countries: 23 | if country.name in command: 24 | return country.name 25 | 26 | 27 | def get_covid_cases(country): # For getting current covid cases 28 | totalActiveCases = 0 29 | response = requests.get('https://api.covid19api.com/live/country/' + country + '/status/confirmed').json() 30 | for data in response: 31 | totalActiveCases += data.get('Active') 32 | return totalActiveCases 33 | 34 | 35 | if __name__ == '__main__': 36 | print(check_command_is_for_covid_cases('active Covid India cases?')) # Example 37 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/date_time.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | 4 | def date_time(*args, **kwargs): 5 | query = kwargs['query'] 6 | if 'time' in query: 7 | return datetime.datetime.now().strftime("%H:%M:%S") 8 | elif 'date' in query: 9 | return datetime.datetime.now().strftime("%d/%m/%Y") 10 | else: 11 | return "Sorry, I don't know how to handle this intent." 12 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/games.py: -------------------------------------------------------------------------------- 1 | import webbrowser 2 | 3 | 4 | def play_games(*args, **kwargs): 5 | url = 'https://poki.com/' 6 | try: 7 | webbrowser.open(url) 8 | return "Successfully opened Poki.com, Play your games!" 9 | except Exception as e: 10 | print(e) 11 | return "Failed to open Poki.com, please try again!" 12 | 13 | 14 | if __name__ == '__main__': 15 | play_games('inp_command') 16 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/greet.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | 4 | def greet(*args, **kwargs): 5 | time = datetime.datetime.now().hour 6 | if time < 12: 7 | return "Hi, Good Morning" 8 | elif 12 <= time < 18: 9 | return "Hi, Good Afternoon" 10 | else: 11 | return "Hi, Good Evening" 12 | 13 | 14 | def goodbye(*args, **kwargs): 15 | return "Goodbye" -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/iambored.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | 4 | def get_me_suggestion(*args, **kwargs): 5 | url = 'http://www.boredapi.com/api/activity' 6 | response = requests.get(url) 7 | return response.json()['activity'] 8 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/internet_speed_test.py: -------------------------------------------------------------------------------- 1 | import speedtest # pip install speedtest-cli 2 | 3 | try: 4 | st = speedtest.Speedtest() 5 | except: 6 | print("Please check your internet connection.") 7 | pass 8 | 9 | 10 | def download_speed(): 11 | down = round(st.download() / 10 ** 6, 2) 12 | return down 13 | 14 | 15 | def upload_speed(): 16 | up = round(st.upload() / 10 ** 6, 2) 17 | return up 18 | 19 | 20 | def ping(): 21 | servernames = [] 22 | st.get_servers(servernames) 23 | results = st.results.ping 24 | return results 25 | 26 | 27 | def speed_test(*args, **kwargs): 28 | try: 29 | print("Checking internet speed. 
Please wait...") 30 | # print('Download Speed: ' + str(download_speed()) + 'MB/s') 31 | # print('Upload Speed: ' + str(upload_speed()) + ' MB/s') 32 | # print('Ping: ' + str(ping()) + ' ms') 33 | return "Download Speed: " + str(download_speed()) + "MB/s" + "\n Upload Speed: " + str( 34 | upload_speed()) + " MB/s" + "\n Ping: " + str(ping()) + " ms" 35 | except Exception as e: 36 | print(e) 37 | return "Error in internet speed test" 38 | 39 | 40 | if __name__ == '__main__': 41 | print(speed_test()) 42 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/joke.py: -------------------------------------------------------------------------------- 1 | import pyjokes 2 | 3 | 4 | def tell_me_joke(*args, **kwargs): 5 | """ 6 | Function to tell a joke 7 | Read https://pyjok.es/api/ for more details 8 | :return: str 9 | """ 10 | lang = kwargs.get("lang", "en") 11 | cat = kwargs.get("cat", "neutral") 12 | return pyjokes.get_joke(language=lang, category=cat) 13 | 14 | 15 | if __name__ == '__main__': 16 | print(tell_me_joke()) 17 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/news.py: -------------------------------------------------------------------------------- 1 | import webbrowser 2 | 3 | 4 | def news(*args, **kwargs): 5 | """ 6 | This method will open the browser and show the news "https://thetechport.in/" 7 | :return: list / bool 8 | """ 9 | try: 10 | url = "https://thetechport.in/" 11 | webbrowser.open(url) 12 | return True 13 | except Exception as e: 14 | print(e) 15 | return False 16 | 17 | 18 | def show_me_some_tech_news(): 19 | try: 20 | url = "https://thetechport.in/" 21 | webbrowser.open(url) 22 | return True 23 | except Exception as e: 24 | print(e) 25 | return False 26 | 27 | 28 | def show_me_some_tech_videos(): 29 | try: 30 | url = "https://www.youtube.com/c/TechPortOfficial" 31 | webbrowser.open(url) 32 | return True 33 | except Exception as e: 34 | print(e) 35 | return False 36 | 37 | 38 | if __name__ == "__main__": 39 | print(news()) 40 | print(show_me_some_tech_news()) 41 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/places_near_me.py: -------------------------------------------------------------------------------- 1 | import webbrowser 2 | 3 | 4 | def get_places_near_me(*args, **kwargs): 5 | inp_command = kwargs.get("query") 6 | map_base_url = f"https://www.google.com/maps/search/{inp_command}" 7 | webbrowser.open(map_base_url) 8 | 9 | return "Opening Google Maps" 10 | 11 | 12 | if __name__ == "__main__": 13 | print(get_places_near_me("nearest coffee shop")) 14 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/screenshot.py: -------------------------------------------------------------------------------- 1 | import pyscreenshot 2 | import os 3 | import datetime 4 | 5 | 6 | def take_screenshot(*args, **kwargs): 7 | try: 8 | image = pyscreenshot.grab() 9 | image.show() 10 | a = datetime.datetime.now() 11 | if not os.path.exists("screenshot"): 12 | os.mkdir("screenshot") 13 | image.save(f"screenshot/{a.day, a.month, a.year}_screenshot.png") 14 | print(f"Screenshot taken: screenshot/{a.day, a.month, a.year}_screenshot.png ") 15 | return f"Screenshot taken" 16 | except Exception as e: 17 | print(e) 18 | return "Unable to take screenshot" 19 | 20 | 21 | if __name__ == "__main__": 22 | take_screenshot() 23 | 
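# Note: f"{a.day, a.month, a.year}" formats a tuple, so files are saved with names like
# "screenshot/(7, 5, 2023)_screenshot.png" (the date shown is illustrative).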
-------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/send_email.py: -------------------------------------------------------------------------------- 1 | import pywhatkit 2 | 3 | 4 | def send_email(*args, **kwargs): 5 | try: 6 | my_email = input("Enter your email address: ") 7 | my_password = input("Enter your password: ") 8 | mail_to = input("Enter the email address you want to send to: ") 9 | subject = input("Enter the subject of the email: ") 10 | content = input("Enter the content of the email: ") 11 | print("Sending email...") 12 | pywhatkit.send_mail(email_sender=my_email, 13 | password=my_password, 14 | subject=subject, 15 | message=content, 16 | email_receiver=mail_to) 17 | print("Email sent!") 18 | return 'Email sent!' 19 | except Exception as e: 20 | print(e) 21 | return 'Email not sent!, Check Error. or Check secure apps is enabled' 22 | 23 | 24 | if __name__ == "__main__": 25 | send_email(None) 26 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/tell_me_about.py: -------------------------------------------------------------------------------- 1 | import wikipedia 2 | import re 3 | 4 | 5 | def tell_me_about(*args, **kwargs): 6 | # topic = kwargs.get("query") 7 | entities = kwargs.get("entities") 8 | if len(entities) == 0: 9 | return "Entity not found" 10 | li = ['EVENT', 'FAC', 'GPE', 'LANGUAGE', 'LAW', 'LOC', 'MONEY', 'NORP', 'ORDINAL', 'ORG', 11 | 'PERCENT', 'PERSON', 'PRODUCT', 'TIME', 'WORK_OF_ART'] 12 | topic = [entity[0] for entity in entities if entity[1] in li][0] 13 | try: 14 | ny = wikipedia.page(topic) 15 | res = str(ny.content[:500].encode('utf-8')) 16 | res = re.sub('[^a-zA-Z.\d\s]', '', res)[1:] 17 | return res 18 | except Exception as e: 19 | print(e) 20 | return False 21 | 22 | 23 | if __name__ == '__main__': 24 | import spacy 25 | import os 26 | 27 | try: 28 | nlp = spacy.load("en_core_web_trf") 29 | except: 30 | print("Downloading spaCy NLP model...") 31 | print("This may take a few minutes and it's one time process...") 32 | os.system( 33 | "pip install https://huggingface.co/spacy/en_core_web_trf/resolve/main/en_core_web_trf-any-py3-none-any.whl") 34 | nlp = spacy.load("en_core_web_trf") 35 | 36 | 37 | def perform_ner(*args, **kwargs): 38 | query = kwargs['query'] 39 | # Process the input text with spaCy NLP model 40 | doc = nlp(query) 41 | 42 | # Extract named entities and categorize them 43 | entities = [(entity.text, entity.label_) for entity in doc.ents] 44 | 45 | return entities 46 | 47 | 48 | query = "tell me about Narendra Modi" 49 | # Perform NER on input text 50 | entities = perform_ner(query=query) 51 | 52 | print(tell_me_about(query=query, entities=entities)) 53 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/volume_controller.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import mediapipe as mp 3 | from math import hypot 4 | from ctypes import cast, POINTER 5 | from comtypes import CLSCTX_ALL 6 | from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume 7 | import numpy as np 8 | 9 | 10 | def start_volume_control(*args, **kwargs): 11 | cap = cv2.VideoCapture(0) # Checks for camera 12 | 13 | mpHands = mp.solutions.hands # detects hand/finger 14 | hands = mpHands.Hands() # complete the initialization configuration of hands 15 | mpDraw = mp.solutions.drawing_utils 16 | 17 | # To access speaker through the library 
pycaw
18 |     devices = AudioUtilities.GetSpeakers()
19 |     interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
20 |     volume = cast(interface, POINTER(IAudioEndpointVolume))
21 |     volbar = 400
22 |     volper = 0
23 | 
24 |     volMin, volMax = volume.GetVolumeRange()[:2]
25 | 
26 |     while True:
27 |         success, img = cap.read()  # If the camera works, capture an image
28 |         img = cv2.flip(img, 1)
29 |         imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # Convert to RGB
30 |         cv2.putText(img, "Press SPACE to stop", (20, 80), cv2.FONT_ITALIC, 1, (0, 255, 98), 3)
31 | 
32 |         # Collection of gesture information
33 |         results = hands.process(imgRGB)  # completes the image processing.
34 | 
35 |         lmList = []  # empty list
36 |         if results.multi_hand_landmarks:  # list of all hands detected.
37 |             # By accessing the list, we can get the information of each hand's corresponding flag bit
38 |             for handlandmark in results.multi_hand_landmarks:
39 |                 for id, lm in enumerate(handlandmark.landmark):  # adding counter and returning it
40 |                     # Get finger joint points
41 |                     h, w, _ = img.shape
42 |                     cx, cy = int(lm.x * w), int(lm.y * h)
43 |                     lmList.append([id, cx, cy])  # adding to the empty list 'lmList'
44 |                 mpDraw.draw_landmarks(img, handlandmark, mpHands.HAND_CONNECTIONS)
45 | 
46 |         if lmList != []:
47 |             # getting the value at a point
48 |             # x  # y
49 |             x1, y1 = lmList[4][1], lmList[4][2]  # thumb
50 |             x2, y2 = lmList[8][1], lmList[8][2]  # index finger
51 |             # creating circle at the tips of thumb and index finger
52 |             cv2.circle(img, (x1, y1), 13, (255, 0, 0), cv2.FILLED)  # image #fingers #radius #rgb
53 |             cv2.circle(img, (x2, y2), 13, (255, 0, 0), cv2.FILLED)  # image #fingers #radius #rgb
54 |             cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 3)  # create a line b/w tips of index finger and thumb
55 | 
56 |             length = hypot(x2 - x1, y2 - y1)  # distance b/w tips using hypotenuse
57 |             # from numpy we find our length, by converting hand range in terms of volume range, i.e. b/w -63.5 to 0
58 |             vol = np.interp(length, [30, 350], [volMin, volMax])
59 |             volbar = np.interp(length, [30, 350], [400, 150])
60 |             volper = np.interp(length, [30, 350], [0, 100])
61 | 
62 |             print(vol, int(length))
63 |             volume.SetMasterVolumeLevel(vol, None)
64 | 
65 |             # Hand range 30 - 350
66 |             # Volume range -63.5 - 0.0
67 |             # creating volume bar for volume level
68 | 
69 |         cv2.rectangle(img, (50, 150), (85, 400), (0, 0, 255),
70 |                       4)  # vid, initial position, ending position, rgb, thickness
71 |         cv2.rectangle(img, (50, int(volbar)), (85, 400), (0, 0, 255), cv2.FILLED)
72 |         cv2.putText(img, f"{int(volper)}%", (10, 40), cv2.FONT_ITALIC, 1, (0, 255, 98), 3)
73 |         # show the volume percentage: location, font of text, length, rgb color, thickness
74 |         # flip the image
75 |         cv2.imshow('Image', img)  # Show the video
76 |         if cv2.waitKey(1) & 0xff == ord(' '):  # By using spacebar delay will stop
77 |             break
78 | 
79 |     cap.release()  # stop cam
80 |     cv2.destroyAllWindows()  # close window
81 |     return "Volume control stopped"
82 | 
83 | 
84 | if __name__ == '__main__':
85 |     start_volume_control()
86 | 
-------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/weather.py: --------------------------------------------------------------------------------
1 | import requests
2 | 
3 | 
4 | def get_weather(*args, **kwargs):
5 |     print("Getting weather")
6 |     query = kwargs.get("query")
7 |     entities = kwargs.get("entities")
8 |     city = "Indore"
9 |     gpe_entities = [entity[0] for entity in entities if entity[1] == "GPE"]
10 |     if len(gpe_entities) > 0:
11 |         city = gpe_entities[0]
12 |     if len(city) == 0:
13 |         return "Unable to determine which city you want the weather for"
14 |     geo_url = f"https://geocoding-api.open-meteo.com/v1/search?name={city}&count=1"
15 |     geo_data = requests.get(geo_url).json()
16 |     lat = geo_data['results'][0]["latitude"]
17 |     lon = geo_data['results'][0]["longitude"]
18 |     weather_url = f"https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lon}&hourly=temperature_2m"
19 |     weather_data = requests.get(weather_url).json()
20 |     temp = weather_data["hourly"]["temperature_2m"][-1]
21 |     return f"The temperature in {city} is {temp} degrees Celsius."
22 | 
23 | 
24 | if __name__ == "__main__":
25 |     print(get_weather(entities=[("London", "GPE")]))
26 | 
-------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/website_open.py: --------------------------------------------------------------------------------
1 | import webbrowser
2 | import re
3 | 
4 | 
5 | def website_opener(*args, **kwargs):
6 |     input_text = kwargs.get("query")
7 |     domain = input_text.lower().split(" ")[-1]
8 |     extension = re.search(r"[.]", domain)
9 |     if not extension:
10 |         if not domain.endswith(".com"):
11 |             domain = domain + ".com"
12 |     try:
13 |         url = 'https://www.' + domain
14 |         webbrowser.open(url)
15 |         return True
16 |     except Exception as e:
17 |         print(e)
18 |         return False
19 | 
20 | 
21 | if __name__ == '__main__':
22 |     website_opener(query="open facebook")
23 | 
-------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/whatsapp_message.py: --------------------------------------------------------------------------------
1 | import pywhatkit as kit
2 | 
3 | 
4 | def send_whatsapp_message(*args, **kwargs):
5 |     country_code = input("Enter country code (Default=+91): ") or "+91"
6 |     number = input("Enter whatsapp number: ")
7 |     message = input("Enter message: ")
8 |     print("Sending message...")
9 |     kit.sendwhatmsg_instantly(f"{country_code}{number}", message, wait_time=20)
10 |     print("Message sent successfully!")
11 | 
12 | 
13 | if __name__ == "__main__":
14 |     send_whatsapp_message()
15 | 
-------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/youtube_play.py: --------------------------------------------------------------------------------
1 | import pywhatkit as kit
2 | 
3 | 
4 | def yt_play(*args, **kwargs):
5 |     inp_command = kwargs.get("query")
6 |     kit.playonyt(inp_command)
7 |     return "Playing Video on YouTube"
8 | 
9 | 
10 | if __name__ == "__main__":
11 |     yt_play(query='play on youtube shape of you')
12 | 
-------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features/youtube_video_downloader.py: --------------------------------------------------------------------------------
1 | from pytube import YouTube
2 | import os
3 | 
4 | 
5 | def download_yt_video(*args, **kwargs):
6 |     ytURL = input("Enter the URL of the YouTube video: ")
7 |     yt = YouTube(ytURL)
8 |     try:
9 |         print("Downloading...")
10 |         yt.streams.filter(progressive=True, file_extension="mp4").order_by("resolution")[-1].download()
11 |     except:
12 |         return "ERROR | Please try again later"
13 |     return f"Download Complete | Saved at {os.getcwd()}"
14 | 
15 | 
16 | if __name__ == "__main__":
17 |     download_yt_video()
18 | 
-------------------------------------------------------------------------------- /JarvisAI/JarvisAI/features_manager.py: --------------------------------------------------------------------------------
1 | try:
2 |     from features.date_time import date_time
3 | 
from features.greet import greet, goodbye 4 | from features.joke import tell_me_joke 5 | from features.click_photo import click_pic 6 | from features.covid_cases import check_command_is_for_covid_cases 7 | from features.games import play_games 8 | from features.iambored import get_me_suggestion 9 | from features.internet_speed_test import speed_test 10 | from features.news import news 11 | from features.places_near_me import get_places_near_me 12 | from features.screenshot import take_screenshot 13 | from features.send_email import send_email 14 | from features.tell_me_about import tell_me_about 15 | from features.volume_controller import start_volume_control 16 | from features.weather import get_weather 17 | from features.website_open import website_opener 18 | from features.whatsapp_message import send_whatsapp_message 19 | from features.youtube_play import yt_play 20 | from features.youtube_video_downloader import download_yt_video 21 | from brain import chatbot_premium 22 | except Exception as e: 23 | from .features.date_time import date_time 24 | from .features.greet import greet, goodbye 25 | from .features.joke import tell_me_joke 26 | from .features.click_photo import click_pic 27 | from .features.covid_cases import check_command_is_for_covid_cases 28 | from .features.games import play_games 29 | from .features.iambored import get_me_suggestion 30 | from .features.internet_speed_test import speed_test 31 | from .features.news import news 32 | from .features.places_near_me import get_places_near_me 33 | from .features.screenshot import take_screenshot 34 | from .features.send_email import send_email 35 | from .features.tell_me_about import tell_me_about 36 | from .features.volume_controller import start_volume_control 37 | from .features.weather import get_weather 38 | from .features.website_open import website_opener 39 | from .features.whatsapp_message import send_whatsapp_message 40 | from .features.youtube_play import yt_play 41 | from .features.youtube_video_downloader import download_yt_video 42 | from .brain import chatbot_premium 43 | 44 | 45 | def show_what_can_i_do(*args, **kwargs): 46 | print("I can do following things:") 47 | for key in action_map.keys(): 48 | print(key) 49 | 50 | 51 | action_map = { 52 | "asking time": date_time, 53 | "asking date": date_time, 54 | "greet and hello hi kind of things": greet, 55 | "goodbye": goodbye, 56 | "tell me joke": tell_me_joke, 57 | "tell me about": tell_me_about, # TODO: improve this 58 | "i am bored": get_me_suggestion, 59 | "volume control": start_volume_control, 60 | "tell me news": news, 61 | "click photo": click_pic, 62 | "places near me": get_places_near_me, 63 | "play on youtube": yt_play, 64 | "play games": play_games, 65 | "what can you do": show_what_can_i_do, 66 | "send email": send_email, 67 | "download youtube video": download_yt_video, 68 | "asking weather": get_weather, 69 | "take screenshot": take_screenshot, 70 | "open website": website_opener, 71 | "send whatsapp message": send_whatsapp_message, 72 | "covid cases": check_command_is_for_covid_cases, 73 | "check internet speed": speed_test, 74 | "others": chatbot_premium.premium_chat, 75 | } 76 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dipeshpal/Jarvis_AI/d5fdc8c96fe4a4fd154ec02aa053d15a480de809/JarvisAI/JarvisAI/utils/__init__.py 
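# A minimal usage sketch for wiring a custom feature into the action map, modeled on the
# __main__ block in JarvisAI/__init__.py; the intent name and API key are placeholders:
#
#   import JarvisAI
#
#   def my_action(*args, **kwargs):
#       return f"You said: {kwargs.get('query')}"
#
#   jarvis = JarvisAI.JarvisAI(input_mechanism='text', output_mechanism='text',
#                              api_key='<your-jarvisai-key>')
#   JarvisAI.add_action("my custom intent", my_action)
#   jarvis.start()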
-------------------------------------------------------------------------------- /JarvisAI/JarvisAI/utils/input_output.py: -------------------------------------------------------------------------------- 1 | try: 2 | from utils.speech_to_text.speech_to_text import speech_to_text_google 3 | from utils.text_to_speech.text_to_speech import text_to_speech 4 | from utils.speech_to_text.speech_to_text_whisper import speech_to_text_whisper 5 | except: 6 | from JarvisAI.utils.speech_to_text.speech_to_text import speech_to_text_google 7 | from JarvisAI.utils.text_to_speech.text_to_speech import text_to_speech 8 | from JarvisAI.utils.speech_to_text.speech_to_text_whisper import speech_to_text_whisper 9 | 10 | 11 | class JarvisInputOutput: 12 | def __init__(self, input_mechanism='text', output_mechanism='text', logging=None, 13 | google_speech_api_key=None, google_speech_recognition_input_lang='en', 14 | duration_listening=5, backend_tts_api='pyttsx3', 15 | use_whisper_asr=False, display_logs=False, 16 | api_key=None): 17 | self.input_mechanism = input_mechanism 18 | self.output_mechanism = output_mechanism 19 | self.google_speech_api_key = google_speech_api_key 20 | self.google_speech_recognition_input_lang = google_speech_recognition_input_lang 21 | self.duration_listening = duration_listening 22 | self.backend_tts_api = backend_tts_api 23 | self.logging = logging 24 | self.use_whisper_asr = use_whisper_asr 25 | self.display_logs = display_logs 26 | self.api_key = api_key 27 | with open('api_key.txt', 'w') as f: 28 | f.write(api_key) 29 | # print("JarvisInputOutput initialized") 30 | # print(f"Input mechanism: {self.input_mechanism}") 31 | # print(f"Output mechanism: {self.output_mechanism}") 32 | # print(f"Google Speech API Key: {self.google_speech_api_key}") 33 | # print(f"Backend TTS API: {self.backend_tts_api}") 34 | 35 | def text_input(self): 36 | if self.input_mechanism == 'text': 37 | return input("Enter your query: ") 38 | else: 39 | self.logging.exception("Invalid input mechanism") 40 | raise ValueError("Invalid input mechanism") 41 | 42 | def text_output(self, text): 43 | if self.output_mechanism == 'text' or self.output_mechanism == 'both': 44 | print(text) 45 | else: 46 | self.logging.exception("Invalid output mechanism") 47 | raise ValueError("Invalid output mechanism") 48 | 49 | def voice_input(self, *args, **kwargs): 50 | if self.input_mechanism == 'voice': 51 | if self.use_whisper_asr: 52 | if self.display_logs: 53 | print("Using Whisper ASR") 54 | command, status = speech_to_text_whisper(duration=self.duration_listening) 55 | else: 56 | if self.display_logs: 57 | print("Using Google ASR") 58 | command, status = speech_to_text_google(input_lang=self.google_speech_recognition_input_lang, 59 | key=self.google_speech_api_key, 60 | duration=self.duration_listening) 61 | print(f"You Said: {command}") 62 | if status: 63 | return command 64 | else: 65 | return None 66 | else: 67 | self.logging.exception("Invalid input mechanism") 68 | raise ValueError("Invalid input mechanism") 69 | 70 | def voice_output(self, *args, **kwargs): 71 | if self.output_mechanism == 'voice' or self.output_mechanism == 'both': 72 | text = kwargs.get('text', None) 73 | text_to_speech(text=text, lang='en', backend_tts_api=self.backend_tts_api) 74 | else: 75 | self.logging.exception("Invalid output mechanism") 76 | raise ValueError("Invalid output mechanism") 77 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/utils/speech_to_text/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dipeshpal/Jarvis_AI/d5fdc8c96fe4a4fd154ec02aa053d15a480de809/JarvisAI/JarvisAI/utils/speech_to_text/__init__.py -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/utils/speech_to_text/speech_to_text.py: -------------------------------------------------------------------------------- 1 | import speech_recognition as sr 2 | import pyaudio 3 | import wave 4 | 5 | 6 | def record_audio(duration=5): 7 | filename = "recording.wav" 8 | # set the chunk size of 1024 samples 9 | chunk = 1024 10 | # sample format 11 | FORMAT = pyaudio.paInt16 12 | # mono, change to 2 if you want stereo 13 | channels = 1 14 | # 44100 samples per second 15 | sample_rate = 44100 16 | record_seconds = duration 17 | # initialize PyAudio object 18 | p = pyaudio.PyAudio() 19 | # open stream object as input & output 20 | stream = p.open(format=FORMAT, 21 | channels=channels, 22 | rate=sample_rate, 23 | input=True, 24 | output=True, 25 | frames_per_buffer=chunk) 26 | frames = [] 27 | for i in range(int(sample_rate / chunk * record_seconds)): 28 | data = stream.read(chunk) 29 | # if you want to hear your voice while recording 30 | # stream.write(data) 31 | frames.append(data) 32 | # print("Finished recording.") 33 | # stop and close stream 34 | stream.stop_stream() 35 | stream.close() 36 | # terminate pyaudio object 37 | p.terminate() 38 | # save audio file 39 | # open the file in 'write bytes' mode 40 | wf = wave.open(filename, "wb") 41 | # set the channels 42 | wf.setnchannels(channels) 43 | # set the sample format 44 | wf.setsampwidth(p.get_sample_size(FORMAT)) 45 | # set the sample rate 46 | wf.setframerate(sample_rate) 47 | # write the frames as bytes 48 | wf.writeframes(b"".join(frames)) 49 | # close the file 50 | wf.close() 51 | 52 | 53 | def speech_to_text_google(input_lang='en', key=None, duration=5): 54 | try: 55 | print("Listening for next 5 seconds...") 56 | record_audio(duration=duration) 57 | r = sr.Recognizer() 58 | with sr.AudioFile("recording.wav") as source: 59 | audio = r.record(source) 60 | command = r.recognize_google(audio, language=input_lang, key=key) 61 | # TODO: Translate command to target language 62 | # if input_lang != 'en': 63 | # translator = googletrans.Translator() 64 | # command = translator.translate("command", dest='hi').text 65 | return command, True 66 | except Exception as e: 67 | print(e) 68 | return e, False 69 | 70 | 71 | if __name__ == "__main__": 72 | print(speech_to_text_google()) 73 | -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/utils/speech_to_text/speech_to_text_whisper.py: -------------------------------------------------------------------------------- 1 | import whisper 2 | import sounddevice 3 | from scipy.io.wavfile import write 4 | import time 5 | 6 | model = whisper.load_model("base") 7 | 8 | 9 | def recorder(second=5): 10 | fs = 16000 11 | print("Recording.....") 12 | record_voice = sounddevice.rec(int(second * fs), samplerate=fs, channels=2) 13 | sounddevice.wait() 14 | write("./recording.wav", fs, record_voice) 15 | print("Finished.....") 16 | time.sleep(1) 17 | 18 | 19 | def speech_to_text_whisper(duration=5): 20 | try: 21 | recorder(second=duration) 22 | result = model.transcribe("./recording.wav") 23 | return result.get("text"), True 24 | except Exception as e: 25 | print(e) 26 | return e, False 27 | 28 | 29 | if __name__ == "__main__": 30 | 
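    # Note: whisper.load_model("base") above downloads the model on first run;
    # recorder() writes the clip to ./recording.wav before transcription.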
    print(speech_to_text_whisper())
-------------------------------------------------------------------------------- /JarvisAI/JarvisAI/utils/text_to_speech/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dipeshpal/Jarvis_AI/d5fdc8c96fe4a4fd154ec02aa053d15a480de809/JarvisAI/JarvisAI/utils/text_to_speech/__init__.py -------------------------------------------------------------------------------- /JarvisAI/JarvisAI/utils/text_to_speech/text_to_speech.py: --------------------------------------------------------------------------------
import os
from gtts import gTTS
import pyttsx3
from playsound import playsound
from lazyme.string import color_print as cprint

USER_CONFIG_FOLDER = './user_configs/'
SPEECH_ENGINE_PATH = f'{USER_CONFIG_FOLDER}speech_engine.txt'


def text_to_speech(text, lang='en', backend_tts_api='gtts'):
    """
    Convert any text to speech.
    You can use 'gtts' or 'pyttsx3' as the backend for text to speech.
    pyttsx3 may support different voices (male/female), depending on your system.
    You can set the TTS backend while creating the JarvisAI object; this function defaults to 'gtts'.
    :param backend_tts_api: str
        'gtts' or 'pyttsx3'
    :param text: str
        text to speak
    :param lang: str
        default 'en'
    :return: bool
        True / False (plays the sound if True, otherwise prints the exception and returns False)
    """
    if backend_tts_api == 'gtts':
        # gtts backend
        try:
            myobj = gTTS(text=text, lang=lang, slow=False)
            myobj.save("tmp.mp3")
            playsound("tmp.mp3")
            os.remove("tmp.mp3")
            return True
        except Exception as e:
            print(e)
            print("Or you may have reached the free limit of the 'gtts' API. Use 'pyttsx3' as the backend for unlimited use.")
            return False
    else:
        # pyttsx3 backend
        engine = pyttsx3.init()
        voices = engine.getProperty('voices')
        voice_property = voices[0].id  # sensible default in case the config step below fails

        try:
            if not os.path.exists("configs"):
                os.mkdir("configs")

            voice_file_name = "configs/Edith-Voice.txt"
            if not os.path.exists(voice_file_name):
                cprint("You can try different voices. This is a one-time setup. You can reset your voice by deleting "
                       "the 'configs/Edith-Voice.txt' file in your working directory.",
                       color='blue')
                cprint("Your system supports the following voices- ",
                       color='blue')
                voices_dict = {}
                for index, voice in enumerate(voices):
                    print(f"{index}: ", voice.id)
                    voices_dict[str(index)] = voice.id
                option = input(f"Choose any- {list(voices_dict.keys())}: ")
                with open(voice_file_name, 'w') as f:
                    f.write(voices_dict.get(option, voices[0].id))
            with open(voice_file_name, 'r') as f:
                voice_property = f.read()
        except Exception as e:
            print(e)
            print("Error occurred while creating the config file for voices in pyttsx3 in 'text2speech'.",
                  "Contact the maintainer/developer of JarvisAI.")
        try:
            engine.setProperty('voice', voice_property)
            engine.say(text)
            engine.runAndWait()
            return True
        except Exception as e:
            print(e)
            print("Error occurred while using pyttsx3 in 'text2speech',",
                  "or your system may not support the pyttsx3 backend. Use 'gtts' as the backend.",
                  "Contact the maintainer/developer of JarvisAI.")
            return False
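--------------------------------------------------------------------------------

A quick smoke test for this module (a hypothetical snippet, not shipped with the package; the 'gtts' path needs an internet connection):

    # Hypothetical smoke test for text_to_speech (illustrative only).
    from JarvisAI.utils.text_to_speech.text_to_speech import text_to_speech

    # Speaks the sentence with gTTS; pass backend_tts_api='pyttsx3' for the
    # offline engine (the first pyttsx3 run asks you to pick a system voice).
    text_to_speech("Hello from JarvisAI", lang='en', backend_tts_api='gtts')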
-------------------------------------------------------------------------------- /JarvisAI/License.txt: --------------------------------------------------------------------------------
Copyright (c) 2018 The Python Packaging Authority

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-------------------------------------------------------------------------------- /JarvisAI/MANIFEST.in: --------------------------------------------------------------------------------
recursive-include JarvisAI *.dll *.so *.dylib
-------------------------------------------------------------------------------- /JarvisAI/README for JarvisAI 4.3 and below.md: --------------------------------------------------------------------------------
[![Header](https://i.postimg.cc/mDCdt9Jn/Mixing-Panel-Photocentric-EDM-Youtube-Channel-Art-1.png "Header")](http://jarvis-ai-api.herokuapp.com/)


![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)![TensorFlow](https://img.shields.io/badge/TensorFlow-%23FF6F00.svg?style=for-the-badge&logo=TensorFlow&logoColor=white)![PyTorch](https://img.shields.io/badge/PyTorch-%23EE4C2C.svg?style=for-the-badge&logo=PyTorch&logoColor=white)

# Hello, folks!

This project is created for those who are interested in building a Virtual Assistant. Writing the code for a Virtual Assistant from scratch generally takes a lot of time, so I have built a library called "JarvisAI" that gives you easy functionality to build your own Virtual Assistant.
# Content-

1. What is JarvisAI?
2. Prerequisite
3. Architecture
4. Getting Started- How to use it?
5. What it can do (Features it supports)
6. Future / Request Features
7. Contribute
8. Contact me
9. Donate
10. Thank me on-

## YouTube Tutorial-

Click on the image below to watch the tutorial on YouTube-

**Tutorial 1-**

[![JarvisAI Tutorial 1](https://img.youtube.com/vi/p2hdqB11S-8/0.jpg)](https://www.youtube.com/watch?v=p2hdqB11S-8)

**Tutorial 2-**

[![JarvisAI Tutorial 2](https://img.youtube.com/vi/6p8bhNGtVbA/0.jpg)](https://www.youtube.com/watch?v=6p8bhNGtVbA)



## **1. What is Jarvis AI?**

Jarvis AI is a Python module that can perform tasks like a chatbot, an assistant, and more.
It provides the base functionality for any assistant application. JarvisAI is built using TensorFlow, PyTorch, Transformers, and other open-source libraries and frameworks, and you can contribute to this project to make it more powerful.

* Official Website: [Click Here](https://jarvisai.in)

* Official Instagram Page: [Click Here](https://www.instagram.com/_jarvisai_)


## 2. Prerequisite

- Get your free API key from [https://jarvisai.in](https://jarvisai.in)

- Only Python (>3.6) is required to use it.

- To contribute to the project: Python is the only prerequisite for basic scripting; Machine Learning and Deep Learning knowledge will help this model do AI-ML tasks. Read the How to Contribute section of this page.

## 3. Architecture

The JarvisAI architecture is divided into two parts.

1. User End- It is responsible for getting input from the user, preprocessing it, and sending it to the JarvisAI server. Once the server sends its response back, it produces output on the user's screen/system.
2. Server Side- The server is responsible for handling various kinds of AI-ML and NLP tasks. It mainly identifies user intent by analyzing user input, and it interacts with external APIs to handle the request.

![JarvisAI’s Architecture](https://cdn-images-1.medium.com/max/800/1*_PK8b96tBgRHlmZecli-nA.jpeg)


## 4. Getting Started- How to use it?

#### NOTE: If you are using 'JarvisAI<4.0' then follow this link to get started: https://pypi.org/project/JarvisAI/3.9/ (Some features might not work in the old version)

#### Recommended: If you want to use the latest version, follow the docs below-

### 4.1. Installation-

* Install the latest version-

```bash
pip install JarvisAI
```

#### Optional Steps (Common Installation Issues)-

* [Optional Step] If Pyaudio is not working or not installed, you might need to install it separately-

In the case of Mac OSX do the following:

```bash
brew install portaudio
pip install pyaudio
```
In the case of Windows or Linux do the following:

- Download pyaudio from: lfd.uci.edu/~gohlke/pythonlibs/#pyaudio

- ```pip install PyAudio-0.2.11-cp310-cp310-win_amd64.whl```

* [Optional Step] If pycountry is not working or not installed, then install the "python3-pycountry" package on Ubuntu/Linux-

```
sudo apt-get update -y
sudo apt-get install -y python3-pycountry
```

* [Optional Step] You might need to install the [Microsoft Visual C++ Redistributable for Visual Studio 2022](https://visualstudio.microsoft.com/downloads/#microsoft-visual-c-redistributable-for-visual-studio-2022)
### 4.2. Code You Need-

You need only this piece of code-

```
import JarvisAI

# create your own function
# It must contain the parameter 'feature_command', which is the command you want to execute
# Return is optional
# If you want to provide a return value, it should only return text (str)
# Your return value will be displayed or spoken out, depending on the chosen OutputMethods of JarvisAI

def custom_function(feature_command="custom command"):
    # write your code here to do something with the command
    # perform some tasks # return is optional
    return feature_command + ' Executed'

obj = JarvisAI.JarvisAI(input_method=JarvisAI.InputsMethods.voice_input_google_api,
                        output_method=JarvisAI.OutputMethods.voice_output,
                        backend_tts_api='pyttsx3',
                        api_key="c6fd2013918f9bc9a12c5394a819af49",
                        detect_wake_word=False,
                        wake_word_detection_method=JarvisAI.InputsMethods.voice_input_google_api,
                        bot_name="Jarvis",
                        display_intent=True,
                        google_speech_recognition_input_lang='en',
                        google_speech_recognition_key=None,
                        google_speech_recognition_duration_listening=5,
                        warnings=False)

obj.register_feature(feature_obj=custom_function, feature_command='custom feature')

obj.start()
```

### 4.3. **What's now?**

It will start your AI; it will ask you to give input and it will produce output accordingly.
You can configure the `input_mechanism` and `output_mechanism` parameters for voice input/output or text input/output.

### 4.4. Let's understand the Parameters-

```bash
:param input_method: (object) method to get input from user
:param output_method: (object) method to give output to user
:param api_key: (str) [Default ''] api key to use JarvisAI, get it from http://jarvis-ai-api.herokuapp.com
:param detect_wake_word: (bool) [Default True] detect wake word or not
:param wake_word_detection_method: (object) [Default None] method to detect wake word
:param google_speech_recognition_input_lang: (str) [Default 'en'] language of the input. Check supported languages here: https://cloud.google.com/speech-to-text/docs/languages
:param google_speech_recognition_key: (str) [Default None] api key to use Google Speech API
:param google_speech_recognition_duration_listening: (int) [Default 5] duration of the listening

READ MORE: Google Speech API (Pricing and Key) at: https://cloud.google.com/speech-to-text
```

## 5. What it can do (Features it supports)-

1. Currently, it supports only the English language
2. Supports voice and text input/output.
3. Supports AI-based voice input as well as Google API voice input.
4. All intellectual tasks are processed on the JarvisAI server, so there is no load on your system.
5. Lightweight and able to understand natural language (commands)
6. Ability to add your own custom functions.

### 5.1. Supported Commands-

1. you can ask the date: Say- “what is the date today”
2. you can ask the time: Say- “what is the time now”
3. you can ask for a joke: Say- “tell me a joke”
4. you can ask for news: Say- “tell me the news”
5. you can ask about the weather: Say- “what is the weather”, “tell me the weather”, “tell me about the weather”, “tell me about the weather in < city>”
6. you can ask about a topic: Say- “tell me about < topic>”
7. you can open a website: Say- “open website < website name>”, “open website < website name><.extension>”, “open website techport.in”
8. you can play on YouTube: Say- “play on youtube < video name>”, “play < video name> on youtube”
9. you can send a WhatsApp message: Say- “send WhatsApp message”
10. you can send an email: Say- “send email”
11. greet: Say- “greet”, “hello”, “hey”, “hi”, “good morning”, “good afternoon”, “good evening”, "how are you"
12. goodbye: Say- “goodbye”, “bye”, “see you later”
13. conversation: Say- “conversation”, “chat”, “talk”, “talk with chatbot”
14. you can take a screenshot of the current screen: Say- “take screenshot”
15. you can click a photo: Say- “click photo”
16. you can check internet speed: Say- “check internet speed”
17. you can download a YouTube video: Say- “download youtube video”
18. you can check covid cases: Say- “covid cases in < country>”, “covid cases < country>”
19. you can ask to play games: Say- “play games”
20. you can ask for places near you: Say- "cafe near me"
21. you can say you are bored: Say- "i am bored"
22. you can control volume: Say- "open volume controller"

### 5.2. Supported Input/Output Methods (Which option do I need to choose?)-

1. **For text input-**

``text_input`` Just asks for input from the command line

2. **For voice input-**

```voice_input_google_api``` It uses the free Google API. After a few minutes of use, Google may restrict you from using it; that is a limitation of the free API. But it is fast, very accurate, and consumes very little memory.

**or**

```voice_input_deepspeech_streaming``` JarvisAI's own machine learning model that processes voice input and converts it into text for further processing. A little slower than the Google API, consumes more memory, and is less accurate, but it is free to use with no restrictions.

3. **For text output-**

```text_output``` Just prints output to the command line

4. **For voice output-**

```voice_output``` It uses the 'gtts' or 'pyttsx3' backend to produce voice output. You can choose the backend with backend_tts_api.

## 6. Future/Request Features-

**WIP**

**You tell me**


## 7. Contribute-

**Instructions Coming Soon**

## 8. Contact me-

- [Instagram](https://www.instagram.com/dipesh_pal17)

- [YouTube](https://www.youtube.com/dipeshpal17)



## 9. Donate-

[Donate to help me run this project and buy a domain](https://www.buymeacoffee.com/dipeshpal)

**_Feel free to use my code, don't forget to mention credit. All the contributors will get credits in this repo._**

**_Mention below line for credits-_**

***Credits-***

- [https://jarvis-ai-api.herokuapp.com](https://jarvis-ai-api.herokuapp.com/)

- [https://github.com/Dipeshpal/Jarvis_AI](https://github.com/Dipeshpal/Jarvis_AI)

- [https://www.youtube.com/dipeshpal17](https://www.youtube.com/dipeshpal17)

- [https://www.instagram.com/dipesh_pal17](https://www.instagram.com/dipesh_pal17/)


## 10.
Thank me on- 256 | 257 | - Follow me on Instagram: [https://www.instagram.com/dipesh_pal17](https://www.instagram.com/dipesh_pal17/) 258 | 259 | - Subscribe me on YouTube: [https://www.youtube.com/dipeshpal17](https://www.youtube.com/dipeshpal17) 260 | 261 | ## License 262 | 263 | [MIT](https://choosealicense.com/licenses/mit/) -------------------------------------------------------------------------------- /JarvisAI/README.md: -------------------------------------------------------------------------------- 1 | [![Header](https://i.postimg.cc/mDCdt9Jn/Mixing-Panel-Photocentric-EDM-Youtube-Channel-Art-1.png "Header")](http://jarvis-ai-api.herokuapp.com/) 2 | 3 | 4 | ![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)![TensorFlow](https://img.shields.io/badge/TensorFlow-%23FF6F00.svg?style=for-the-badge&logo=TensorFlow&logoColor=white)![PyTorch](https://img.shields.io/badge/PyTorch-%23EE4C2C.svg?style=for-the-badge&logo=PyTorch&logoColor=white) 5 | 6 | # Hello, folks! 7 | This project is created only for those who are interested in building a Virtual Assistant. Generally, it took lots of time to write code from scratch to build a Virtual Assistant. So, I have built a Library called "JarvisAI", which gives you easy functionality to build your own Virtual Assistant. 8 | # Content- 9 | 10 | 1. What is JarvisAI? 11 | 2. Prerequisite 12 | 3. Architecture 13 | 4. Getting Started- How to use it? 14 | 5. What it can do (Features it supports) 15 | 6. Future / Request Features 16 | 7. Contribute 17 | 8. Contact me 18 | 9. Donate 19 | 10. Thank me on- 20 | 21 | ## Premium Plan- 22 | 23 | What is our premium plan? 24 | 25 | - AI will be able to understand all your commands. It will answer all your questions apart from below basic intent. 26 | 27 | - It will be able to handle intent- 'others / Unknown Intent'. Free plan doesn't support this. 28 | 29 | - It will be automatically upgraded to use GPT-3 based model in the future. Currently, it uses other advance custom AI models to answer queries. 30 | 31 | - Currently unlimited API calls. Later we might change / limit. 32 | 33 | - Currently, it doesn't remember the previous context of the chat, but soon it will be. We don't store your 34 | personal chat information. 35 | 36 | **Check out our plan: https://jarvisai.in/dashboard** 37 | 38 | 39 | ## YouTube Tutorial- 40 | 41 | Click on the image below to watch the tutorial on YouTube- 42 | 43 | **Tutorial Latest-** 44 | 45 | [![JarvisAI Tutorial 1](https://img.youtube.com/vi/hPE-kdRmYf8/0.jpg)](https://www.youtube.com/watch?v=hPE-kdRmYf8) 46 | 47 | 48 | **Tutorial 1-** 49 | 50 | [![JarvisAI Tutorial 1](https://img.youtube.com/vi/p2hdqB11S-8/0.jpg)](https://www.youtube.com/watch?v=p2hdqB11S-8) 51 | 52 | **Tutorial 2-** 53 | 54 | [![JarvisAI Tutorial 2](https://img.youtube.com/vi/6p8bhNGtVbA/0.jpg)](https://www.youtube.com/watch?v=6p8bhNGtVbA) 55 | 56 | 57 | 58 | ## **1. What is Jarvis AI?** 59 | Jarvis AI is a Python Module that is able to perform tasks like Chatbot, Assistant, etc. It provides base functionality for any assistant application. This JarvisAI is built using Tensorflow, Pytorch, Transformers, and other open-source libraries and frameworks. Well, you can contribute to this project to make it more powerful. 60 | 61 | * Official Website: [Click Here](https://jarvisai.in) 62 | 63 | * Official Instagram Page: [Click Here](https://www.instagram.com/_jarvisai_) 64 | 65 | 66 | ## 2. 
Prerequisite
- Get your free API key from [https://jarvisai.in](https://jarvisai.in)

- Only Python (>3.6) is required to use it.

- To contribute to the project: Python is the only prerequisite for basic scripting; Machine Learning and Deep Learning knowledge will help this model do AI-ML tasks. Read the How to Contribute section of this page.

## 3. Architecture

The JarvisAI architecture is divided into two parts.

1. User End- It is responsible for getting input from the user, preprocessing it, and sending it to the JarvisAI server. Once the server sends its response back, it produces output on the user's screen/system.
2. Server Side- The server is responsible for handling various kinds of AI-ML and NLP tasks. It mainly identifies user intent by analyzing user input, and it interacts with external APIs to handle the request.

![JarvisAI’s Architecture](https://cdn-images-1.medium.com/max/800/1*_PK8b96tBgRHlmZecli-nA.jpeg)


## 4. Getting Started- How to use it?

#### NOTE: The old version is deprecated; use the latest version of JarvisAI

### 4.1. Installation-

* Install the latest version-

```bash
pip install JarvisAI
```

#### Optional Steps (Common Installation Issues)-

* [Optional Step] If Pyaudio is not working or not installed, you might need to install it separately-

In the case of Mac OSX do the following:

```
brew install portaudio
pip install pyaudio
```

In the case of Windows or Linux do the following:

- Download pyaudio from: lfd.uci.edu/~gohlke/pythonlibs/#pyaudio

- ```pip install PyAudio-0.2.11-cp310-cp310-win_amd64.whl```

* [Optional Step] If pycountry is not working or not installed, then install the "python3-pycountry" package on Ubuntu/Linux-

```
sudo apt-get update -y
sudo apt-get install -y python3-pycountry
```


* [Optional Step] You might need to install the [Microsoft Visual C++ Redistributable for Visual Studio 2022](https://visualstudio.microsoft.com/downloads/#microsoft-visual-c-redistributable-for-visual-studio-2022)

### 4.2. Code You Need-

You need only this piece of code-


    import JarvisAI

    def custom_function(*args, **kwargs):
        command = kwargs.get('query')
        entities = kwargs.get('entities')
        print(entities)
        # write your code here to do something with the command
        # perform some tasks # return is optional
        return command + ' Executed'


    jarvis = JarvisAI.JarvisAI(input_mechanism='text', output_mechanism='text',
                               google_speech_api_key=None, backend_tts_api='pyttsx3',
                               use_whisper_asr=False, display_logs=False,
                               api_key='99f605ce-5bf9-4e80-93a3-f367df65aa27')
    JarvisAI.add_action('custom_function', custom_function)
    jarvis.start()


### 4.3. **What's now?**

It will start your AI; it will ask you to give input and it will produce output accordingly.
You can configure the `input_mechanism` and `output_mechanism` parameters for voice input/output or text input/output.
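
For example, the same constructor configured for hands-free use (a hedged sketch, not from the docs; the API key below is a placeholder, and `use_whisper_asr=True` switches voice input to the offline Whisper backend):

    import JarvisAI

    jarvis = JarvisAI.JarvisAI(input_mechanism='voice', output_mechanism='both',
                               google_speech_api_key=None, backend_tts_api='pyttsx3',
                               use_whisper_asr=True, display_logs=False,
                               api_key='your-api-key')  # placeholder key
    jarvis.start()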
### 4.4. Let's understand the Parameters-

For text input-

    input_mechanism='text'

For voice input-

    input_mechanism='voice'

For text output-

    output_mechanism='text'

For voice output-

    output_mechanism='voice'

For voice and text output-

    output_mechanism='both'


## 5. What it can do (Features it supports)-

1. Currently, it supports only the English language
2. Supports voice and text input/output.
3. Supports AI-based voice input (using Whisper ASR) as well as Google API voice input.
4. All intellectual tasks are processed on the JarvisAI server, so there is no load on your system.
5. Lightweight and able to understand natural language (commands)
6. Ability to add your own custom functions.

### 5.1. Supported Commands-

These are the supported intents that the AI can handle; you can ask in natural language.

**Example- "What is the time now", "make me laugh", "click a photo", etc.**

**Note: Some features/commands might not work yet (WIP). Please tell me about bugs.**

1. asking time
2. asking date
3. greet and hello hi kind of things goodbye
4. tell me joke
5. tell me about
6. i am bored
7. volume control
8. tell me news
9. click photo
10. places near me
11. play on youtube
12. play games
13. what can you do
14. send email
15. download youtube video
16. asking weather
17. take screenshot
18. open website
19. send whatsapp message
20. covid cases
21. check internet speed
22. others / Unknown Intent (Premium Feature)


### 5.2. Supported Input/Output Methods (Which option do I need to choose?)-

You can set the parameters below while creating the JarvisAI object-

    jarvis = JarvisAI.JarvisAI(input_mechanism='text', output_mechanism='text',
                               google_speech_api_key=None, backend_tts_api='pyttsx3',
                               use_whisper_asr=False, display_logs=False,
                               api_key='99f605ce-5bf9-4e80-93a3-f367df65aa27')

1. **For text input-**

    input_mechanism='text'

2. **For voice input-**

    input_mechanism='voice'

3. **For text output-**

    output_mechanism='text'

4. **For voice output-**

    output_mechanism='voice'

5. **For voice and text output-**

    output_mechanism='both'

## 6. Future/Request Features-

**WIP**
**You tell me**
at info@jarvisai.in or via www.dipeshpal.in/social

## 7. Contribute-

1. Clone this repo.
2. Create your file in JarvisAI/JarvisAI/features/
3. Write an entry function like this-


    def some_func(*args, **kwargs):
        query = kwargs.get("query")
        entities = kwargs.get("entities")
        li = ['EVENT', 'FAC', 'GPE', 'LANGUAGE', 'LAW', 'LOC', 'MONEY', 'NORP', 'ORDINAL', 'ORG',
              'PERCENT', 'PERSON', 'PRODUCT', 'TIME', 'WORK_OF_ART']
        # guard against queries with no recognized entity to avoid an IndexError
        topics = [entity[0] for entity in entities if entity[1] in li]
        topic = topics[0] if topics else None
        return "This Code Done"

- query is the text that is recognized by your microphone

- entities come from Named Entity Recognition (NER), a natural language processing technique used for categorizing data

- Example-

  query: who is Narendra Modi

  entities: [('Narendra Modi', 'PERSON')]

  topic: Narendra Modi

So, now you have the topic from the query and you can work with the topic.
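
For instance, a complete (hypothetical) feature file could look like the sketch below; the file name `coin_flip.py`, the function, and the intent it would map to are illustrative only, not part of the current feature set:

    # JarvisAI/JarvisAI/features/coin_flip.py (hypothetical example)
    import random


    def coin_flip(*args, **kwargs):
        # 'query' is the raw text recognized from the user; unused here.
        query = kwargs.get("query")
        # Returned text is printed and/or spoken automatically, depending
        # on the configured output_mechanism.
        return f"It's {random.choice(['heads', 'tails'])}!"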
4. Your function can return something (text) or just perform an action. Returned text will automatically be printed/spoken by your system.
5. In JarvisAI/JarvisAI/features_manager.py, import and add your function like this

    try:
        from features.your_file import some_func
    except Exception as e:
        from .features.your_file import some_func

    action_map = {
        .....
        ....
        'your_intent': some_func
    }

6. That's it, now raise a pull request. I'll verify your code; if it is working, ethical, and all terms are followed, I'll approve it.
7. You will become a contributor.

## 8. Contact me-
- [Instagram](https://www.instagram.com/dipesh_pal17)

- [YouTube](https://www.youtube.com/dipeshpal17)



## 9. Donate-

Consider donating to JarvisAI to support our mission of keeping our servers running 24/7. Your contribution will enable us to continue doing great things and providing valuable services. Every little bit helps!


[Click Here to support](https://www.instamojo.com/@techport/)

**_Feel free to use my code, don't forget to mention credit. All the contributors will get credits in this repo._**
**_Mention below line for credits-_**
***Credits-***
- [https://jarvisai.in](https://jarvisai.in)

- [https://github.com/Dipeshpal/Jarvis_AI](https://github.com/Dipeshpal/Jarvis_AI)

- [https://www.youtube.com/dipeshpal17](https://www.youtube.com/dipeshpal17)

- [https://www.instagram.com/dipesh_pal17](https://www.instagram.com/_dipeshpal_)


## 10.
Thank me on- 323 | - Follow me on Instagram: [https://www.instagram.com/dipesh_pal17](https://www.instagram.com/_dipeshpal_) 324 | 325 | - Subscribe me on YouTube: [https://www.youtube.com/dipeshpal17](https://www.youtube.com/dipeshpal17) 326 | 327 | ## License 328 | [MIT](https://choosealicense.com/licenses/mit/) 329 | -------------------------------------------------------------------------------- /JarvisAI/TODO: -------------------------------------------------------------------------------- 1 | Setup Premium Features -------------------------------------------------------------------------------- /JarvisAI/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dipeshpal/Jarvis_AI/d5fdc8c96fe4a4fd154ec02aa053d15a480de809/JarvisAI/__init__.py -------------------------------------------------------------------------------- /JarvisAI/setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | from setuptools import find_namespace_packages 3 | 4 | with open("README.md", "r", encoding='utf-8') as fh: 5 | long_description = fh.read() 6 | 7 | setuptools.setup( 8 | name="JarvisAI", 9 | version="4.9", 10 | author="Dipesh", 11 | author_email="dipeshpal17@gmail.com", 12 | description="JarvisAI is python library to build your own AI virtual assistant with natural language processing.", 13 | long_description=long_description, 14 | long_description_content_type="text/markdown", 15 | url="https://github.com/Dipeshpal/Jarvis_AI", 16 | include_package_data=True, 17 | packages=find_namespace_packages(include=['JarvisAI.*', 'JarvisAI']), 18 | install_requires=['numpy', 'gtts', 'playsound', 'pyscreenshot', "opencv-python", 19 | 'SpeechRecognition', 'pyjokes', 'wikipedia', 'scipy', 'lazyme', 20 | "requests", "pyttsx3", "spacy==3.5.0", 'pywhatkit', 'speedtest-cli', 21 | 'pytube', 'pycountry', 'playsound', 'pyaudio', 'mediapipe==0.8.11', 22 | 'pycaw', 'openai-whisper', 'shutup', 'sounddevice', 'html2text==2020.1.16', 23 | 'wikipedia==1.4.0', 'Markdown==3.4.1', 'markdown2==2.4.8', 24 | 'lxml==4.9.2', 'googlesearch-python==1.2.3', 'selenium', 'selenium-pro', 25 | 'element-manager'], 26 | classifiers=[ 27 | "Programming Language :: Python :: 3", 28 | "License :: OSI Approved :: MIT License", 29 | "Operating System :: OS Independent", 30 | ], 31 | python_requires='>=3.6', 32 | project_urls={ 33 | 'Official Website': 'https://jarvisai.in', 34 | 'Documentation': 'https://github.com/Dipeshpal/Jarvis_AI', 35 | 'Donate': 'https://www.buymeacoffee.com/dipeshpal', 36 | 'Say Thanks!': 'https://youtube.com/techportofficial', 37 | 'Source': 'https://github.com/Dipeshpal/Jarvis_AI', 38 | 'Contact': 'https://www.dipeshpal.in/social', 39 | }, 40 | ) 41 | -------------------------------------------------------------------------------- /License.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018 The Python Packaging Authority 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice 
shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Header](https://i.postimg.cc/mDCdt9Jn/Mixing-Panel-Photocentric-EDM-Youtube-Channel-Art-1.png "Header")](http://jarvis-ai-api.herokuapp.com/) 2 | 3 | 4 | ![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)![TensorFlow](https://img.shields.io/badge/TensorFlow-%23FF6F00.svg?style=for-the-badge&logo=TensorFlow&logoColor=white)![PyTorch](https://img.shields.io/badge/PyTorch-%23EE4C2C.svg?style=for-the-badge&logo=PyTorch&logoColor=white) 5 | 6 | # Hello, folks! 7 | This project is created only for those who are interested in building a Virtual Assistant. Generally, it took lots of time to write code from scratch to build a Virtual Assistant. So, I have built a Library called "JarvisAI", which gives you easy functionality to build your own Virtual Assistant. 8 | # Content- 9 | 10 | 1. What is JarvisAI? 11 | 2. Prerequisite 12 | 3. Architecture 13 | 4. Getting Started- How to use it? 14 | 5. What it can do (Features it supports) 15 | 6. Future / Request Features 16 | 7. Contribute 17 | 8. Contact me 18 | 9. Donate 19 | 10. Thank me on- 20 | 21 | ## YouTube Tutorial- 22 | 23 | Click on the image below to watch the tutorial on YouTube- 24 | 25 | **Tutorial 1-** 26 | 27 | [![JarvisAI Tutorial 1](https://img.youtube.com/vi/p2hdqB11S-8/0.jpg)](https://www.youtube.com/watch?v=p2hdqB11S-8) 28 | 29 | **Tutorial 2-** 30 | 31 | [![JarvisAI Tutorial 2](https://img.youtube.com/vi/6p8bhNGtVbA/0.jpg)](https://www.youtube.com/watch?v=6p8bhNGtVbA) 32 | 33 | 34 | 35 | ## **1. What is Jarvis AI?** 36 | Jarvis AI is a Python Module that is able to perform tasks like Chatbot, Assistant, etc. It provides base functionality for any assistant application. This JarvisAI is built using Tensorflow, Pytorch, Transformers, and other open-source libraries and frameworks. Well, you can contribute to this project to make it more powerful. 37 | 38 | * Official Website: [Click Here](https://jarvisai.in) 39 | 40 | * Official Instagram Page: [Click Here](https://www.instagram.com/_jarvisai_) 41 | 42 | 43 | ## 2. Prerequisite 44 | - Get your Free API key from [https://jarvisai.in](https://jarvisai.in) 45 | 46 | - To use it only Python (> 3.6) is required. 47 | 48 | - To contribute to the project: Python is the only prerequisite for basic scripting, Machine Learning, and Deep Learning knowledge will help this model to do tasks like AI-ML. Read the How to Contribute section of this page. 49 | 50 | ## 3. Architecture 51 | 52 | The JarvisAI’s architecture is divided into two parts. 53 | 54 | 1. User End- It is basically responsible for getting input from the user and after preprocessing input it sends input to JarvisAI’s server. And once the server sends its response back, it produces output on the user screen/system. 55 | 2. 
Server Side- The server is responsible for handling various kinds of AI-ML and NLP tasks. It mainly identifies user intent by analyzing user input, and it interacts with external APIs to handle the request.

![JarvisAI’s Architecture](https://cdn-images-1.medium.com/max/800/1*_PK8b96tBgRHlmZecli-nA.jpeg)


## 4. Getting Started- How to use it?

#### NOTE: The old version is deprecated; use the latest version of JarvisAI

### 4.1. Installation-

* Install the latest version-

```bash
pip install JarvisAI
```

#### Optional Steps (Common Installation Issues)-

* [Optional Step] If Pyaudio is not working or not installed, you might need to install it separately-

In the case of Mac OSX do the following:

```
brew install portaudio
pip install pyaudio
```

In the case of Windows or Linux do the following:

- Download pyaudio from: lfd.uci.edu/~gohlke/pythonlibs/#pyaudio

- ```pip install PyAudio-0.2.11-cp310-cp310-win_amd64.whl```

* [Optional Step] If pycountry is not working or not installed, then install the "python3-pycountry" package on Ubuntu/Linux-

```
sudo apt-get update -y
sudo apt-get install -y python3-pycountry
```


* [Optional Step] You might need to install the [Microsoft Visual C++ Redistributable for Visual Studio 2022](https://visualstudio.microsoft.com/downloads/#microsoft-visual-c-redistributable-for-visual-studio-2022)

### 4.2. Code You Need-

You need only this piece of code-


    import JarvisAI

    def custom_function(*args, **kwargs):
        command = kwargs.get('query')
        entities = kwargs.get('entities')
        print(entities)
        # write your code here to do something with the command
        # perform some tasks # return is optional
        return command + ' Executed'


    jarvis = JarvisAI.Jarvis(input_mechanism='voice', output_mechanism='both',
                             google_speech_api_key=None, backend_tts_api='pyttsx3',
                             use_whisper_asr=False, display_logs=False,
                             api_key='527557f2-0b67-4500-8ca0-03766ade589a')
    # add_action("general", custom_function)  # OPTIONAL
    jarvis.start()



### 4.3. **What's now?**

It will start your AI; it will ask you to give input and it will produce output accordingly.
You can configure the `input_mechanism` and `output_mechanism` parameters for voice input/output or text input/output.

### 4.4. Let's understand the Parameters-

```bash
:param input_method: (object) method to get input from user
:param output_method: (object) method to give output to user
:param api_key: (str) [Default ''] api key to use JarvisAI, get it from http://jarvis-ai-api.herokuapp.com
:param detect_wake_word: (bool) [Default True] detect wake word or not
:param wake_word_detection_method: (object) [Default None] method to detect wake word
:param google_speech_recognition_input_lang: (str) [Default 'en'] language of the input. Check supported languages here: https://cloud.google.com/speech-to-text/docs/languages
:param google_speech_recognition_key: (str) [Default None] api key to use Google Speech API
:param google_speech_recognition_duration_listening: (int) [Default 5] duration of the listening

READ MORE: Google Speech API (Pricing and Key) at: https://cloud.google.com/speech-to-text
```
## 5. What it can do (Features it supports)-
1. Currently, it supports only the English language
2. Supports voice and text input/output.
3. Supports AI-based voice input (using Whisper ASR) as well as Google API voice input.
4. All intellectual tasks are processed on the JarvisAI server, so there is no load on your system.
5. Lightweight and able to understand natural language (commands)
6. Ability to add your own custom functions.

### 5.1. Supported Commands-

These are the supported intents that the AI can handle; you can ask in natural language.

**Example- "What is the time now", "make me laugh", "click a photo", etc.**

**Note: Some features/commands might not work yet (WIP). Please tell me about bugs.**

1. asking time
2. asking date
3. greet and hello hi kind of things goodbye
4. tell me joke
5. tell me about
6. i am bored
7. volume control
8. tell me news
9. click photo
10. places near me
11. play on youtube
12. play games
13. what can you do
14. send email
15. download youtube video
16. asking weather
17. take screenshot
18. open website
19. send whatsapp message
20. covid cases
21. check internet speed
22. others / Unknown Intent (IN PROGRESS)


### 5.2. Supported Input/Output Methods (Which option do I need to choose?)-

You can set the parameters below while creating the JarvisAI object-

    jarvis = JarvisAI.Jarvis(input_mechanism='voice', output_mechanism='both',
                             google_speech_api_key=None, backend_tts_api='pyttsx3',
                             use_whisper_asr=False, display_logs=False,
                             api_key='527557f2-0b67-4500-8ca0-03766ade589a')

1. **For text input-**

    input_mechanism='text'

2. **For voice input-**

    input_mechanism='voice'

3. **For text output-**

    output_mechanism='text'

4. **For voice output-**

    output_mechanism='voice'

5. **For voice and text output-**

    output_mechanism='both'

## 6. Future/Request Features-
**WIP**
**You tell me**

## 7. Contribute-
**Instructions Coming Soon**
## 8. Contact me-
- [Instagram](https://www.instagram.com/dipesh_pal17)

- [YouTube](https://www.youtube.com/dipeshpal17)



## 9. Donate-
[Donate to help me run this project and buy a domain](https://www.buymeacoffee.com/dipeshpal)

**_Feel free to use my code, don't forget to mention credit. All the contributors will get credits in this repo._**
**_Mention below line for credits-_**
***Credits-***
- [https://jarvis-ai-api.herokuapp.com](https://jarvis-ai-api.herokuapp.com/)

- [https://github.com/Dipeshpal/Jarvis_AI](https://github.com/Dipeshpal/Jarvis_AI)

- [https://www.youtube.com/dipeshpal17](https://www.youtube.com/dipeshpal17)

- [https://www.instagram.com/dipesh_pal17](https://www.instagram.com/dipesh_pal17/)


## 10.
Thank me on- 231 | - Follow me on Instagram: [https://www.instagram.com/dipesh_pal17](https://www.instagram.com/dipesh_pal17/) 232 | 233 | - Subscribe me on YouTube: [https://www.youtube.com/dipeshpal17](https://www.youtube.com/dipeshpal17) 234 | 235 | ## License 236 | [MIT](https://choosealicense.com/licenses/mit/) 237 | -------------------------------------------------------------------------------- /cmd_twine.txt: -------------------------------------------------------------------------------- 1 | python setup.py sdist bdist_wheel 2 | twine upload dist/* --skip-existing 3 | pyarmor obfuscate --bootstrap 3 --exact --platform windows.x86_64 --platform windows.x86 --platform linux.x86_64 --platform linux.x86 --platform darwin.x86_64 manager.py 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==1.4.0 2 | aiohttp==3.8.3 3 | aiosignal==1.3.1 4 | async-timeout==4.0.2 5 | attrs==22.2.0 6 | beautifulsoup4==4.11.2 7 | blis==0.7.9 8 | catalogue==2.0.8 9 | certifi==2022.12.7 10 | charset-normalizer==2.1.1 11 | click==8.1.3 12 | colorama==0.4.6 13 | comtypes==1.1.14 14 | confection==0.0.4 15 | contourpy==1.0.7 16 | cycler==0.11.0 17 | cymem==2.0.7 18 | EasyProcess==1.1 19 | entrypoint2==1.1 20 | Flask==2.2.2 21 | flatbuffers==23.1.21 22 | fonttools==4.38.0 23 | frozenlist==1.3.3 24 | gTTS==2.3.1 25 | idna==3.4 26 | importlib-metadata==6.0.0 27 | itsdangerous==2.1.2 28 | Jinja2==3.1.2 29 | kiwisolver==1.4.4 30 | langcodes==3.3.0 31 | lazyme==0.0.27 32 | MarkupSafe==2.1.2 33 | matplotlib==3.6.3 34 | mediapipe==0.9.1.0 35 | MouseInfo==0.1.3 36 | mss==7.0.1 37 | multidict==6.0.4 38 | murmurhash==1.0.9 39 | numpy==1.24.2 40 | openai==0.26.4 41 | opencv-contrib-python==4.7.0.68 42 | opencv-python==4.7.0.68 43 | packaging==23.0 44 | pathy==0.10.1 45 | Pillow==9.4.0 46 | playsound==1.3.0 47 | preshed==3.0.8 48 | protobuf==3.20.3 49 | psutil==5.9.4 50 | PyAudio==0.2.13 51 | PyAutoGUI==0.9.53 52 | pycaw==20220416 53 | pycountry==22.3.5 54 | pydantic==1.10.4 55 | PyGetWindow==0.0.9 56 | pyjokes==0.6.0 57 | PyMsgBox==1.0.9 58 | pyparsing==3.0.9 59 | pyperclip==1.8.2 60 | pypiwin32==223 61 | PyRect==0.2.0 62 | pyscreenshot==3.0 63 | PyScreeze==0.1.28 64 | python-dateutil==2.8.2 65 | pyttsx3==2.90 66 | pytube==12.1.2 67 | pytweening==1.0.4 68 | pywhatkit==5.4 69 | pywin32==305 70 | requests==2.28.2 71 | six==1.16.0 72 | smart-open==6.3.0 73 | soupsieve==2.3.2.post1 74 | spacy==3.5.0 75 | spacy-legacy==3.0.12 76 | spacy-loggers==1.0.4 77 | SpeechRecognition==3.9.0 78 | speedtest-cli==2.1.3 79 | srsly==2.4.5 80 | thinc==8.1.7 81 | tqdm==4.64.1 82 | typer==0.7.0 83 | typing_extensions==4.4.0 84 | urllib3==1.26.14 85 | wasabi==1.1.1 86 | Werkzeug==2.2.2 87 | wikipedia==1.4.0 88 | yarl==1.8.2 89 | zipp==3.12.1 90 | sourcedefender==10.0.13 --------------------------------------------------------------------------------