├── JarvisGUI
├── signup.qrc
├── SignUpUI
│ ├── res.qrc
│ ├── requirements.txt
│ ├── SignUpUI.pyproject
│ ├── Resources.qrc
│ ├── dialog.py
│ ├── jarvisMainGUI.py
│ └── form.ui
├── requirements.txt
├── LoginUI
│ ├── requirements.txt
│ ├── LoginUI.pyproject
│ ├── LoginResources.qrc
│ ├── widget.py
│ ├── loginWindowGUI.py
│ └── form.ui
├── NewSignUP
│ ├── requirements.txt
│ ├── NewSignUP.pyproject
│ ├── ButtonResources.qrc
│ ├── ButtonResource.qrc
│ ├── widget.py
│ ├── signUpGUI.py
│ └── form.ui
├── FaceRecogGUI
│ ├── requirements.txt
│ ├── FaceRecogGUI.pyproject
│ ├── GUIResourceFiles.qrc
│ ├── widget.py
│ ├── faceRecogGUI.py
│ └── form.ui
├── FaceRecogForNewUser
│ ├── requirements.txt
│ ├── FaceRecogForNewUser.pyproject
│ ├── NewUserFaceRecResource.qrc
│ ├── widget.py
│ ├── newUserFaceRecGUI.py
│ └── form.ui
├── JarvisImages
│ └── ironman.webp
├── JarvisGUI.pyproject
├── MainFile.qrc
├── dialog.py
├── main.py
├── signUpImpl.py
├── loginWindowMain.py
├── mainFileNew.py
├── form.ui
├── newUserFaceRecGUI.py
├── faceRecogGUI.py
├── newUserPyFaceRecogFile.py
├── signUpGUI.py
├── faceRecog.py
├── loginWindowGUI.py
├── jarvisMainGUI.py
├── jarvisMAIN.py
├── searchFile.py
├── SystemAppsUsingCMD.py
├── selenuimFile.py
├── text2speech.py
├── gptIntegration.py
├── web_gmail.py
├── winsearch.py
├── Email.py
├── WeatherUpdates.py
├── tellmejoke.py
├── objDetecGfg.py
├── LICENSE
├── minimizeWindow.py
├── usingNLPnew.py
├── EngtoTam.py
├── InterfaceGUI.py
├── requirements.txt
├── NewsApi.py
├── ModelTrainer.py
├── increaseVolume.py
├── voice_reg_bg.py
├── snap.py
├── GetRootword.py
├── .gitignore
├── Gesture Control
├── GoogleMaps.py
├── switchVoices.py
├── FingerScroll.py
├── web_apps.py
├── OsFunction.py
├── AdvancedSpeech.py
├── IntegratedFunctional.py
├── speech2text.py
├── objectIdentification.py
├── increaseBrightness.py
├── web_function.py
├── ScheduleGmeet.py
├── youtube.py
├── SpeechAuthentication.py
├── Just_youtube_functionality.py
├── Remainders and Alarms.py
├── web_Youtube.py
├── web_Whatsapp.py
├── utubeVideoDownloader.py
├── web_chrome.py
├── web_edge.py
├── web_firefox.py
├── FaceRecognition.py
├── MAIN.py
└── README.md
/JarvisGUI/signup.qrc:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/JarvisGUI/SignUpUI/res.qrc:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/JarvisGUI/requirements.txt:
--------------------------------------------------------------------------------
1 | PySide6
2 |
--------------------------------------------------------------------------------
/JarvisGUI/LoginUI/requirements.txt:
--------------------------------------------------------------------------------
1 | PySide6
2 |
--------------------------------------------------------------------------------
/JarvisGUI/NewSignUP/requirements.txt:
--------------------------------------------------------------------------------
1 | PySide6
2 |
--------------------------------------------------------------------------------
/JarvisGUI/SignUpUI/requirements.txt:
--------------------------------------------------------------------------------
1 | PySide6
2 |
--------------------------------------------------------------------------------
/JarvisGUI/FaceRecogGUI/requirements.txt:
--------------------------------------------------------------------------------
1 | PySide6
2 |
--------------------------------------------------------------------------------
/JarvisGUI/FaceRecogForNewUser/requirements.txt:
--------------------------------------------------------------------------------
1 | PySide6
2 |
--------------------------------------------------------------------------------
/JarvisGUI/JarvisImages/ironman.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/harriik/Jarvis/HEAD/JarvisGUI/JarvisImages/ironman.webp
--------------------------------------------------------------------------------
/JarvisGUI/LoginUI/LoginUI.pyproject:
--------------------------------------------------------------------------------
1 | {
2 | "files": [
3 | "widget.py",
4 | "form.ui",
5 | "LoginResources.qrc"
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/JarvisGUI/SignUpUI/SignUpUI.pyproject:
--------------------------------------------------------------------------------
1 | {
2 | "files": [
3 | "Resources.qrc",
4 | "dialog.py",
5 | "form.ui"
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/JarvisGUI/NewSignUP/NewSignUP.pyproject:
--------------------------------------------------------------------------------
1 | {
2 | "files": [
3 | "widget.py",
4 | "form.ui",
5 | "ButtonResource.qrc"
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/JarvisGUI/FaceRecogGUI/FaceRecogGUI.pyproject:
--------------------------------------------------------------------------------
1 | {
2 | "files": [
3 | "widget.py",
4 | "form.ui",
5 | "GUIResourceFiles.qrc"
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/JarvisGUI/FaceRecogForNewUser/FaceRecogForNewUser.pyproject:
--------------------------------------------------------------------------------
1 | {
2 | "files": [
3 | "widget.py",
4 | "form.ui",
5 | "NewUserFaceRecResource.qrc"
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/JarvisGUI/JarvisGUI.pyproject:
--------------------------------------------------------------------------------
1 | {
2 | "files": [
3 | "MainFile.qrc",
4 | "dialog.py",
5 | "form.ui",
6 | "mainFile.qrc",
7 | "signup.qrc"
8 | ]
9 | }
10 |
--------------------------------------------------------------------------------
/JarvisGUI/NewSignUP/ButtonResources.qrc:
--------------------------------------------------------------------------------
1 |
2 |
3 | ../JarvisImages/backButton.png
4 | ../JarvisImages/exitButton.png
5 | ../JarvisImages/signUp.png
6 |
7 |
8 |
--------------------------------------------------------------------------------
/JarvisGUI/FaceRecogGUI/GUIResourceFiles.qrc:
--------------------------------------------------------------------------------
1 |
2 |
3 | ../JarvisImages/exitButton.png
4 | ../JarvisImages/ironmanSidePose.jpg
5 | ../JarvisImages/loginButton.png
6 |
7 |
8 |
--------------------------------------------------------------------------------
/JarvisGUI/SignUpUI/Resources.qrc:
--------------------------------------------------------------------------------
1 |
2 |
3 | ../JarvisImages/exitButton.png
4 | ../JarvisImages/listening.gif
5 | ../JarvisImages/startButton.png
6 | ../JarvisImages/voicerecog.gif
7 |
8 |
9 |
--------------------------------------------------------------------------------
/JarvisGUI/FaceRecogForNewUser/NewUserFaceRecResource.qrc:
--------------------------------------------------------------------------------
1 |
2 |
3 | ../JarvisImages/capture.png
4 | ../JarvisImages/exitButton.png
5 | ../JarvisImages/loginButton.png
6 | ../JarvisImages/backButton.png
7 |
8 |
9 |
--------------------------------------------------------------------------------
/searchFile.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import pyautogui as pg
3 | import time
def open_windows_search(name):
    """Open the Windows search pane and submit *name* as the query.

    Best-effort GUI automation: any failure is printed rather than
    raised, so callers never see an exception.
    """
    try:
        # Launch the native search UI through the shell "start" verb.
        subprocess.run(["start", "search-ms:"], shell=True)
        time.sleep(1)
        # Type the query, give the pane a moment to react, then submit.
        pg.write(name)
        time.sleep(1)
        pg.press('enter')
    except Exception as exc:
        print("Error:", exc)
13 |
--------------------------------------------------------------------------------
/JarvisGUI/LoginUI/LoginResources.qrc:
--------------------------------------------------------------------------------
1 |
2 |
3 | ../JarvisImages/backButton.png
4 | ../JarvisImages/exitButton.png
5 | ../JarvisImages/retryButton.png
6 | ../JarvisImages/loginButton.png
7 | ../JarvisImages/newUser.png
8 |
9 |
10 |
--------------------------------------------------------------------------------
/JarvisGUI/MainFile.qrc:
--------------------------------------------------------------------------------
1 |
2 |
3 | JarvisImages/backButton.png
4 | JarvisImages/exitButton.png
5 | JarvisImages/loginButton.png
6 | JarvisImages/logo.png
7 | JarvisImages/retryButton.png
8 | JarvisImages/startButton.png
9 |
10 |
11 |
--------------------------------------------------------------------------------
/JarvisGUI/NewSignUP/ButtonResource.qrc:
--------------------------------------------------------------------------------
1 |
2 |
3 | ../JarvisImages/backButton.png
4 | ../JarvisImages/blkimg.png
5 | ../JarvisImages/exitButton.png
6 | ../JarvisImages/newUser.png
7 | ../JarvisImages/retryButton.png
8 | ../JarvisImages/signUp.png
9 |
10 |
11 |
--------------------------------------------------------------------------------
/SystemAppsUsingCMD.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 |
def list_installed_apps_windows():
    """Return the lines of PowerShell's `Get-AppxPackage` output.

    Returns an empty list when PowerShell exits non-zero or is not
    installed at all (e.g. on a non-Windows host, where the original
    raised an uncaught FileNotFoundError).
    """
    try:
        result = subprocess.run(['powershell', 'Get-AppxPackage'], capture_output=True, text=True)
    except FileNotFoundError:
        # PowerShell executable not present on this machine.
        return []
    if result.returncode == 0:
        # splitlines() avoids the spurious trailing '' that split('\n') produced.
        return result.stdout.splitlines()
    else:
        return []
9 |
# Example usage: dump the installed-app listing to a text file.
installed_apps = list_installed_apps_windows()
# Context manager guarantees the handle is flushed and closed — the
# original opened the file and never called close().
with open("App list.txt", "w") as file:
    for app in installed_apps:
        file.write(app)
        file.write("\n")
        print(app)
17 |
--------------------------------------------------------------------------------
/selenuimFile.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 |
def run_selenium_side(file_path, runner_path="/path/to/selenium-side-runner"):
    """Execute a Selenium IDE recording with selenium-side-runner.

    Parameters
    ----------
    file_path : str
        Path to the .side file to run.
    runner_path : str, optional
        Location of the selenium-side-runner executable. Previously a
        hard-coded constant inside the function; now overridable per
        call while keeping the old default.
    """
    # Argument-list form (shell=False) avoids any quoting issues in paths.
    command = [runner_path, file_path]
    subprocess.run(command)
12 |
if __name__ == "__main__":
    # Demo entry point: point this at a real recording before running.
    side_file = "/path/to/your/file.side"
    run_selenium_side(side_file)
19 |
--------------------------------------------------------------------------------
/text2speech.py:
--------------------------------------------------------------------------------
def switch_voice(engine, voice_id):
    """Switch the pyttsx3 *engine* to the voice with id *voice_id*.

    Afterwards announces the change: "Male" when the id is the Windows
    David voice, "Female" otherwise.
    """
    voices = engine.getProperty('voices')
    for voice in voices:
        if voice.id == voice_id:
            engine.setProperty('voice', voice.id)
            break
    # Raw string fix: the original literal depended on '\S', '\M', '\T'…
    # being invalid escapes that Python keeps verbatim — that now emits
    # a SyntaxWarning and is slated to become an error. The value is
    # byte-identical.
    if voice_id == r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_DAVID_11.0':
        text2speech(engine, "Voice change to Male")
    else:
        text2speech(engine, "Voice change to Female")
12 |
def text2speech(engine, text):
    """Speak *text* aloud through the given pyttsx3 *engine*, blocking
    until playback finishes."""
    engine.say(text)
    engine.runAndWait()
17 |
--------------------------------------------------------------------------------
/JarvisGUI/dialog.py:
--------------------------------------------------------------------------------
1 | # This Python file uses the following encoding: utf-8
2 | import sys
3 |
4 | from PySide6.QtWidgets import QApplication, QDialog
5 |
6 | # Important:
7 | # You need to run the following command to generate the ui_form.py file
8 | # pyside6-uic form.ui -o ui_form.py, or
9 | # pyside2-uic form.ui -o ui_form.py
10 | from ui_form import Ui_Dialog
11 |
class Dialog(QDialog):
    """Qt Creator template dialog hosting the generated Ui_Dialog form."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # ui_form.py must be regenerated whenever form.ui changes:
        #   pyside6-uic form.ui -o ui_form.py
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    dialog = Dialog()
    dialog.show()
    sys.exit(app.exec())
24 |
--------------------------------------------------------------------------------
/JarvisGUI/LoginUI/widget.py:
--------------------------------------------------------------------------------
1 | # This Python file uses the following encoding: utf-8
2 | import sys
3 |
4 | from PySide6.QtWidgets import QApplication, QWidget
5 |
6 | # Important:
7 | # You need to run the following command to generate the ui_form.py file
8 | # pyside6-uic form.ui -o ui_form.py, or
9 | # pyside2-uic form.ui -o ui_form.py
10 | from ui_form import Ui_Widget
11 |
class Widget(QWidget):
    """Qt Creator template widget hosting the generated Ui_Widget form."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # ui_form.py must be regenerated whenever form.ui changes:
        #   pyside6-uic form.ui -o ui_form.py
        self.ui = Ui_Widget()
        self.ui.setupUi(self)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = Widget()
    window.show()
    sys.exit(app.exec())
24 |
--------------------------------------------------------------------------------
/JarvisGUI/SignUpUI/dialog.py:
--------------------------------------------------------------------------------
1 | # This Python file uses the following encoding: utf-8
2 | import sys
3 |
4 | from PySide6.QtWidgets import QApplication, QDialog
5 |
6 | # Important:
7 | # You need to run the following command to generate the ui_form.py file
8 | # pyside6-uic form.ui -o ui_form.py, or
9 | # pyside2-uic form.ui -o ui_form.py
10 | from ui_form import Ui_Dialog
11 |
class Dialog(QDialog):
    """Qt Creator template dialog hosting the generated Ui_Dialog form."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # ui_form.py must be regenerated whenever form.ui changes:
        #   pyside6-uic form.ui -o ui_form.py
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    dialog = Dialog()
    dialog.show()
    sys.exit(app.exec())
24 |
--------------------------------------------------------------------------------
/JarvisGUI/FaceRecogGUI/widget.py:
--------------------------------------------------------------------------------
1 | # This Python file uses the following encoding: utf-8
2 | import sys
3 |
4 | from PySide6.QtWidgets import QApplication, QWidget
5 |
6 | # Important:
7 | # You need to run the following command to generate the ui_form.py file
8 | # pyside6-uic form.ui -o ui_form.py, or
9 | # pyside2-uic form.ui -o ui_form.py
10 | from ui_form import Ui_Widget
11 |
class Widget(QWidget):
    """Qt Creator template widget hosting the generated Ui_Widget form."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # ui_form.py must be regenerated whenever form.ui changes:
        #   pyside6-uic form.ui -o ui_form.py
        self.ui = Ui_Widget()
        self.ui.setupUi(self)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = Widget()
    window.show()
    sys.exit(app.exec())
24 |
--------------------------------------------------------------------------------
/JarvisGUI/NewSignUP/widget.py:
--------------------------------------------------------------------------------
1 | # This Python file uses the following encoding: utf-8
2 | import sys
3 |
4 | from PySide6.QtWidgets import QApplication, QWidget
5 |
6 | # Important:
7 | # You need to run the following command to generate the ui_form.py file
8 | # pyside6-uic form.ui -o ui_form.py, or
9 | # pyside2-uic form.ui -o ui_form.py
10 | from ui_form import Ui_Widget
11 |
class Widget(QWidget):
    """Qt Creator template widget hosting the generated Ui_Widget form."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # ui_form.py must be regenerated whenever form.ui changes:
        #   pyside6-uic form.ui -o ui_form.py
        self.ui = Ui_Widget()
        self.ui.setupUi(self)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = Widget()
    window.show()
    sys.exit(app.exec())
24 |
--------------------------------------------------------------------------------
/JarvisGUI/FaceRecogForNewUser/widget.py:
--------------------------------------------------------------------------------
1 | # This Python file uses the following encoding: utf-8
2 | import sys
3 |
4 | from PySide6.QtWidgets import QApplication, QWidget
5 |
6 | # Important:
7 | # You need to run the following command to generate the ui_form.py file
8 | # pyside6-uic form.ui -o ui_form.py, or
9 | # pyside2-uic form.ui -o ui_form.py
10 | from ui_form import Ui_Widget
11 |
class Widget(QWidget):
    """Qt Creator template widget hosting the generated Ui_Widget form."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # ui_form.py must be regenerated whenever form.ui changes:
        #   pyside6-uic form.ui -o ui_form.py
        self.ui = Ui_Widget()
        self.ui.setupUi(self)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = Widget()
    window.show()
    sys.exit(app.exec())
24 |
--------------------------------------------------------------------------------
/gptIntegration.py:
--------------------------------------------------------------------------------
1 | import openai
2 | import speech2text as s2t
3 |
4 | openai.api_key = 'API_KEY' #replace 'API_KEY' with your own OpenAI API key
5 |
def chat():
    """Console chat loop against the OpenAI chat API.

    Blank input is ignored; typing 'stop listening' (any case) exits.
    NOTE(review): uses the legacy pre-1.0 `openai.ChatCompletion`
    interface — confirm the pinned openai version supports it.
    """
    print("\nSay 'stop listening' to stop")
    while True:
        text = input("You: ")
        if not text:
            continue
        if text.lower() == 'stop listening':
            return

        reply = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system",
                 "content": "Your name is Jarvis and you're a virtual assistant."},
                {"role": "user", "content": text},
            ],
        )
        print(reply['choices'][0]['message']['content'])
26 |
27 |
--------------------------------------------------------------------------------
/web_gmail.py:
--------------------------------------------------------------------------------
1 | import pyautogui as pg
2 | import text2speech as t2s
3 | import speech2text as s2t
4 | import pyttsx3
5 | engine = pyttsx3.init()
6 | import time
7 |
8 | # Open Gmail in a web browser
9 |
def main():
    """Open Gmail in Chrome via the Run dialog, then capture one voice command.

    Returns the recognized phrase, or None after a spoken goodbye when the
    user says "stop". NOTE(review): the "close youtube" phrase looks copied
    from the YouTube module — confirm it is intended here.
    """
    # Win+R -> Run dialog -> launch Chrome straight into the inbox.
    pg.hotkey('win', 'r')
    pg.write('chrome https://mail.google.com/mail/u/0/#inbox')
    pg.press('enter')
    # Maximize the new window, then give the page time to load.
    pg.hotkey('win','up')
    time.sleep(5)
    while True:
        user_input = s2t.voice2text("en").lower()
        print(user_input)
        if not user_input:
            continue
        if "stop" in user_input or "close youtube" in user_input:
            print("Exiting program...")
            t2s.text2speech(engine, "thank you sir")
            return
        return user_input
26 |
27 |
--------------------------------------------------------------------------------
/winsearch.py:
--------------------------------------------------------------------------------
1 | import pyautogui as pg
2 | import time
3 | from fuzzywuzzy import fuzz
4 | import subprocess
5 | import psutil
6 |
def os_open(app):
    """Launch *app* through the Windows Start-menu search, then maximize it."""
    pg.press('win')
    time.sleep(1)
    # Type the name plus a trailing space so the search results refresh.
    pg.write(app)
    pg.write(" ")
    time.sleep(1)
    pg.press('enter')
    time.sleep(1)

    # Win+Up maximizes the freshly opened window.
    pg.hotkey('win', 'up')
18 |
def close_application(app):
    """Terminate the first running process whose name contains *app*.

    Prints a status line either way and never raises: unreadable or
    protected processes are skipped (the original crashed with a
    TypeError when psutil reported a process name of None, and let
    AccessDenied/NoSuchProcess escape from terminate()).
    """
    for proc in psutil.process_iter(['pid', 'name']):
        name = proc.info['name']
        # info['name'] can be None for some system processes.
        if name and app in name.lower():
            try:
                proc.terminate()
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                # Vanished mid-iteration or protected; keep searching.
                continue
            print("App terminated successfully.")
            return
    print("App not found.")
28 |
# NOTE(review): module-level snapshot taken at import time and never
# used anywhere in this file — confirm before removing.
running_processes = psutil.process_iter()
30 |
--------------------------------------------------------------------------------
/Email.py:
--------------------------------------------------------------------------------
1 | import smtplib
2 | from email.message import EmailMessage
3 |
def send_email(receiver_email, subject, body):
    """Send a plain-text email through Gmail's SMTP relay.

    The sender address and app password below are placeholders and must
    be filled in before use. Failures are printed, never raised.
    """
    # SMTP configuration (Gmail, STARTTLS on port 587).
    smtp_server = 'smtp.gmail.com'
    port = 587
    sender_email = 'yourmailid@gmail.com' #replace your mail id
    password = 'your_generated_app_password'

    try:
        with smtplib.SMTP(smtp_server, port) as server:
            server.starttls()  # upgrade the connection before logging in
            server.login(sender_email, password)

            message = EmailMessage()
            message.set_content(body)
            message['Subject'] = subject
            message['From'] = sender_email
            message['To'] = receiver_email

            server.send_message(message)
            print("Email sent successfully!")
    except Exception as e:
        print(f"An error occurred: {e}")
27 |
--------------------------------------------------------------------------------
/WeatherUpdates.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
def get_weather(city_name):
    """Print the current weather for *city_name* via OpenWeatherMap.

    Replace 'YOUR_API_KEY' with a real key. Network failures and
    unknown cities are reported on stdout instead of raising (the
    original had no timeout and no handling for request errors or a
    missing 'cod' field).
    """
    api_key = 'YOUR_API_KEY' # Replace 'YOUR_API_KEY' with your actual API key from OpenWeatherMap
    url = f'http://api.openweathermap.org/data/2.5/weather?q={city_name}&appid={api_key}&units=metric'

    try:
        # Timeout keeps the call from hanging forever on a dead network.
        response = requests.get(url, timeout=10)
        data = response.json()
    except requests.RequestException as exc:
        print(f"Error: request failed ({exc})")
        return

    # OWM returns cod == 200 on success; .get() avoids a KeyError on
    # malformed error payloads.
    if data.get('cod') == 200:
        weather_description = data['weather'][0]['description']
        temperature = data['main']['temp']
        humidity = data['main']['humidity']
        wind_speed = data['wind']['speed']

        print(f"Weather in {city_name}:")
        print(f"Description: {weather_description}")
        print(f"Temperature: {temperature}°C")
        print(f"Humidity: {humidity}%")
        print(f"Wind Speed: {wind_speed} m/s")
    else:
        print("Error: City not found")
26 |
27 |
--------------------------------------------------------------------------------
/tellmejoke.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | import pyjokes
3 |
def recognize_speech():
    """Capture one utterance from the microphone and return it lowercased.

    Returns "" when the audio was unintelligible or the recognition
    service could not be reached.
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)

    try:
        return recognizer.recognize_google(audio).lower()
    except sr.UnknownValueError:
        return ""
    except sr.RequestError:
        print("Speech recognition service unavailable")
        return ""
19 |
def tell_joke():
    """Fetch a random pyjokes one-liner and print it."""
    print(pyjokes.get_joke())
23 |
# Console/voice driver loop: typed input is checked first; when it does
# not match, the elif below blocks on the microphone before deciding.
while True:
    user_input = input("You (type or say 'tell me a joke'): ").lower()

    if "tell me a joke" in user_input:
        tell_joke()
    # NOTE(review): recognize_speech() runs as a side effect of evaluating
    # this condition — every non-matching typed input triggers a blocking
    # microphone listen. Confirm this is intentional.
    elif "tell me a joke" in recognize_speech():
        tell_joke()
    elif user_input == "exit":
        print("Exiting...")
        break
    else:
        print("I'm sorry, I didn't understand. You can ask me to tell you a joke!")
36 |
--------------------------------------------------------------------------------
/objDetecGfg.py:
--------------------------------------------------------------------------------
import cv2
from matplotlib import pyplot as plt

# Detect STOP signs in image.jpg with a Haar cascade and display the
# annotated result.
img = cv2.imread("image.jpg")
if img is None:
    # imread signals a missing/unreadable file by returning None (no
    # exception); fail fast with a clear message instead of crashing
    # later inside cvtColor.
    raise FileNotFoundError("image.jpg could not be read")

# OpenCV loads images as BGR; matplotlib expects RGB, and the cascade
# detector wants grayscale.
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# minSize filters out extra-small dots that would otherwise be reported
# as STOP signs.
stop_data = cv2.CascadeClassifier('stop_data.xml')

found = stop_data.detectMultiScale(img_gray,
                                   minSize =(20, 20))

# Don't do anything if there's no sign.
amount_found = len(found)

if amount_found != 0:

    # detectMultiScale yields (x, y, width, height) boxes; there may be
    # more than one sign in the image.
    for (x, y, width, height) in found:

        # BUG FIX: the original drew to (x + height, y + width), swapping
        # the box dimensions — the far corner is (x + width, y + height).
        cv2.rectangle(img_rgb, (x, y),
                      (x + width, y + height),
                      (0, 255, 0), 5)

    # Creates the environment of the picture and shows it.
    plt.subplot(1, 1, 1)
    plt.imshow(img_rgb)
    plt.show()
43 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 JARVIS AI Assistant
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/minimizeWindow.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | import pyautogui
3 |
def recognize_speech():
    """Listen once on the default microphone and return the lowercased text.

    Returns "" if the speech was unintelligible or the Google
    recognition service is unreachable.
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        recognizer.adjust_for_ambient_noise(source)  # Adjust for ambient noise
        audio = recognizer.listen(source)

    try:
        return recognizer.recognize_google(audio).lower()
    except sr.UnknownValueError:
        return ""  # speech not recognized
    except sr.RequestError:
        print("Speech recognition service unavailable")
        return ""
19 |
def minimize_window():
    """Send Win+Down — Windows' minimize / restore-down shortcut — to the
    currently focused window."""
    pyautogui.hotkey('win', 'down')
22 |
def main():
    """Voice-command loop: 'minimize window' minimizes, 'exit' quits."""
    while True:
        command = recognize_speech()

        if "minimize window" in command:
            minimize_window()
        elif command == "exit":
            print("Exiting...")
            break
        else:
            print("Command not recognized. Please try again.")


if __name__ == "__main__":
    main()
37 |
--------------------------------------------------------------------------------
/usingNLPnew.py:
--------------------------------------------------------------------------------
# Minimal intent-classification demo: TF-IDF features + a linear SVM map
# free-form sentences onto canonical assistant commands.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC

# Sample dataset (training sentences)
sentences = [
    "Could you please open the Chrome browser?",
    "Please launch Google Chrome.",
    "Can you start the browser?",
    "Open Firefox, please.",
    "Start the web browser, please.",
]
# Corresponding commands — labels aligned 1:1 with `sentences`
commands = [
    "open chrome",
    "open chrome",
    "open chrome",
    "open firefox",
    "open chrome",
]

# Feature extraction: English stop words are dropped before TF-IDF weighting
vectorizer = TfidfVectorizer(stop_words='english')
print("vec :",vectorizer)
X = vectorizer.fit_transform(sentences)
print("x",X)

# Training the model
model = LinearSVC()
model.fit(X, commands)

# New sentence to predict
new_sentence = "Could you open google?"

# Transform the new sentence using the same (already fitted) vectorizer
new_sentence_vectorized = vectorizer.transform([new_sentence])
print("new sentence",new_sentence_vectorized)
# Predict the corresponding command (returns an array of one label)
predicted_command = model.predict(new_sentence_vectorized)
print("Predicted command:", predicted_command)
40 |
--------------------------------------------------------------------------------
/EngtoTam.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | from googletrans import Translator
3 |
def tamil_speech_to_text():
    """Record one Tamil utterance from the microphone and transcribe it.

    Returns the recognized text, or "" on failure. (The original fell
    through and implicitly returned None, which crashed the downstream
    translate call; "" matches the convention the other speech modules
    in this project use.)
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Speak something in Tamil...")
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)

    try:
        print("Transcribing...")
        # Using Google Speech Recognition with the Tamil language code.
        text = recognizer.recognize_google(audio, language="ta-IN")
        print("You said:", text)
        return text
    except sr.UnknownValueError:
        print("Sorry, I couldn't understand what you said.")
        return ""
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
        return ""
21 |
def translate_tamil_to_english(text):
    """Translate Tamil *text* to English with googletrans; returns the
    translated string."""
    result = Translator().translate(text, src='ta', dest='en')
    return result.text
26 |
if __name__ == "__main__":
    # Capture a Tamil utterance, then translate and display it.
    spoken = tamil_speech_to_text()
    english = translate_tamil_to_english(spoken)
    print("Translated text:", english)
32 |
--------------------------------------------------------------------------------
/InterfaceGUI.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | import pyttsx3
3 | import pygame
4 | import threading
5 |
def speech_to_text():
    """Listen once and return the recognized phrase, lowercased.

    Returns "" when the audio is unintelligible or the Google service
    cannot be reached. (The original let sr.RequestError propagate and
    kill the caller's loop; the sibling speech modules all return ""
    in that case.)
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)
    try:
        text = recognizer.recognize_google(audio)
        print("You said:", text)
        return text.lower()
    except sr.UnknownValueError:
        print("Sorry, I could not understand what you said.")
        return ""
    except sr.RequestError:
        print("Speech recognition service unavailable")
        return ""
19 |
def show_animation():
    """Play the wake-word acknowledgement sound at full volume.

    Replace windows_notification.mp3 with the path to your own
    notification sound file if needed.
    """
    pygame.init()
    pygame.mixer.init()
    pygame.mixer.music.load("windows_notification.mp3")
    pygame.mixer.music.play()
    pygame.mixer.music.set_volume(1.0)

    print("Animation triggered!")
28 |
def main():
    """Loop forever waiting for the 'hey jarvis' wake phrase, then chime."""
    while True:
        heard = speech_to_text()
        if "hey jarvis" not in heard:
            continue
        worker = threading.Thread(target=show_animation)
        worker.start()
        worker.join()  # wait for the sound to start before listening again


if __name__ == "__main__":
    main()
39 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # Core GUI Framework
2 | PyQt5>=5.15.0
3 | PyQt6>=6.0.0
4 |
5 | # Computer Vision and Image Processing
6 | opencv-python>=4.5.0
7 | face-recognition>=1.3.0
8 | dlib>=19.22.0
9 | Pillow>=8.3.0
10 |
11 | # Speech Recognition and Synthesis
12 | SpeechRecognition>=3.8.1
13 | pyttsx3>=2.90
14 | pyaudio>=0.2.11
15 |
16 | # Web Automation and Scraping
17 | selenium>=4.0.0
18 | requests>=2.26.0
19 | beautifulsoup4>=4.10.0
20 | webdriver-manager>=3.8.0
21 |
22 | # WhatsApp and Communication
23 | pywhatkit>=5.3
24 | pyautogui>=0.9.53
25 |
26 | # API and Data Processing
27 | numpy>=1.21.0
28 | pandas>=1.3.0
29 | matplotlib>=3.4.0
30 |
31 | # News and Weather APIs
32 | newsapi-python>=0.2.6
33 | python-dotenv>=0.19.0
34 |
35 | # Email functionality
36 | # smtplib is part of the Python standard library — no pip install needed
37 | email-validator>=1.1.3
38 |
39 | # Audio and Media
40 | pygame>=2.0.0
41 | mutagen>=1.45.0
42 |
43 | # YouTube functionality
44 | pytube>=11.0.0
45 | youtube-dl>=2021.6.6
46 |
47 | # Google Services
48 | google-api-python-client>=2.0.0
49 | google-auth-httplib2>=0.1.0
50 | google-auth-oauthlib>=0.5.0
51 |
52 | # System Integration
53 | psutil>=5.8.0
54 | keyboard>=0.13.5
55 | mouse>=0.7.1
56 |
57 | # Natural Language Processing
58 | nltk>=3.6.0
59 | textblob>=0.15.3
60 |
61 | # JSON and Configuration
62 | jsonschema>=3.2.0
63 |
64 | # Date and Time
65 | python-dateutil>=2.8.0
66 | schedule>=1.1.0
67 |
68 | # Gesture Recognition
69 | mediapipe>=0.8.7
--------------------------------------------------------------------------------
/JarvisGUI/main.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from PyQt5.QtWidgets import QDialog, QApplication
3 | from PyQt5 import QtGui
4 | from mainFileNew import Ui_Dialog
5 |
class mainFileNew(QDialog):
    """Landing dialog for the Jarvis GUI.

    Plays the background GIF and wires the start/login/exit buttons to
    their follow-up windows.
    """

    def __init__(self):
        super(mainFileNew, self).__init__()
        print("Setting up GUI")
        self.firstUI = Ui_Dialog()
        self.firstUI.setupUi(self)

        # Raw string fix: the original path relied on '\B', '\P', '\J',
        # '\s' being invalid escapes kept verbatim — that now emits a
        # SyntaxWarning. The value is byte-identical.
        self.firstUI.movie = QtGui.QMovie(r"D:\BSc CSD Sem 6\Project\JarvisGUI\JarvisImages\samplegui3.gif")
        self.firstUI.gif1.setMovie(self.firstUI.movie)
        self.firstUI.movie.start()

        self.firstUI.exitBtn.clicked.connect(self.close)
        self.firstUI.startBtn.clicked.connect(self.connectToFaceRecognition)
        self.firstUI.loginBtn.clicked.connect(self.connectToLoginPage)

    def connectToFaceRecognition(self):
        """Replace this dialog with the face-recognition window."""
        # Imported lazily to avoid a circular import at module load time.
        from faceRecog import faceRecog
        self.showFaceRecogWindow = faceRecog()
        self.close()  # Close the current dialog
        self.showFaceRecogWindow.show()

    def connectToLoginPage(self):
        """Replace this dialog with the login window."""
        from loginWindowMain import loginWindow
        self.showLoginWindow = loginWindow()
        self.close()  # Close the current dialog
        self.showLoginWindow.show()
33 |
if __name__ == "__main__":
    # Start the Qt event loop with the landing dialog as the only window.
    app = QApplication(sys.argv)
    ui = mainFileNew()
    ui.show()
    sys.exit(app.exec_())
39 |
--------------------------------------------------------------------------------
/NewsApi.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import pyttsx3
3 |
def fetch_news(api_key, source):
    """Fetch top headlines for *source* from NewsAPI.

    Returns the decoded JSON payload. On any network / HTTP / decode
    failure, returns {'status': 'error'} so callers that check
    data['status'] keep working instead of crashing.
    """
    url = f"https://newsapi.org/v2/top-headlines?sources={source}&apiKey={api_key}"
    try:
        # Fix: a timeout keeps the assistant from hanging forever on a dead link.
        response = requests.get(url, timeout=10)
        return response.json()
    except (requests.RequestException, ValueError):
        return {"status": "error"}
9 |
def speak(text):
    """Speak *text* aloud, preferring the second installed voice.

    Fix: the original indexed voices[1] unconditionally, which raises
    IndexError on systems with a single TTS voice; fall back to the
    default voice in that case.
    """
    engine = pyttsx3.init()
    voices = engine.getProperty('voices')
    if len(voices) > 1:
        engine.setProperty('voice', voices[1].id)
    engine.say(text)
    engine.runAndWait()
16 |
def main():
    """Read the latest headlines from the configured news source aloud."""
    api_key = "NEWS_API"  # Replace 'NEWS_API' with your own API key

    # Enter the news source you want to fetch news from
    news_source = "the-hindu"  # Example: "bbc-news", "cnn", "the-verge", etc.

    # Fetch news
    news_data = fetch_news(api_key, news_source)

    # Check if news fetching was successful
    if news_data['status'] == 'ok':
        articles = news_data['articles']
        if articles:
            speak("Here are the latest headlines.")
            for index, article in enumerate(articles, 1):
                title = article['title']
                # Fix: NewsAPI may return null descriptions — don't read
                # the literal word "None" aloud.
                description = article['description'] or ""
                speak(f"Headline {index}: {title}. {description}")
        else:
            speak("No news articles found.")
    else:
        speak("Sorry, I couldn't fetch the news at the moment.")

if __name__ == "__main__":
    main()
42 |
--------------------------------------------------------------------------------
/ModelTrainer.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | from PIL import Image #pillow package
4 | import os
5 |
path = 'samples' # Path for samples already taken

# LBPH (Local Binary Patterns Histograms) recognizer to be trained below.
recognizer = cv2.face.LBPHFaceRecognizer_create() # Local Binary Patterns Histograms
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# Haar Cascade classifier is an effective object detection approach
11 |
12 |
def Images_And_Labels(path):
    """Collect face crops and their numeric labels from every image in *path*.

    File names are expected to embed the label as the second dot-separated
    field (e.g. ``user.3.12.jpg`` -> label 3).
    """
    sample_files = [os.path.join(path, name) for name in os.listdir(path)]
    crops, labels = [], []

    for sample_file in sample_files:
        # Grayscale load via Pillow, then to a uint8 array for OpenCV.
        arr = np.array(Image.open(sample_file).convert('L'), 'uint8')
        label = int(os.path.split(sample_file)[-1].split(".")[1])

        # One image may contain several detected faces; keep them all.
        for (x, y, w, h) in detector.detectMultiScale(arr):
            crops.append(arr[y:y + h, x:x + w])
            labels.append(label)

    return crops, labels
32 |
print ("Training faces. It will take a few seconds. Wait ...")

# Train the LBPH recognizer on every detected face crop.
faces,ids = Images_And_Labels(path)
recognizer.train(faces, np.array(ids))

recognizer.write('trainer/trainer.yml') # Save the trained model as trainer.yml

print("Model trained, Now we can recognize your face.")
41 |
--------------------------------------------------------------------------------
/increaseVolume.py:
--------------------------------------------------------------------------------
1 | import platform
2 | import ctypes
3 | from ctypes import cast, POINTER
4 | from comtypes import CLSCTX_ALL
5 | from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
6 |
def adjust_volume(change):
    """Shift the Windows master volume by *change* percentage points.

    *change* may be negative; the resulting scalar level is clamped to
    [0.0, 1.0]. On non-Windows platforms this is a no-op with a message.
    """
    if platform.system() == 'Windows':
        # Get the default audio endpoint volume interface via pycaw/COM.
        devices = AudioUtilities.GetSpeakers()
        interface = devices.Activate(
            IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
        volume = cast(interface, POINTER(IAudioEndpointVolume))

        # Fix: removed the dead GetVolumeRange() COM round-trip — its
        # results were unpacked but never used; all math is done in the
        # 0.0-1.0 scalar domain, clamped at both ends.
        current_volume = volume.GetMasterVolumeLevelScalar()
        new_volume = min(1.0, max(0.0, current_volume + change / 100.0))
        volume.SetMasterVolumeLevelScalar(new_volume, None)
        print(f"Adjusted volume by {change} points.")

    else:
        print("Adjusting volume is not supported on this platform.")
31 |
def process_command(command):
    """Map a spoken command containing 'increase'/'decrease' to a 10% step."""
    step = 10 if "increase" in command else -10 if "decrease" in command else None
    if step is None:
        print("Command not recognized.")
    else:
        adjust_volume(step)
39 |
40 |
--------------------------------------------------------------------------------
/voice_reg_bg.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | import schedule
3 | import datetime
4 | import pyttsx3
5 |
def speak(text):
    """Create a fresh TTS engine and speak *text* synchronously."""
    tts = pyttsx3.init()
    tts.say(text)
    tts.runAndWait()
10 |
def set_alarm(alarm_time):
    """Register a daily wake-up announcement at *alarm_time* (HH:MM).

    NOTE(review): nothing visible in this module calls
    schedule.run_pending(), so the registered job appears never to fire —
    confirm against the caller.
    """
    def announce():
        speak("Wake up! It's time.")

    schedule.every().day.at(alarm_time).do(announce)
16 |
def listen():
    """Capture one utterance from the microphone and return it lowercased.

    Returns "" when the speech could not be understood or the recognition
    service failed (an apology is spoken in either case).
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        # Calibrate against ambient noise before listening.
        recognizer.adjust_for_ambient_noise(source)

        print("Listening...")
        audio = recognizer.listen(source)

    try:
        print("Recognizing...")
        heard = recognizer.recognize_google(audio, language='en-in')
    except sr.UnknownValueError:
        speak("Sorry, I could not understand what you said.")
        return ""
    except sr.RequestError:
        speak("Sorry, I am facing some technical issues.")
        return ""
    print(f"User said: {heard}\n")
    return heard.lower()
38 |
if __name__ == "__main__":
    # Fix: process_command was referenced but never defined or imported in
    # this file, so the first recognized command raised NameError. The
    # implementation lives in increaseVolume.py.
    from increaseVolume import process_command

    speak("Hello! I am your desktop voice assistant.")
    while True:
        speak("What would you like me to do?")
        command = listen()
        if command:
            process_command(command)
46 |
--------------------------------------------------------------------------------
/snap.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import pyautogui
3 |
def detect_snapping():
    """Watch the webcam and treat a large frame-to-frame change as a 'snap'.

    Despite the console text, no audio is analysed: the trigger is purely
    visual (mean absolute pixel difference between consecutive grayscale
    frames). On trigger, the foreground window is minimized and the loop
    exits; pressing 'q' also exits.
    """
    cap = cv2.VideoCapture(0) # Open the default camera (usually the webcam)

    if not cap.isOpened():
        print("Error: Unable to open webcam.")
        return

    print("Listening for snapping sound... Press 'q' to quit.")

    # Previous grayscale frame; None until the first frame has been seen.
    prev_frame = None

    while True:
        ret, frame = cap.read()

        if not ret:
            print("Error: Unable to capture frame.")
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        if prev_frame is not None:
            # Calculate absolute difference between current and previous frame
            diff = cv2.absdiff(gray, prev_frame)
            mean_diff = diff.mean()

            # If the mean difference exceeds a threshold, consider it as snapping
            if mean_diff > 10: # Adjust threshold as needed
                print("Snapping detected!")
                minimize_window()
                break

        prev_frame = gray

        cv2.imshow("Frame", frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
44 |
def minimize_window():
    """Minimize the active window via the Win+Down keyboard shortcut."""
    pyautogui.hotkey('winleft', 'down')


if __name__ == "__main__":
    detect_snapping()
51 |
--------------------------------------------------------------------------------
/GetRootword.py:
--------------------------------------------------------------------------------
1 | import nltk
2 | from nltk.corpus import wordnet
3 | from nltk.corpus import stopwords
4 | from nltk.tokenize import word_tokenize
5 |
6 |
7 |
def remove_stopwords(cmd):  # stop word remover
    """Strip English stopwords from *cmd* and return the remaining words joined by spaces."""
    stop_words = set(stopwords.words('english'))
    kept = [w for w in word_tokenize(cmd) if w.lower() not in stop_words]
    filtered_text = ' '.join(kept)
    print("Original sentence:", cmd)
    print("After removing stopwords:", filtered_text)
    return filtered_text
17 |
18 |
def get_similar_words(word):
    """Return *word* plus every WordNet lemma of its synsets (may contain duplicates)."""
    print("Getting similar words ... ")
    synonyms = [word]
    for syn in wordnet.synsets(word):
        for lemma in syn.lemmas():
            synonyms.append(lemma.name())
    # Fix: console-output typo "wors" -> "words".
    print("similar words for ", word, " is ", synonyms)
    return synonyms
27 |
28 |
def get_root_word(similarwords):
    """Return the first known action/app keyword present in *similarwords*.

    Implicitly returns None when no keyword matches.
    """
    print("get root word for the similar words", similarwords[0])
    known = ("open", "start", "close", "chrome", "camera")
    for candidate in known:
        if candidate in similarwords:
            print(candidate)
            return candidate
36 |
37 |
def main():
    """Prompt for a command and map each remaining word to a root keyword."""
    cmd = input("Enter the cmd ")
    print(cmd)
    word_af_remove = remove_stopwords(cmd)
    rootword = [get_root_word(get_similar_words(w)) for w in word_af_remove.split()]
    print(rootword)


main()
51 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# (removed stray "HEAD" merge-conflict artifact)
# (duplicate ignore rules removed — the consolidated list below is a superset
#  of the entries that previously appeared twice)
45 |
46 | # Ignore virtual environments
47 | venv/
48 | .idea/
49 | __pycache__/
50 | *.pyc
51 | *.log
52 | rev1.txt
53 |
54 | # Ignore sensitive files
55 | credentials.json
56 | old_credentials.json
57 | token.json
58 | contacts.csv
59 | user_data.csv
60 | dataset.csv
61 | os_dataset.csv
62 | web_Dataset.csv
63 | PyWhatKit_DB.txt
64 |
65 | # Ignore media files and big files
66 | *.jpg
67 | *.jpeg
68 | *.png
69 | *.gif
70 | *.wav
71 | *.mp3
72 | *.mp4
73 | *.avi
74 | Downloads/
75 | Videos/
76 | Utube_Videos_Downloaded/
77 | JarvisGUI/FaceRecogGUI/faceImages/
78 | JarvisGUI/FaceRecogGUI/videoss/
79 | JarvisGUI/.qtcreator/
80 | JarvisGUI/FaceRecogGUI/.qtcreator/
81 | JarvisGUI/LoginUI/.qtcreator/
82 | **/.qtcreator/
83 |
84 |
85 | # Ignore development files
86 | seleniumTestings/
87 | Report/
88 | *.side
89 | *.pyproject.user
90 | loading.py
91 | sample.py
92 | chumma.py
93 | chumma2.py
94 |
95 |
--------------------------------------------------------------------------------
/Gesture Control/GoogleMaps.py:
--------------------------------------------------------------------------------
1 | import googlemaps
2 | from datetime import datetime
3 |
def get_directions(api_key, origin, destination, mode='driving', departure_time=None):
    """Query the Google Maps Directions API.

    Parameters:
    - api_key: Your Google Maps API key.
    - origin: Starting location.
    - destination: Ending location.
    - mode: 'driving' (default), 'walking', 'bicycling' or 'transit'.
    - departure_time: Optional departure time (the API defaults to now).

    Returns the raw directions result (a list of route dictionaries).
    """
    client = googlemaps.Client(key=api_key)
    return client.directions(origin, destination, mode=mode,
                             departure_time=departure_time)
26 |
def main():
    """Interactive demo: prompt for two locations and print driving directions."""
    # Replace 'YOUR_API_KEY' with your actual Google Maps API key
    api_key = 'YOUR_API_KEY'

    origin = input("Enter the starting location: ")
    destination = input("Enter the destination: ")

    # You can change the mode of transportation if needed
    directions = get_directions(api_key, origin, destination, 'driving')

    # Each step carries HTML-formatted turn-by-turn instructions.
    print("Directions:")
    for step in directions[0]['legs'][0]['steps']:
        print(step['html_instructions'])

if __name__ == "__main__":
    main()
47 |
--------------------------------------------------------------------------------
/switchVoices.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | import pyttsx3
3 |
def recognize_speech():
    """Listen once on the default microphone; return lowercase text or ""."""
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        recognizer.adjust_for_ambient_noise(source)  # Adjust for ambient noise
        audio = recognizer.listen(source)

    try:
        return recognizer.recognize_google(audio).lower()
    except sr.UnknownValueError:
        # Speech was present but unintelligible.
        return ""
    except sr.RequestError:
        print("Speech recognition service unavailable")
        return ""
19 |
def switch_voice(engine, voice_id):
    """Select the installed TTS voice whose id equals *voice_id* (no-op otherwise)."""
    for candidate in engine.getProperty('voices'):
        if candidate.id == voice_id:
            engine.setProperty('voice', candidate.id)
            break
27 |
def main():
    """Voice-command loop to switch between the David and Zira SAPI voices.

    Fix: the registry-path voice ids are now raw strings. The originals
    relied on "\\S", "\\M", "\\T" etc. being invalid escape sequences that
    CPython keeps verbatim (with a DeprecationWarning); raw strings give
    the identical value without the warning.
    """
    engine = pyttsx3.init()

    while True:
        user_input = recognize_speech()

        if "switch to jarvis" in user_input:
            switch_voice(engine, r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_DAVID_11.0')
            engine.say("Voice switched to male.")
            engine.runAndWait()
        elif "switch to friday" in user_input:
            switch_voice(engine, r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0')
            engine.say("Voice switched to female.")
            engine.runAndWait()
        elif user_input == "exit":
            print("Exiting...")
            break
        else:
            engine.say("I'm sorry, I didn't understand.")
            engine.runAndWait()

if __name__ == "__main__":
    main()
51 |
--------------------------------------------------------------------------------
/FingerScroll.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import pyautogui
4 |
# Function to perform scrolling based on hand gesture
def perform_scroll():
    """Scroll the active window up by a fixed step."""
    pyautogui.scroll(10)  # 10 units per trigger; tune to taste
8 |
# Main function to capture video and detect hand gestures
def main():
    """Webcam loop: find the largest edge-detected contour and, when it looks
    like a hand in the top half of the frame, trigger a scroll-up.

    NOTE(review): the "hand" test is only contour area > 10000 px located in
    the upper half of the frame — any large moving object triggers scrolling.
    """
    cap = cv2.VideoCapture(0)

    while True:
        ret, frame = cap.read()
        if not ret:
            print("Unable to capture video.")
            break

        # Convert the frame to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Apply Gaussian blur
        blurred = cv2.GaussianBlur(gray, (15, 15), 0)

        # Detect edges using Canny
        edges = cv2.Canny(blurred, 50, 150)

        # Find contours
        contours, _ = cv2.findContours(edges.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # Check if any contours are found
        if len(contours) > 0:
            # Get the largest contour
            max_contour = max(contours, key=cv2.contourArea)

            # Get the bounding box of the contour
            x, y, w, h = cv2.boundingRect(max_contour)

            # Check if the bounding box is big enough to be considered a hand
            if w * h > 10000:  # Adjust this threshold according to your needs
                # Draw bounding box around the hand
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

                # Check if hand is in top portion of the frame (for scroll up)
                if y < frame.shape[0] // 2:
                    perform_scroll()

        cv2.imshow('Frame', frame)

        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release the capture
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
60 |
--------------------------------------------------------------------------------
/web_apps.py:
--------------------------------------------------------------------------------
1 | import pyautogui as pg
2 | import text2speech as t2s
3 | import speech2text as s2t
4 | import pyttsx3
5 | engine = pyttsx3.init()
6 | import time
# Every launcher below types a command into the Windows Run dialog (Win+R).
# Fix: a short pause after opening the dialog keeps fast machines from
# typing before the dialog has keyboard focus (the original raced this and
# could drop the command entirely).
def _run_dialog(command):
    """Open the Run dialog, wait for focus, type *command* and execute it."""
    pg.hotkey('win', 'r')
    time.sleep(0.5)
    pg.write(command)
    pg.press('enter')

# Open Google Maps in a web browser
def open_google_maps():
    _run_dialog('chrome https://www.google.com/maps')

# Open Google Calendar in a web browser
def open_google_calendar():
    _run_dialog('chrome https://calendar.google.com')

# Open LinkedIn in a web browser
def open_linkedin():
    _run_dialog('chrome https://www.linkedin.com')

# Open Telegram Web (launching the desktop app is intentionally disabled)
def open_telegram():
    _run_dialog('chrome https://web.telegram.org/k/')

# Open OneDrive in a web browser
def open_onedrive():
    _run_dialog('chrome https://onedrive.live.com')

# Open GitHub in a web browser
def open_github():
    _run_dialog('chrome https://github.com')


# Open Google Drive in a web browser
def open_google_drive():
    _run_dialog('chrome https://drive.google.com')

# Open ChatGPT web interface
def open_chatgpt():
    _run_dialog('chrome https://www.chatgpt.com')

# Open Telegram web interface
def open_telegram_web():
    _run_dialog('chrome https://web.telegram.org')

# Open Microsoft Teams via the Start-menu search (not the Run dialog)
def open_teams():
    pg.hotkey('win')
    time.sleep(0.5)
    pg.write('teams')
    pg.press('enter')

# Open Spotify desktop app
def open_spotify():
    _run_dialog('spotify')
74 |
75 |
--------------------------------------------------------------------------------
/OsFunction.py:
--------------------------------------------------------------------------------
1 | import pyautogui as pg
2 | import time
3 | from fuzzywuzzy import fuzz
4 | import subprocess
5 | import psutil
6 |
def os_open(app):
    """Launch *app* from the Start-menu search, then maximize its window."""
    pg.press('win')
    time.sleep(1)           # wait for the Start menu to open
    pg.write(app)
    time.sleep(1)           # wait for search results
    pg.press('enter')
    time.sleep(1)           # give the app a moment to start

    pg.hotkey('win', 'up')  # maximize the new window
16 |
def close_application(app):
    """Force-kill the first running process whose name fuzzily matches *app*.

    Fixes over the original:
    - ``pname`` is initialised, so a run with no fuzzy match no longer
      raises NameError;
    - matching stops at the first hit and ``taskkill`` runs exactly once,
      instead of being executed on every iteration of the process loop.
    """
    pname = ""
    for process in psutil.process_iter():
        ratio = fuzz.ratio(process.name().replace(".exe", ""), app)
        if ratio >= 70:  # You can adjust the threshold as needed
            pname = process.name()
            print("Strings are approximately the same.")
            break
        print("Strings are different.")

    if not pname:
        print(f"Failed to close {app}.")
        return

    try:
        # Run taskkill command to close the application
        result = subprocess.run(['taskkill', '/F', '/IM', pname],
                                capture_output=True, text=True)
        # Check if the command was successful
        if result.returncode == 0:
            print(f"Successfully closed {app}.")
        else:
            print(f"Failed to close {app}.")
            print("Error:", result.stderr)
    except Exception as e:
        print("An error occurred:", e)
37 |
def minimize(app):
    """Minimize the window of the first running process fuzzily matching *app*.

    Uses PowerShell's WScript.Shell to focus the window by process name and
    then sends Alt+Space, N — the system-menu "Minimize" accelerator.
    Windows-only; does nothing when no process matches.
    """
    running_processes = psutil.process_iter()
    pname=""
    for process in running_processes:

        # Compare process name (sans ".exe") against the requested app name.
        ratio = fuzz.ratio((process.name()).replace(".exe",""), app)
        if ratio >= 50: # You can adjust the threshold as needed
            pname = process.name()
            print("Strings are approximately the same.")
            break
        else:
            print("Strings are different.")
    if pname != "":

        try:
            # Focus the matched window, then send the minimize accelerator.
            powershell_cmd = "$wshell = New-Object -ComObject wscript.shell;$wshell.AppActivate('"+pname+"');sleep 1;$wshell.SendKeys('%{SPACE}n')"
            subprocess.run(["powershell", "-Command", powershell_cmd])

        except Exception as e:
            print("An error occurred:", e)
59 |
--------------------------------------------------------------------------------
/AdvancedSpeech.py:
--------------------------------------------------------------------------------
1 | import pyttsx3
2 | import speech_recognition as sr
3 | import csv
4 | from sklearn.feature_extraction.text import TfidfVectorizer
5 | from sklearn.svm import LinearSVC
6 | import subprocess
7 |
8 | recognizer = sr.Recognizer()
9 |
10 | # Initialize the pyttsx3 engine
11 | engine = pyttsx3.init()
12 |
# Function to speak text
def speak(text):
    """Queue *text* on the module-level TTS engine and block until spoken."""
    engine.say(text)
    engine.runAndWait()
17 |
# Load data from CSV
# Expected format: header row, then "sentence,command" pairs used as the
# training corpus for the intent classifier below.
sentences = []
commands = []

with open('os_dataset.csv', 'r', newline='') as file:
    csv_reader = csv.reader(file)
    next(csv_reader) # Skip header
    for row in csv_reader:
        sentences.append(row[0])
        commands.append(row[1])
28 |
# Feature extraction
# TF-IDF vectors over the example sentences; English stopwords discarded.
vectorizer = TfidfVectorizer(stop_words='english')
X = vectorizer.fit_transform(sentences)

# Training the model
# Linear SVM mapping each sentence vector to its command label.
model = LinearSVC()
model.fit(X, commands)
36 |
# Main loop: listen, transcribe, and predict the matching OS command.
while True:
    text = ""
    with sr.Microphone() as source:
        print("Speak something...")
        # Adjust for ambient noise if needed
        recognizer.adjust_for_ambient_noise(source)

        # Listen for the audio input
        audio = recognizer.listen(source)

    try:
        print("Transcribing...")
        text = recognizer.recognize_google(audio)
        print("You said:", text)
        speak(text)  # Speak back the user's input

    except sr.UnknownValueError:
        print("Sorry, could not understand audio.")
    except sr.RequestError as e:
        print("Error fetching results; {0}".format(e))

    # Fix: the original still classified the empty string after a failed
    # recognition, printing a bogus "predicted command" every time.
    if not text:
        continue

    # Transform the new sentence using the same vectorizer
    new_sentence_vectorized = vectorizer.transform([text])

    # Predict the corresponding command
    predicted_command = model.predict(new_sentence_vectorized)
    print("Predicted command:", predicted_command)
    cmd = predicted_command[0]
    cmd = cmd.replace("open_", "")
    command = "start " + cmd
    print("Command: ", command)
    # subprocess.run(command, shell=True)
70 |
--------------------------------------------------------------------------------
/IntegratedFunctional.py:
--------------------------------------------------------------------------------
1 | import Email
2 | import WeatherUpdates
3 | import speech2text
4 | import utubeVideoDownloader
5 | import gptIntegration
6 | import ScheduleGmeet
7 |
print("\nStarted listening... Say 'exit' to stop")

def _voice():
    """Capture one spoken phrase as text (blocks until something is heard)."""
    return speech2text.english_speech_to_text()

def main():
    """Dispatch spoken commands to the email/weather/YouTube/GPT/meet helpers.

    Fixes over the original:
    - speech input went through an undefined ``speechRecog`` module, which
      raised NameError on the first loop; it now uses the imported
      ``speech2text`` module;
    - ``city_name.strip()`` returned a new string that was discarded.
    """
    while True:
        user_input = _voice()
        user_input = user_input.lower()
        if not user_input:
            continue

        if "send email" in user_input:
            print("Recipient's Email:")
            receiver_email = _voice().lower().replace(" ", "")
            while not receiver_email:
                receiver_email = _voice().lower().replace(" ", "")
            print("Receiver:", receiver_email)
            print("Subject:")
            subject = _voice()
            while not subject:
                subject = _voice()
            print("Body:")
            body = _voice()
            while not body:
                body = _voice()
            Email.send_email(receiver_email, subject, body)

        elif "check weather" in user_input:
            city_name = _voice()
            while not city_name:
                print("Which city do you want to check weather for?")
                city_name = _voice()
            city_name = city_name.strip()
            if city_name.lower() == 'exit':
                break
            WeatherUpdates.get_weather(city_name)

        elif "download youtube video" in user_input:
            url = input("Enter the URL of the video: ")
            save_path = input("Enter the path to save the video: ")
            # save_path="Videos"
            utubeVideoDownloader.download_video(url, save_path)

        elif "go to interactive mode" in user_input:
            gptIntegration.chat()

        elif "schedule a meet" in user_input:
            ScheduleGmeet.main()

        elif "exit" in user_input:
            break

        else:
            print("Invalid command. Please try again.")

if __name__ == "__main__":
    main()
64 |
65 |
--------------------------------------------------------------------------------
/speech2text.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | from googletrans import Translator
3 |
def translate_tamil_to_english(text):
    """Translate Tamil *text* to English via googletrans and return the string."""
    result = Translator().translate(text, src='ta', dest='en')
    return result.text
8 |
def tamil_speech_to_text():
    """Listen for Tamil speech and return its English translation.

    Retries indefinitely until Google recognition succeeds; recognition or
    service errors reset ``text`` and loop again.
    """
    text=""
    while(text==""):

        recognizer = sr.Recognizer()
        with sr.Microphone() as source:
            print("Speak something in Tamil...")
            recognizer.adjust_for_ambient_noise(source)
            audio = recognizer.listen(source)


        try:
            print("Transcribing...")
            # Using Google Speech Recognition for Tamil
            text = recognizer.recognize_google(audio, language="ta-IN")
            print("You said:", text)
            # Translate the recognized Tamil into English before returning.
            text = translate_tamil_to_english(text)
            print(text)
            return text
        except sr.UnknownValueError:
            print("Sorry, I couldn't understand what you said.")
            text= ""
        except sr.RequestError as e:
            print("Could not request results from Google Speech Recognition service; {0}".format(e))
            text= ""
34 |
def english_speech_to_text():
    """Listen for English speech and return the recognized text.

    Retries indefinitely until Google recognition succeeds; recognition or
    service errors reset ``text`` and loop again.
    """
    text=""
    while text=="":
        recognizer = sr.Recognizer()
        with sr.Microphone() as source:
            print("Speak something ")
            recognizer.adjust_for_ambient_noise(source)
            audio = recognizer.listen(source)

        try:
            print("Transcribing...")
            # Using Google Speech Recognition for English
            text = recognizer.recognize_google(audio, language="en")
            print("You said:", text)
            return text
        except sr.UnknownValueError:
            print("Sorry, I couldn't understand what you said.")
            text=""
        except sr.RequestError as e:
            print("Could not request results from Google Speech Recognition service; {0}".format(e))
            text=""
56 |
57 |
def voice2text(ln):
    """Dispatch to the Tamil ('ta') or English (anything else) recognizer."""
    return tamil_speech_to_text() if ln == "ta" else english_speech_to_text()
64 |
65 |
66 |
--------------------------------------------------------------------------------
/objectIdentification.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | from tensorflow.keras.applications import MobileNetV2
4 | from tensorflow.keras.applications.mobilenet_v2 import preprocess_input, decode_predictions
5 | import pyttsx3
6 |
7 | # Load pre-trained MobileNetV2 model
8 | model = MobileNetV2(weights="imagenet")
9 |
10 | # Initialize text-to-speech engine
11 | engine = pyttsx3.init()
12 |
# Function to recognize object from image
def recognize_object(image):
    """Classify *image* with MobileNetV2 and return the top-1 label name."""
    # Resize to the network's input size, add a batch axis, and normalise.
    batch = preprocess_input(np.expand_dims(cv2.resize(image, (224, 224)), axis=0))
    decoded = decode_predictions(model.predict(batch))
    # decoded[0][0] is (class_id, human_name, score) for the best guess.
    return decoded[0][0][1]
26 |
# Function to respond to user query about calories
def respond_to_query(object_name):
    """Speak the calorie count for *object_name*, if it is in our table."""
    # Dummy calorie information (replace with real data)
    calorie_info = {
        "coke": 140,
        "banana": 105,
        "apple": 95,
        # Add more items as needed
    }

    calories = calorie_info.get(object_name.lower())
    if calories is None:
        response = f"Sorry, I don't have calorie information for {object_name}."
    else:
        response = f"The {object_name} has {calories} calories."

    # Speak the response
    engine.say(response)
    engine.runAndWait()
60 | if cv2.waitKey(1) & 0xFF == ord('q'):
61 | break
62 |
63 | # Detect object and respond to query if 'c' is pressed
64 | if cv2.waitKey(1) & 0xFF == ord('c'):
65 | object_name = recognize_object(frame)
66 | respond_to_query(object_name)
67 |
68 | # Display text on the frame
69 | cv2.putText(frame, f"Object: {object_name}", (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
70 | cv2.imshow('Object Details', frame)
71 | cv2.waitKey(0) # Wait for a key press before moving to the next frame
72 |
73 | # Release camera and close windows
74 | cap.release()
75 | cv2.destroyAllWindows()
76 |
--------------------------------------------------------------------------------
/increaseBrightness.py:
--------------------------------------------------------------------------------
1 | import platform
2 |
def increase_brightness():
    """Raise screen brightness (Windows: set 80%; macOS: +0.1; else no-op)."""
    system = platform.system()
    if system == 'Windows':
        try:
            import wmi

            monitor = wmi.WMI(namespace='wmi')
            # Second argument is the timeout; 80 is the target level (0-100).
            monitor.WmiMonitorBrightnessMethods()[0].WmiSetBrightness(80, 0)
            print("Brightness increased.")
        except Exception as e:
            print("Failed to increase brightness:", e)
    elif system == 'Darwin':
        try:
            import screenbrightness

            level = screenbrightness.get_brightness()
            # Bump by 0.1, clamped to the valid [0.0, 1.0] range.
            screenbrightness.set_brightness(min(1.0, max(0.0, level + 0.1)))
            print("Brightness increased.")
        except Exception as e:
            print("Failed to increase brightness:", e)
    else:
        print("Adjusting brightness is not supported on this platform.")
30 |
def decrease_brightness():
    """Lower screen brightness (Windows: set 20%; macOS: -0.1; else no-op)."""
    system = platform.system()
    if system == 'Windows':
        try:
            import wmi

            monitor = wmi.WMI(namespace='wmi')
            # Second argument is the timeout; 20 is the target level (0-100).
            monitor.WmiMonitorBrightnessMethods()[0].WmiSetBrightness(20, 0)
            print("Brightness decreased.")
        except Exception as e:
            print("Failed to decrease brightness:", e)
    elif system == 'Darwin':
        try:
            import screenbrightness

            level = screenbrightness.get_brightness()
            # Drop by 0.1, clamped to the valid [0.0, 1.0] range.
            screenbrightness.set_brightness(min(1.0, max(0.0, level - 0.1)))
            print("Brightness decreased.")
        except Exception as e:
            print("Failed to decrease brightness:", e)
    else:
        print("Adjusting brightness is not supported on this platform.")
58 |
59 | # Test the functions
60 | # command = input("Enter your command: ").lower()
61 |
62 |
63 |
--------------------------------------------------------------------------------
/web_function.py:
--------------------------------------------------------------------------------
1 | import pyautogui as pg
2 |
3 | import text2speech
4 | import web_apps as wa
5 | import web_Whatsapp
6 | import web_Youtube
7 | import web_chrome
8 | import web_edge
9 | import web_firefox
10 | import web_gmail
11 | import pyttsx3
12 | engine = pyttsx3.init()
13 |
def web_open(app, input_str):
    """Open the requested web app/site and delegate follow-up handling.

    Args:
        app: lowercase command text naming the target (e.g. "open whatsapp").
        input_str: remaining user input, forwarded to handlers that accept a query.

    Returns:
        The matched handler's result (often leftover voice input for the
        caller to process), or None when the handler has no result or
        nothing matched.
    """
    if "whatsapp" in app:
        print(app)
        return web_Whatsapp.main()

    if "youtube" in app:
        print(app)
        return web_Youtube.main(input_str)

    if "chrome" in app:
        print(app)
        return web_chrome.main(input_str)

    if "edge" in app:
        print(app)
        return web_edge.main(input_str)

    if "firefox" in app:
        try:
            return web_firefox.main(input_str)
        except Exception as e:
            # Firefox (or its driver) is missing: report and fall through.
            print("FireFox is not in this system", e)
            text2speech.text2speech(engine, "FireFox is not available")

    if "gmail" in app:
        print(app)
        return web_gmail.main()

    if "telegram" in app:
        print(app)
        wa.open_telegram()
        return

    if "google calendar" in app:
        print(app)
        wa.open_google_calendar()
        return

    if "google maps" in app:
        print(app)
        wa.open_google_maps()
        return

    if "one drive" in app:
        print(app)
        wa.open_onedrive()
        return

    if "google drive" in app:
        print(app)
        wa.open_google_drive()
        return  # bug fix: previously fell through and kept testing later branches

    if "teams" in app:
        print(app)
        wa.open_teams()
        return

    if "spotify" in app:
        print(app)
        wa.open_spotify()
        return

    if "chat gpt" in app:
        print(app)
        wa.open_chatgpt()
        return

    if "linkedin" in app:
        print(app)
        wa.open_linkedin()
        return

    if "github" in app:
        print(app)
        wa.open_github()
        return
97 |
--------------------------------------------------------------------------------
/ScheduleGmeet.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import datetime
3 | import os.path
4 | from googleapiclient.discovery import build
5 | from google_auth_oauthlib.flow import InstalledAppFlow
6 | from google.auth.transport.requests import Request
7 | from google.oauth2.credentials import Credentials
8 |
9 | # If modifying these scopes, delete the file token.json.
10 | SCOPES = ['https://www.googleapis.com/auth/calendar']
11 |
def main():
    """Create a one-hour Google Meet event starting now on the primary calendar.

    Runs the OAuth installed-app flow on first use (requires credentials.json
    in the working directory) and caches the token in token.json for
    subsequent runs. Prints the event and Meet links when done.
    """
    import uuid  # local import: only needed to build a unique conference request id

    creds = None
    # token.json stores the user's access/refresh tokens from a previous run.
    if os.path.exists('token.json'):
        creds = Credentials.from_authorized_user_file('token.json')
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run.
        with open('token.json', 'w') as token:
            token.write(creds.to_json())

    service = build('calendar', 'v3', credentials=creds)

    now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
    end_time = (datetime.datetime.utcnow() + datetime.timedelta(hours=1)).isoformat() + 'Z'
    event = {
        'summary': 'Google Meet',
        'description': 'Meeting description',
        'start': {
            'dateTime': now,
            'timeZone': 'UTC',
        },
        'end': {
            'dateTime': end_time,
            'timeZone': 'UTC',
        },
        'conferenceData': {
            'createRequest': {
                # Bug fix: requestId must be unique per createRequest; the old
                # fixed id could make the API ignore conference creation on reuse.
                'requestId': uuid.uuid4().hex
            }
        },
    }

    event = service.events().insert(calendarId='primary', body=event,
                                    conferenceDataVersion=1).execute()
    print('Event created: %s' % event.get('htmlLink'))

    # Print Google Meet joining info.
    print('Google Meet joining info:')
    print('Meeting Link:', event.get('hangoutLink'))
63 |
--------------------------------------------------------------------------------
/youtube.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | from selenium import webdriver
3 | from selenium.webdriver.common.keys import Keys
4 | import time
5 |
def recognize_input():
    """Listen on the default microphone and return the recognized text, lowercased.

    Returns None if the audio was unintelligible or the Google Speech
    Recognition request failed.
    """
    recognizer = sr.Recognizer()

    with sr.Microphone() as source:
        recognizer.adjust_for_ambient_noise(source, duration=2)
        recognizer.pause_threshold = 1.5

        print("Listening... Say something:")
        try:
            audio = recognizer.listen(source, timeout=10)
            text = recognizer.recognize_google(audio)
        except sr.UnknownValueError:
            print("Sorry, I couldn't understand the audio.")
            return None
        except sr.RequestError as e:
            print(f"Could not request results from Google Speech Recognition service; {e}")
            return None
        print(f"User (Voice): {text}")
        return text.lower()
25 |
def open_google():
    """Start a Chrome WebDriver session and load the Google home page.

    ChromeDriver must be on PATH. Returns the driver instance.
    """
    print("Opening Google...")
    browser = webdriver.Chrome()
    browser.get("https://www.google.com/")
    return browser
31 |
def open_youtube(driver):
    """Navigate an existing WebDriver session to the YouTube home page."""
    print("Opening YouTube...")
    driver.get("https://www.youtube.com/")
35 |
def search_youtube(driver, query):
    """Type *query* into YouTube's search box and submit it."""
    box = driver.find_element("name", "search_query")
    box.clear()  # drop any previous query text
    box.send_keys(query)
    box.send_keys(Keys.RETURN)
41 |
# Main program: voice-driven browser control loop.
web_driver = None
while True:
    user_input = recognize_input()

    if user_input:
        if "stop" in user_input:
            print("Exiting program...")
            break
        elif "open chrome" in user_input:
            web_driver = open_google()
        elif "open youtube" in user_input:
            # A browser session is needed before loading YouTube.
            web_driver = open_google()
            if web_driver:
                open_youtube(web_driver)
            else:
                print("Please open a web page first, for example, say 'open Chrome'.")
        elif "search for" in user_input and web_driver:
            query_start_index = user_input.find("search for") + len("search for")
            search_query = user_input[query_start_index:].strip()
            print(f"Searching YouTube for: {search_query}")
            search_youtube(web_driver, search_query)
        else:
            # Bug fix: the hint said 'open Google', but the accepted command is 'open Chrome'.
            print("Invalid command. Please say 'open Chrome', 'open YouTube', or 'search for [query]'.")
66 |
--------------------------------------------------------------------------------
/SpeechAuthentication.py:
--------------------------------------------------------------------------------
1 | import pyaudio
2 | import wave
3 | import speech_recognition as sr
4 | import os
5 |
6 | # Function to train a new user's voice and store it
def train_voice(username):
    """Record a 5-second voice sample for *username* and save it as a WAV file.

    The sample is written to '<username>_voice_sample.wav' in the working
    directory, where authenticate_voice() later finds it.
    """
    chunk = 1024
    sample_format = pyaudio.paInt16
    channels = 1
    rate = 44100
    seconds = 5
    out_file = f"{username}_voice_sample.wav"

    recorder = pyaudio.PyAudio()

    # Capture raw audio frames from the default input device.
    print("Please say something to train your voice.")
    stream = recorder.open(format=sample_format, channels=channels, rate=rate,
                           input=True, frames_per_buffer=chunk)
    frames = [stream.read(chunk) for _ in range(int(rate / chunk * seconds))]
    print("Voice trained successfully.")

    # Release the audio device before writing the file.
    stream.stop_stream()
    stream.close()
    recorder.terminate()

    with wave.open(out_file, 'wb') as wf:
        wf.setnchannels(channels)
        wf.setsampwidth(recorder.get_sample_size(sample_format))
        wf.setframerate(rate)
        wf.writeframes(b''.join(frames))
36 |
37 | # Function to authenticate the user's voice during login
def authenticate_voice():
    """Prompt for speech, then scan stored voice samples for a passphrase match.

    Greets the matching user and returns early on success; otherwise prints
    a failure message. NOTE: matching is naive recognized-text comparison
    against the literal passphrase "hello", not an actual voiceprint check.
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Please say something to authenticate.")
        audio = recognizer.listen(source)

    # Walk every stored sample in the working directory.
    for sample in os.listdir("."):
        if not sample.endswith("_voice_sample.wav"):
            continue
        with sr.AudioFile(sample) as f:
            recorded_audio = recognizer.record(f)
        try:
            recognized_text = recognizer.recognize_google(recorded_audio)
        except sr.UnknownValueError:
            continue  # unintelligible sample: try the next one
        if recognized_text == "hello":  # assumed passphrase
            username = sample.split("_")[0]
            print(f"Hello welcome back {username}")
            return

    print("Voice authentication failed.")
61 |
62 | # Example usage
def main():
    """Demo flow: enroll a sample user's voice, then run authentication."""
    # Train voice for a new user
    train_voice("John")

    # Authenticate voice during login
    authenticate_voice()

if __name__ == "__main__":
    main()
72 |
--------------------------------------------------------------------------------
/Just_youtube_functionality.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 | from selenium import webdriver
3 | from selenium.webdriver.common.keys import Keys
4 | import time
5 |
def recognize_input():
    """Capture one utterance from the microphone and return it in lower case.

    Returns None when the speech could not be understood or the Google
    recognition service could not be reached.
    """
    recognizer = sr.Recognizer()

    with sr.Microphone() as mic:
        recognizer.adjust_for_ambient_noise(mic, duration=2)
        recognizer.pause_threshold = 1.5

        print("Listening... Say something:")
        try:
            captured = recognizer.listen(mic, timeout=10)
            spoken = recognizer.recognize_google(captured)
        except sr.UnknownValueError:
            print("Sorry, I couldn't understand the audio.")
            return None
        except sr.RequestError as e:
            print(f"Could not request results from Google Speech Recognition service; {e}")
            return None
        print(f"User (Voice): {spoken}")
        return spoken.lower()
25 |
def open_google():
    """Launch Chrome via WebDriver (ChromeDriver must be on PATH) and load Google.

    Returns the new driver instance.
    """
    print("Opening Google...")
    session = webdriver.Chrome()
    session.get("https://www.google.com/")
    return session
31 |
def open_youtube(driver):
    """Point an existing WebDriver session at the YouTube home page."""
    print("Opening YouTube...")
    driver.get("https://www.youtube.com/")
35 |
def search_youtube(driver, query):
    """Enter *query* into the YouTube search box and press Return."""
    search_field = driver.find_element("name", "search_query")
    search_field.clear()  # remove any leftover text
    search_field.send_keys(query)
    search_field.send_keys(Keys.RETURN)
41 |
# Main program: voice-driven browser control loop.
web_driver = None
while True:
    user_input = recognize_input()

    if user_input:
        if "stop" in user_input:
            print("Exiting program...")
            break
        elif "open chrome" in user_input:
            web_driver = open_google()
        elif "open youtube" in user_input:
            # A browser session is needed before loading YouTube.
            web_driver = open_google()
            if web_driver:
                open_youtube(web_driver)
            else:
                print("Please open a web page first, for example, say 'open Chrome'.")
        elif "search for" in user_input and web_driver:
            query_start_index = user_input.find("search for") + len("search for")
            search_query = user_input[query_start_index:].strip()
            print(f"Searching YouTube for: {search_query}")
            search_youtube(web_driver, search_query)
        else:
            # Bug fix: the hint said 'open Google', but the accepted command is 'open Chrome'.
            print("Invalid command. Please say 'open Chrome', 'open YouTube', or 'search for [query]'.")
66 |
--------------------------------------------------------------------------------
/Remainders and Alarms.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import speech_recognition as sr
4 | import pyttsx3
5 | import datetime
6 | import re
7 | import time
8 |
9 | def speak(text):
10 | engine = pyttsx3.init()
11 | engine.say(text)
12 | engine.runAndWait()
13 |
14 | def listen():
15 | recognizer = sr.Recognizer()
16 | with sr.Microphone() as source:
17 | print("Listening...")
18 | recognizer.pause_threshold = 1
19 | audio = recognizer.listen(source)
20 | try:
21 | print("Recognizing...")
22 | query = recognizer.recognize_google(audio, language='en-in')
23 | print(f"User said: {query}\n")
24 | return query.lower()
25 | except sr.UnknownValueError:
26 | speak("Sorry, I could not understand what you said.")
27 | return ""
28 | except sr.RequestError as e:
29 | speak("Sorry, I am facing some technical issues.")
30 | return ""
31 |
32 | def set_alarm():
33 | speak("What time would you like to set the alarm for?")
34 | alarm_time = listen()
35 | if alarm_time:
36 | # Using regular expression to extract time from user's input
37 | match = re.search(r'\b\d{1,2}:\d{2} [ap]m\b', alarm_time)
38 | if match:
39 | alarm_time = match.group()
40 | try:
41 | alarm_time = datetime.datetime.strptime(alarm_time, "%I:%M %p")
42 | current_time = datetime.datetime.now()
43 | alarm_time = alarm_time.replace(year=current_time.year, month=current_time.month, day=current_time.day)
44 | if alarm_time < current_time:
45 | alarm_time += datetime.timedelta(days=1)
46 | delta_time = alarm_time - current_time
47 | seconds = delta_time.total_seconds()
48 | speak(f"Alarm set for {alarm_time.strftime('%I:%M %p')}.")
49 | return seconds
50 | except ValueError:
51 | speak("Sorry, I could not understand the time.")
52 | else:
53 | speak("Sorry, I could not understand the time format.")
54 |
55 | if __name__ == "__main__":
56 | speak("Hello! I am your alarm assistant.")
57 | while True:
58 | speak("What would you like me to do?")
59 | command = listen()
60 | if "set alarm" in command:
61 | time_diff = set_alarm()
62 | if time_diff:
63 | time.sleep(time_diff)
64 | speak("Wake up! It's time.")
65 | break
66 | elif "exit" in command:
67 | speak("Goodbye!")
68 | break
69 |
70 |
--------------------------------------------------------------------------------
/JarvisGUI/signUpImpl.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import csv
3 | from PyQt5.QtWidgets import QDialog, QApplication, QMessageBox
4 | from signUpGUI import Ui_SignUp
5 |
class mainFileNew(QDialog):
    """Sign-up dialog: validates the form and appends new users to user_data.csv."""

    def __init__(self):
        super(mainFileNew, self).__init__()
        print("Setting up GUI")
        self.firstUI = Ui_SignUp()
        self.firstUI.setupUi(self)

        self.username = None  # set on (attempted) form submission

        self.firstUI.exitBtn.clicked.connect(self.close)
        self.firstUI.SignupBtn.clicked.connect(self.saveUserData)
        self.firstUI.backBtn.clicked.connect(self.goToMainPage)

    def getUserNameEntry(self):
        """Return the username QLineEdit widget."""
        return self.firstUI.userNameEntry

    def saveUserData(self):
        """Validate the form, persist the new user, then open face enrolment."""
        self.username = self.firstUI.userNameEntry.text()
        password = self.firstUI.passwordEntry.text()
        confirm_password = self.firstUI.ConfirmpasswordEntry.text()

        # Robustness: reject blank credentials before any further checks.
        if not self.username or not password:
            self.showMessageBox("Warning", "Username and password must not be empty.")
            return

        if password != confirm_password:
            self.showMessageBox("Warning", "Passwords do not match. Please try again.")
            return

        if self.checkUsernameExists(self.username):
            self.showMessageBox("Warning", "Username already exists. Please choose a different username.")
            return

        # Append the new credentials to the CSV store.
        with open('user_data.csv', 'a', newline='') as file:
            csv.writer(file).writerow([self.username, password])

        self.showMessageBox("Success", "User registered successfully.")

        # Imported lazily to avoid a circular import with the face-recognition module.
        from newUserPyFaceRecogFile import newFaceRecog
        self.shownewFaceRecogWindow = newFaceRecog()
        self.close()  # Close the current dialog
        self.shownewFaceRecogWindow.show()

    def checkUsernameExists(self, username):
        """Return True if *username* is already registered in user_data.csv.

        Bug fix: a missing file (no users registered yet) used to raise
        FileNotFoundError; it now simply means "not registered".
        """
        try:
            with open('user_data.csv', 'r', newline='') as file:
                for row in csv.reader(file):
                    if row and row[0] == username:
                        return True
        except FileNotFoundError:
            pass  # first signup: no data file yet
        return False

    def goToMainPage(self):
        """Return to the initial landing window."""
        from main import mainFileNew  # lazy import avoids a circular dependency
        self.showMainWindow = mainFileNew()
        self.close()  # Close the current dialog
        self.showMainWindow.show()

    def showMessageBox(self, title, message):
        """Show a modal information box styled to match the dark theme."""
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setWindowTitle(title)
        msg.setStyleSheet("color: white; background-color: #333333; font-size: 12pt;")
        msg.setText(message)
        msg.exec_()
69 |
70 |
if __name__ == "__main__":
    # Standalone entry point: launch the sign-up dialog on its own.
    app = QApplication(sys.argv)
    ui = mainFileNew()
    ui.show()
    sys.exit(app.exec_())
76 |
--------------------------------------------------------------------------------
/JarvisGUI/FaceRecogGUI/faceRecogGUI.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'form.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.15.10
6 | #
7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is
8 | # run again. Do not edit this file unless you know what you are doing.
9 |
10 |
11 | from PyQt5 import QtCore, QtGui, QtWidgets
12 |
13 |
class Ui_Widget(object):
    """pyuic5-generated layout for the face-recognition screen.

    Regenerate with pyuic5 rather than editing by hand — manual changes
    are lost on the next generation run.
    """

    def setupUi(self, Widget):
        """Build and position all child widgets on *Widget* (1080x720, black)."""
        Widget.setObjectName("Widget")
        Widget.resize(1080, 720)
        Widget.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        Widget.setStyleSheet("background-color: rgb(0, 0, 0);")
        # Top banner logo. NOTE(review): relative pixmap paths resolve against
        # the process working directory — verify at runtime.
        self.label = QtWidgets.QLabel(Widget)
        self.label.setGeometry(QtCore.QRect(290, 10, 531, 81))
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap("../JarvisImages/logo.png"))
        self.label.setScaledContents(True)
        self.label.setObjectName("label")
        # Decorative side image.
        self.label_2 = QtWidgets.QLabel(Widget)
        self.label_2.setGeometry(QtCore.QRect(750, 100, 321, 511))
        self.label_2.setText("")
        self.label_2.setPixmap(QtGui.QPixmap("../JarvisImages/ironmanSidePose.jpg"))
        self.label_2.setScaledContents(True)
        self.label_2.setObjectName("label_2")
        # Bordered placeholder area (presumably for the camera feed — TODO confirm).
        self.label_3 = QtWidgets.QLabel(Widget)
        self.label_3.setGeometry(QtCore.QRect(59, 116, 631, 401))
        self.label_3.setStyleSheet("border-color: rgb(255, 255, 255);\n"
"color: rgb(255, 255, 255);\n"
"border-style: solid;\n"
"border-width: 1px;")
        self.label_3.setText("")
        self.label_3.setObjectName("label_3")
        # Image-only login button (text left empty; the border-image is the face).
        self.loginBtn = QtWidgets.QPushButton(Widget)
        self.loginBtn.setGeometry(QtCore.QRect(110, 570, 171, 61))
        self.loginBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.loginBtn.setStyleSheet("border-image: url(:/JarvisImages/loginButton.png);")
        self.loginBtn.setText("")
        self.loginBtn.setObjectName("loginBtn")
        # Image-only exit button.
        self.exitBtn = QtWidgets.QPushButton(Widget)
        self.exitBtn.setGeometry(QtCore.QRect(440, 570, 171, 61))
        self.exitBtn.setStyleSheet("border-image: url(:/JarvisImages/exitButton.png);")
        self.exitBtn.setText("")
        self.exitBtn.setObjectName("exitBtn")

        self.retranslateUi(Widget)
        QtCore.QMetaObject.connectSlotsByName(Widget)

    def retranslateUi(self, Widget):
        """Apply translatable strings (only the window title here)."""
        _translate = QtCore.QCoreApplication.translate
        Widget.setWindowTitle(_translate("Widget", "Widget"))
58 |
59 |
if __name__ == "__main__":
    # Manual preview: render this generated form standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Widget = QtWidgets.QWidget()
    ui = Ui_Widget()
    ui.setupUi(Widget)
    Widget.show()
    sys.exit(app.exec_())
68 |
--------------------------------------------------------------------------------
/web_Youtube.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 | from selenium.webdriver.common.keys import Keys
3 | from selenium.webdriver.common.by import By
4 | import time
5 | import text2speech as t2s
6 | import speech2text as s2t
7 | import pyttsx3
8 | engine = pyttsx3.init()
9 |
def open_google():
    """Start a Chrome WebDriver session and load Google.

    ChromeDriver must be on PATH. Returns the driver, or None if the
    browser could not be started (the error is logged, not raised).
    """
    try:
        print("Opening Google...")
        driver = webdriver.Chrome()
        driver.get("https://www.google.com/")
        return driver
    except Exception as e:
        # Bug fix: the old message said "while playing the video" (copy-paste).
        print("Error occurred while opening Google:", e)
18 |
def open_youtube(driver):
    """Navigate an existing WebDriver session to YouTube.

    Navigation errors are logged, not raised.
    """
    try:
        print("Opening YouTube...")
        driver.get("https://www.youtube.com/")
    except Exception as e:
        # Bug fix: the old message said "while playing the video" (copy-paste).
        print("Error occurred while opening YouTube:", e)
25 |
def search_youtube(driver, query):
    """Type *query* into YouTube's search box and submit it.

    Errors (missing element, dead driver) are logged, not raised.
    """
    try:
        search_box = driver.find_element("name", "search_query")
        search_box.clear()  # Clear the search box
        search_box.send_keys(query)
        search_box.send_keys(Keys.RETURN)
    except Exception as e:
        # Bug fix: the old message said "while playing the video" (copy-paste).
        print("Error occurred while searching YouTube:", e)
34 |
35 |
36 |
37 | # Main program
def play_youtube_video(driver, query):
    """Search YouTube for *query* and click the first result to play it.

    Errors (missing elements, dead driver) are caught and logged rather
    than propagated.
    """
    try:
        box = driver.find_element("name", "search_query")
        box.clear()  # drop any previous query text
        box.send_keys(query)
        box.send_keys(Keys.RETURN)
        time.sleep(2)  # let the results page render before clicking
        driver.find_element(By.ID, 'video-title').click()
        print("Playing the first video...")
    except Exception as e:
        print("Error occurred while playing the video:", e)
50 |
51 |
52 | # Modify the main function to include playing a video
def main(ip):
    """Voice-driven YouTube session loop.

    Args:
        ip: initial input from the caller (currently unused; kept for
            interface compatibility with the other web handlers).

    Returns:
        None on an explicit stop command, otherwise the unrecognized
        user input so the caller can process it.
    """
    web_driver = open_google()
    open_youtube(web_driver)
    while True:
        print("youtube control")
        user_input = (s2t.voice2text("en")).lower()
        print("youtube control" + user_input)
        if user_input:
            if "stop" in user_input or "close youtube" in user_input:
                print("Exiting program...")
                return  # bug fix: removed unreachable `break` that followed this return
            elif "search for" in user_input and web_driver:
                query_start_index = user_input.find("search for") + len("search for")
                search_query = user_input[query_start_index:].strip()
                print(f"Searching YouTube for: {search_query}")
                search_youtube(web_driver, search_query)
            elif "play" in user_input and web_driver:
                query_start_index = user_input.find("play") + len("play")
                search_query = user_input[query_start_index:].strip()
                print(f"Searching YouTube for: {search_query}")
                play_youtube_video(web_driver, search_query)
            else:
                return user_input
80 |
--------------------------------------------------------------------------------
/JarvisGUI/loginWindowMain.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import csv
3 | from PyQt5.QtWidgets import QWidget, QLineEdit, QApplication, QMessageBox
4 | from loginWindowGUI import Ui_Widget
5 | from PyQt5 import QtGui
6 |
class loginWindow(QWidget):
    """Login screen: validates credentials stored in user_data.csv."""

    def __init__(self):
        super(loginWindow, self).__init__()
        print("Setting up GUI")
        self.loginUI = Ui_Widget()
        self.loginUI.setupUi(self)

        self.loginUI.backBtn.clicked.connect(self.goToInitialPage)
        self.loginUI.NewUserBtn.clicked.connect(self.goToSignUpPage)  # Connect NewUserBtn to sign-up page

        self.loginUI.label.hide()  # "login failed" animation label, shown on bad credentials
        self.loginUI.passwordEntry.setEchoMode(QLineEdit.Password)
        self.loginUI.loginBtn.clicked.connect(self.validateLogin)

        self.loginUI.retryBtn.clicked.connect(self.retryButton)
        self.loginUI.exitBtn.clicked.connect(self.close)

        # NOTE(review): absolute, machine-specific path — consider a relative or resource path.
        self.loginUI.IllegalEntrymovie = QtGui.QMovie("D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/loginFailed.gif")
        self.loginUI.label.setScaledContents(True)
        self.loginUI.label.setMovie(self.loginUI.IllegalEntrymovie)

    def retryButton(self):
        """Clear both entry fields and hide the failure animation."""
        self.loginUI.userNameEntry.clear()
        self.loginUI.passwordEntry.clear()
        self.stopMovie()

    def validateLogin(self):
        """Check the entered credentials; proceed on success, else show the failure animation."""
        username = self.loginUI.userNameEntry.text()
        password = self.loginUI.passwordEntry.text()
        if self.checkCredentials(username, password):
            print("Login Success")
            self.goToMainPage()
        else:
            self.playMovie()

    def checkCredentials(self, username, password):
        """Return True when (username, password) matches a row in user_data.csv.

        Bug fixes: tolerates a missing file (no users registered yet) and
        skips blank/short rows instead of raising IndexError.
        """
        try:
            with open('user_data.csv', 'r', newline='') as file:
                for row in csv.reader(file):
                    if len(row) >= 2 and row[0] == username and row[1] == password:
                        return True
        except FileNotFoundError:
            pass  # no user database yet
        return False

    def playMovie(self):
        """Show and start the login-failed animation."""
        self.loginUI.label.show()
        self.loginUI.IllegalEntrymovie.start()

    def stopMovie(self):
        """Stop and hide the login-failed animation."""
        self.loginUI.label.hide()
        self.loginUI.IllegalEntrymovie.stop()

    def goToMainPage(self):
        """Open the main Jarvis window after a successful login."""
        from jarvisMAIN import LoginWindow  # lazy import avoids a circular dependency
        self.showMain = LoginWindow()
        self.close()  # Close the current dialog
        self.showMain.show()

    def goToInitialPage(self):
        """Return to the initial landing window."""
        from main import mainFileNew  # lazy import avoids a circular dependency
        self.showMainWindow = mainFileNew()
        self.close()  # Close the current dialog
        self.showMainWindow.show()

    def goToSignUpPage(self):
        """Open the sign-up dialog for new users."""
        from signUpImpl import mainFileNew  # lazy import avoids a circular dependency
        self.showSignUp = mainFileNew()
        self.close()  # Close the current dialog
        self.showSignUp.show()
76 |
if __name__ == "__main__":
    # Standalone entry point: launch the login window on its own.
    app = QApplication(sys.argv)
    ui = loginWindow()
    ui.show()
    sys.exit(app.exec_())
82 |
--------------------------------------------------------------------------------
/JarvisGUI/mainFileNew.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'form.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.15.10
6 | #
7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is
8 | # run again. Do not edit this file unless you know what you are doing.
9 |
10 |
11 | from PyQt5 import QtCore, QtGui, QtWidgets
12 |
13 |
class Ui_Dialog(object):
    """pyuic5-generated layout for the Jarvis landing dialog (start/login/exit).

    Regenerate with pyuic5 rather than editing by hand.
    NOTE(review): the pixmap/stylesheet paths below are absolute, machine-specific
    Windows paths, and the "D:\\BSc..." literals rely on Python passing unknown
    escape sequences through unchanged — prefer raw strings or Qt resources. TODO confirm.
    """

    def setupUi(self, Dialog):
        """Build and position all child widgets on *Dialog* (700x550, black)."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(700, 550)
        Dialog.setStyleSheet("background-color: rgb(0, 0, 0);")
        # Top banner logo.
        self.logo = QtWidgets.QLabel(Dialog)
        self.logo.setGeometry(QtCore.QRect(40, 10, 640, 80))
        self.logo.setStyleSheet("background-color: rgb(0, 0, 0);")
        self.logo.setText("")
        self.logo.setPixmap(QtGui.QPixmap("D:\BSc CSD Sem 6\Project\JarvisGUI\JarvisImages\logo.png"))
        self.logo.setScaledContents(True)
        self.logo.setObjectName("logo")
        # Central animation area (static pixmap; a QMovie would be needed to animate the gif).
        self.gif1 = QtWidgets.QLabel(Dialog)
        self.gif1.setGeometry(QtCore.QRect(-20, 110, 711, 341))
        self.gif1.setText("")
        self.gif1.setPixmap(QtGui.QPixmap("D:\BSc CSD Sem 6\Project\JarvisGUI\JarvisImages\samplegui3.gif"))
        self.gif1.setScaledContents(True)
        self.gif1.setObjectName("gif1")
        # Image-only buttons (text left empty; the border-image is the face).
        self.startBtn = QtWidgets.QPushButton(Dialog)
        self.startBtn.setGeometry(QtCore.QRect(30, 480, 141, 61))
        self.startBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/startButton.png);")
        self.startBtn.setText("")
        self.startBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.startBtn.setObjectName("startBtn")
        self.loginBtn = QtWidgets.QPushButton(Dialog)
        self.loginBtn.setGeometry(QtCore.QRect(280, 480, 141, 61))
        self.loginBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/loginButton.png);")
        self.loginBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.loginBtn.setText("")
        self.loginBtn.setObjectName("loginBtn")
        self.exitBtn = QtWidgets.QPushButton(Dialog)
        self.exitBtn.setGeometry(QtCore.QRect(520, 480, 141, 61))
        self.exitBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.exitBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/exitButton.png);")
        self.exitBtn.setText("")
        self.exitBtn.setObjectName("exitBtn")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Apply translatable strings (only the window title here)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
56 |
57 |
if __name__ == "__main__":
    # Manual preview: render this generated form standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
66 |
--------------------------------------------------------------------------------
/web_Whatsapp.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import pywhatkit
3 | import time
4 | import pyautogui
5 | import text2speech as t2s
6 | import speech2text as s2t
7 | import pyttsx3
8 | import difflib
9 | engine = pyttsx3.init()
def send_message(target_name, message):
    """Send *message* via WhatsApp to the contact best matching *target_name*.

    Contacts are read from contacts.csv, which must contain 'Name' and
    'Phone' columns. The name is fuzzy-matched; failures are reported by
    voice and on stdout, never raised.
    """
    csv_file = 'contacts.csv'
    try:
        df = pd.read_csv(csv_file)
        if 'Name' not in df.columns or 'Phone' not in df.columns:
            raise ValueError("CSV file does not contain 'Name' or 'Phone' columns.")
    except Exception as e:
        print("Error:", e)
        return

    # Fuzzy-match the (possibly mis-heard) spoken name against the contact list.
    matches = difflib.get_close_matches(target_name, df['Name'], n=1, cutoff=0.5)
    if matches:
        matched_name = matches[0]
        phone = df[df['Name'] == matched_name]['Phone'].iloc[0]

        # Bug fix: the column may be read as a number — coerce to str before
        # stripping whitespace (the old code crashed on non-string values and
        # repeated the same replace three times).
        phone = ''.join(str(phone).split())

        # Send message
        pywhatkit.sendwhatmsg_instantly(phone, message)
        time.sleep(10)  # give WhatsApp Web time to load the chat
        t2s.text2speech(engine, "Message sent successfully")

        pyautogui.press('enter')  # press Enter in case the message was typed but not sent
        print("Message sent to {}: {}".format(matched_name, phone))
    else:
        # Bug fix: the old message interpolated the (always empty) phone variable.
        t2s.text2speech(engine, "Contact " + target_name + " not found")
        print("No match found for the given name.")
46 |
def split_input(input_string):
    """Split a 'send <message> to <contact>' command into (message, contact).

    Raises ValueError (from list.index) when either keyword is missing,
    matching the original behavior.
    """
    words = input_string.split()

    # Everything between the first "send" and the first "to" is the message;
    # everything after "to" is the contact name.
    send_pos = words.index("send")
    to_pos = words.index("to")

    message = " ".join(words[send_pos + 1:to_pos])
    contact = " ".join(words[to_pos + 1:])
    return message, contact
69 |
def main():
    """Voice-driven WhatsApp messaging loop.

    Listens for commands of the form "send <message> to <contact>".

    Returns:
        None on an explicit stop command, otherwise the unrecognized
        user input so the caller can process it.
    """
    t2s.text2speech(engine, "Whatsapp initiated")
    while True:
        user_input = (s2t.voice2text("en")).lower()
        print(user_input)
        if user_input:
            # NOTE(review): "close youtube" here looks copy-pasted from the
            # YouTube handler — presumably it should be "close whatsapp"; confirm.
            if "stop" in user_input or "close youtube" in user_input:
                print("Exiting program...")
                t2s.text2speech(engine, "thank you sir")
                return  # bug fix: removed unreachable `break` that followed this return
            elif "send" in user_input:
                t2s.text2speech(engine, "Message is sent shortly")
                msg, con = split_input(user_input)
                send_message(con, msg)
            else:
                return user_input
89 |
90 |
91 |
92 |
--------------------------------------------------------------------------------
/utubeVideoDownloader.py:
--------------------------------------------------------------------------------
1 |
2 | from pytube import YouTube
3 |
def download_video(url, save_path):
    """Download the highest-resolution stream of a YouTube video to *save_path*.

    Any failure (bad URL, network error) is caught and reported, not raised.
    """
    try:
        video = YouTube(url)
        best_stream = video.streams.get_highest_resolution()
        best_stream.download(save_path)
        print("Download completed successfully.")
    except Exception as e:
        print(f"An error occurred: {str(e)}")
12 |
def main():
    """Prompt for a video URL and a destination folder, then download it."""
    video_url = input("Enter the URL of the video: ")
    destination = input("Enter the path to save the video: ")
    download_video(video_url, destination)


if __name__ == "__main__":
    main()
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 | # import pyautogui
41 | # import time
42 | # import os
43 | #
44 | # def open_paint():
45 | # # Open Paint using Windows Run dialog (Windows + R)
46 | # os.system("start mspaint")
47 | # time.sleep(2) # Wait for Paint to open
48 | #
49 | # def draw_line():
50 | # # Move mouse to starting point of the line
51 | # pyautogui.moveTo(100, 100, duration=0.5)
52 | # pyautogui.mouseDown() # Press left mouse button
53 | # time.sleep(1) # Hold left mouse button for 1 second
54 | # # Move mouse to end point of the line
55 | # pyautogui.moveTo(300, 300, duration=1.5)
56 | # pyautogui.mouseUp() # Release left mouse button
57 | #
58 | # def undo():
59 | # # Press Ctrl + Z to undo
60 | # pyautogui.hotkey('ctrl', 'z')
61 | #
62 | # def pick_red_color():
63 | # # Move mouse to color palette and select red color
64 | # pyautogui.moveTo(50, 200, duration=0.5)
65 | # pyautogui.click()
66 | #
67 | # def draw_square():
68 | # # Move mouse to starting point of the square
69 | # pyautogui.moveTo(200, 200, duration=0.5)
70 | # pyautogui.mouseDown()
71 | # time.sleep(1)
72 | # # Draw square
73 | # pyautogui.moveRel(100, 0, duration=0.5) # Move right
74 | # pyautogui.moveRel(0, 100, duration=0.5) # Move down
75 | # pyautogui.moveRel(-100, 0, duration=0.5) # Move left
76 | # pyautogui.moveRel(0, -100, duration=0.5) # Move up
77 | # pyautogui.mouseUp()
78 | #
79 | # def draw_rectangular_spiral():
80 | # # Move mouse to starting point of the spiral
81 | # pyautogui.moveTo(400, 400, duration=0.5)
82 | # pyautogui.mouseDown()
83 | # side_length = 10
84 | # for _ in range(20):
85 | # pyautogui.moveRel(side_length, 0, duration=0.5) # Move right
86 | # pyautogui.moveRel(0, side_length, duration=0.5) # Move down
87 | # side_length += 10
88 | # pyautogui.moveRel(-side_length, 0, duration=0.5) # Move left
89 | # pyautogui.moveRel(0, -side_length, duration=0.5) # Move up
90 | # side_length += 10
91 | # pyautogui.mouseUp()
92 | #
93 | # def main():
94 | # open_paint()
95 | # time.sleep(2) # Wait for Paint to fully open
96 | # draw_line()
97 | # undo()
98 | # time.sleep(1)
99 | # pick_red_color()
100 | # draw_square()
101 | # draw_rectangular_spiral()
102 | #
103 | # if __name__ == "__main__":
104 | # main()
105 |
--------------------------------------------------------------------------------
/JarvisGUI/form.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | Dialog
4 |
5 |
6 |
7 | 0
8 | 0
9 | 1460
10 | 720
11 |
12 |
13 |
14 | Dialog
15 |
16 |
17 | background-color: rgb(0, 0, 0);
18 |
19 |
20 |
21 |
22 | 40
23 | 10
24 | 640
25 | 80
26 |
27 |
28 |
29 | background-color: rgb(0, 0, 0);
30 |
31 |
32 |
33 |
34 |
35 | JarvisImages/logo.png
36 |
37 |
38 | true
39 |
40 |
41 |
42 |
43 |
44 | -20
45 | 110
46 | 711
47 | 341
48 |
49 |
50 |
51 |
52 |
53 |
54 | JarvisImages/samplegui3.gif
55 |
56 |
57 | true
58 |
59 |
60 |
61 |
62 |
63 | 30
64 | 480
65 | 141
66 | 61
67 |
68 |
69 |
70 | border-image: url(:/JarvisImages/startButton.png);
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 | 280
80 | 480
81 | 141
82 | 61
83 |
84 |
85 |
86 | border-image: url(:/JarvisImages/loginButton.png);
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 | 520
96 | 480
97 | 141
98 | 61
99 |
100 |
101 |
102 | border-image: url(:/JarvisImages/exitButton.png);
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
--------------------------------------------------------------------------------
/JarvisGUI/FaceRecogForNewUser/newUserFaceRecGUI.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'form.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.15.10
6 | #
7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is
8 | # run again. Do not edit this file unless you know what you are doing.
9 |
10 |
11 | from PyQt5 import QtCore, QtGui, QtWidgets
12 |
13 |
class Ui_Widget(object):
    """pyuic5-generated UI for the new-user face-capture screen.

    NOTE: generated from form.ui -- any manual edit is lost when pyuic5
    runs again.
    """
    def setupUi(self, Widget):
        """Create and position every widget on *Widget* (a 1080x720 QWidget)."""
        Widget.setObjectName("Widget")
        Widget.resize(1080, 720)
        Widget.setStyleSheet("background-color: rgb(0, 0, 0);")
        # White-bordered placeholder; presumably hosts the camera preview at
        # runtime -- TODO confirm against the controller that drives this form.
        self.label_3 = QtWidgets.QLabel(Widget)
        self.label_3.setGeometry(QtCore.QRect(29, 116, 631, 401))
        self.label_3.setStyleSheet("border-color: rgb(255, 255, 255);\n"
                                   "color: rgb(255, 255, 255);\n"
                                   "border-style: solid;\n"
                                   "border-width: 1px;")
        self.label_3.setText("")
        self.label_3.setObjectName("label_3")
        # Back button; image comes from the compiled Qt resource file (":/" prefix).
        self.backBtn = QtWidgets.QPushButton(Widget)
        self.backBtn.setGeometry(QtCore.QRect(260, 570, 171, 61))
        self.backBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.backBtn.setStyleSheet("border-image: url(:/JarvisImages/backButton.png);")
        self.backBtn.setText("")
        self.backBtn.setObjectName("backBtn")
        # Logo banner; NOTE(review): the relative path resolves against the
        # process working directory, so the image only loads when launched
        # from the expected folder.
        self.label = QtWidgets.QLabel(Widget)
        self.label.setGeometry(QtCore.QRect(260, 10, 531, 81))
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap("../JarvisImages/logo.png"))
        self.label.setScaledContents(True)
        self.label.setObjectName("label")
        # Decorative side image (same CWD-relative caveat as the logo).
        self.label_2 = QtWidgets.QLabel(Widget)
        self.label_2.setGeometry(QtCore.QRect(720, 100, 321, 511))
        self.label_2.setText("")
        self.label_2.setPixmap(QtGui.QPixmap("../JarvisImages/ironmanSidePose.jpg"))
        self.label_2.setScaledContents(True)
        self.label_2.setObjectName("label_2")
        # Exit button.
        self.exitBtn = QtWidgets.QPushButton(Widget)
        self.exitBtn.setGeometry(QtCore.QRect(500, 570, 171, 61))
        self.exitBtn.setStyleSheet("border-image: url(:/JarvisImages/exitButton.png);")
        self.exitBtn.setText("")
        self.exitBtn.setObjectName("exitBtn")
        # Capture button.
        self.captureBtn = QtWidgets.QPushButton(Widget)
        self.captureBtn.setGeometry(QtCore.QRect(30, 570, 171, 61))
        self.captureBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.captureBtn.setStyleSheet("border-image: url(:/JarvisImages/capture.png);")
        self.captureBtn.setText("")
        self.captureBtn.setObjectName("captureBtn")

        self.retranslateUi(Widget)
        QtCore.QMetaObject.connectSlotsByName(Widget)

    def retranslateUi(self, Widget):
        """Apply translatable strings (only the window title here)."""
        _translate = QtCore.QCoreApplication.translate
        Widget.setWindowTitle(_translate("Widget", "Widget"))
63 |
64 |
if __name__ == "__main__":
    # Standalone preview: show this generated form in its own QApplication.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Widget = QtWidgets.QWidget()
    ui = Ui_Widget()
    ui.setupUi(Widget)
    Widget.show()
    sys.exit(app.exec_())
73 |
--------------------------------------------------------------------------------
/JarvisGUI/FaceRecogGUI/form.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | Widget
4 |
5 |
6 |
7 | 0
8 | 0
9 | 1080
10 | 720
11 |
12 |
13 |
14 | PointingHandCursor
15 |
16 |
17 | Widget
18 |
19 |
20 | background-color: rgb(0, 0, 0);
21 |
22 |
23 |
24 |
25 | 290
26 | 10
27 | 531
28 | 81
29 |
30 |
31 |
32 |
33 |
34 |
35 | ../JarvisImages/logo.png
36 |
37 |
38 | true
39 |
40 |
41 |
42 |
43 |
44 | 750
45 | 100
46 | 321
47 | 511
48 |
49 |
50 |
51 |
52 |
53 |
54 | ../JarvisImages/ironmanSidePose.jpg
55 |
56 |
57 | true
58 |
59 |
60 |
61 |
62 |
63 | 59
64 | 116
65 | 631
66 | 401
67 |
68 |
69 |
70 | border-color: rgb(255, 255, 255);
71 | color: rgb(255, 255, 255);
72 | border-style: solid;
73 | border-width: 1px;
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 | 110
83 | 570
84 | 171
85 | 61
86 |
87 |
88 |
89 | PointingHandCursor
90 |
91 |
92 | border-image: url(:/JarvisImages/loginButton.png);
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 | 440
102 | 570
103 | 171
104 | 61
105 |
106 |
107 |
108 | border-image: url(:/JarvisImages/exitButton.png);
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
--------------------------------------------------------------------------------
/web_chrome.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 | from selenium.webdriver.common.keys import Keys
3 | from selenium.webdriver.common.by import By
4 | import time
5 | import speech2text as s2t
6 | import text2speech as t2s
7 | import pyttsx3
8 | import pyautogui
9 |
10 | engine = pyttsx3.init()
11 | from selenium import webdriver
12 |
# Function to initialize and return the Chrome driver
def initialize_chrome_driver():
    """Start a new Chrome browser session via Selenium and return the driver."""
    return webdriver.Chrome()

# Function to activate the Chrome window
def activate_chrome_window():
    """Give Chrome keyboard focus so the pyautogui hotkeys below reach it."""
    pyautogui.click(x=300, y=300)  # Click somewhere on the screen to activate Chrome

def web_search(query, driver):
    """Type *query* into the current page's search box (input named "q") and submit.

    Assumes the page has a Google-style search field; errors are printed
    rather than raised so the voice loop keeps running.
    """
    try:
        search_box = driver.find_element(By.NAME,"q")
        search_box.clear()
        search_box.send_keys(query)
        search_box.submit()
    except Exception as e:
        print("An error occurred during web search:", e)

def navigate_to_website(url, driver):
    """Load *url* in the given driver; print (don't raise) on failure."""
    try:
        driver.get(url)
    except Exception as e:
        print("An error occurred during website navigation:", e)
35 |
def close_current_tab():
    """Close the focused tab (Ctrl+W)."""
    activate_chrome_window()
    pyautogui.hotkey('ctrl', 'w')

def open_new_tab():
    """Open a new tab (Ctrl+T)."""
    activate_chrome_window()
    pyautogui.hotkey('ctrl', 't')

# Function to move to the next tab
def move_to_next_tab():
    """Cycle focus to the next tab (Ctrl+Tab)."""
    activate_chrome_window()
    pyautogui.hotkey('ctrl', 'tab')

def close_tab():
    """Close the focused tab (Ctrl+W); duplicate of close_current_tab."""
    activate_chrome_window()
    pyautogui.hotkey('ctrl', 'w')

def close_previous_tab():
    """Switch to the previous tab (Ctrl+Shift+Tab), then close it (Ctrl+W)."""
    activate_chrome_window()
    pyautogui.hotkey('ctrl', 'shift', 'tab')
    pyautogui.hotkey('ctrl', 'w')

# Function to move to the previous tab for Windows
def previous_tab():
    """Cycle focus to the previous tab (Ctrl+Shift+Tab)."""
    activate_chrome_window()
    pyautogui.hotkey('ctrl', 'shift', 'tab')

# Function to close all tabs
def close_all_tabs():
    """Close the whole window and all its tabs (Ctrl+Shift+W)."""
    activate_chrome_window()
    pyautogui.hotkey('ctrl', 'shift', 'w')

def open_new_window():
    """Open a new browser window (Ctrl+N)."""
    activate_chrome_window()
    pyautogui.hotkey('ctrl', 'n')
71 |
72 | # Driver program
def main(ip):
    """Voice-controlled Chrome command loop.

    Opens Chrome on Google, then dispatches spoken commands to the tab
    helpers above.  Returns the unrecognised command string so the outer
    dispatcher can handle it, or None after "exit"/"close all tabs".
    The *ip* parameter is unused here but kept for dispatcher compatibility.
    """
    driver = initialize_chrome_driver()
    print("Chrome driver initialized")
    driver.get("https://www.google.com/")
    t2s.text2speech(engine, "opening chrome")
    while True:
        print("chrome control")
        command = (s2t.voice2text("en")).lower()
        print("chrome control"+command)
        if command == "exit" or "close all tabs" in command:
            driver.quit()
            print("Exiting program...")
            break

        elif "search" in command:
            query = command.replace("search", "")
            web_search(query, driver)
        elif command == "navigate_to_website":
            url = input("Enter website URL: ")
            navigate_to_website(url, driver)
        elif "new tab" in command:
            open_new_tab()
        # BUGFIX: "close previous tab" must be tested before "previous tab";
        # the original checked "previous tab" first, which matched the longer
        # phrase too, making close_previous_tab() unreachable.
        elif "close previous tab" in command:
            close_previous_tab()
        elif "close tab" in command:
            close_tab()
        elif "previous tab" in command:
            previous_tab()
        elif "next tab" in command:
            move_to_next_tab()
        elif "new window" in command:
            open_new_window()
        else:
            return command
107 |
108 |
109 |
--------------------------------------------------------------------------------
/JarvisGUI/newUserFaceRecGUI.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'form.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.15.10
6 | #
7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is
8 | # run again. Do not edit this file unless you know what you are doing.
9 |
10 |
11 | from PyQt5 import QtCore, QtGui, QtWidgets
12 |
13 |
class Ui_Newface(object):
    """pyuic5-generated UI for the new-user face-capture screen.

    NOTE: generated output -- manual edits are lost when pyuic5 runs again.
    NOTE(review): every image path below is an absolute "D:/..." path from
    the author's machine; images silently fail to load elsewhere.
    """
    def setupUi(self, Widget):
        """Create and position every widget on *Widget* (a 1080x720 QWidget)."""
        Widget.setObjectName("Widget")
        Widget.resize(1080, 720)
        Widget.setStyleSheet("background-color: rgb(0, 0, 0);")
        # videBack: white-bordered placeholder for the camera preview.
        # NOTE(review): its Qt objectName is still the generated "label_3",
        # which does not match the attribute name.
        self.videBack = QtWidgets.QLabel(Widget)
        self.videBack.setGeometry(QtCore.QRect(29, 116, 631, 401))
        self.videBack.setStyleSheet("border-color: rgb(255, 255, 255);\n"
                                    "color: rgb(255, 255, 255);\n"
                                    "border-style: solid;\n"
                                    "border-width: 1px;")
        self.videBack.setText("")
        self.videBack.setObjectName("label_3")
        # Login button.
        self.loginBtn = QtWidgets.QPushButton(Widget)
        self.loginBtn.setGeometry(QtCore.QRect(260, 570, 171, 61))
        self.loginBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.loginBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/loginButton.png);")
        self.loginBtn.setText("")
        self.loginBtn.setObjectName("loginBtn")
        # Logo banner.
        self.label = QtWidgets.QLabel(Widget)
        self.label.setGeometry(QtCore.QRect(260, 10, 531, 81))
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap("D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/logo.png"))
        self.label.setScaledContents(True)
        self.label.setObjectName("label")
        # Decorative side image.
        self.label_2 = QtWidgets.QLabel(Widget)
        self.label_2.setGeometry(QtCore.QRect(720, 100, 321, 511))
        self.label_2.setText("")
        self.label_2.setPixmap(QtGui.QPixmap("D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/ironmanSidePose.jpg"))
        self.label_2.setScaledContents(True)
        self.label_2.setObjectName("label_2")
        # Exit button.
        self.exitBtn = QtWidgets.QPushButton(Widget)
        self.exitBtn.setGeometry(QtCore.QRect(500, 570, 171, 61))
        self.exitBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/exitButton.png);")
        self.exitBtn.setText("")
        self.exitBtn.setObjectName("exitBtn")
        self.exitBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        # Capture button.
        self.captureBtn = QtWidgets.QPushButton(Widget)
        self.captureBtn.setGeometry(QtCore.QRect(30, 570, 171, 61))
        self.captureBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.captureBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/capture.png);")
        self.captureBtn.setText("")
        self.captureBtn.setObjectName("captureBtn")

        self.retranslateUi(Widget)
        QtCore.QMetaObject.connectSlotsByName(Widget)

    def retranslateUi(self, Widget):
        """Apply translatable strings (only the window title here)."""
        _translate = QtCore.QCoreApplication.translate
        Widget.setWindowTitle(_translate("Widget", "Widget"))
64 |
65 |
if __name__ == "__main__":
    # Standalone preview: show this generated form in its own QApplication.
    import sys

    app = QtWidgets.QApplication(sys.argv)
    Widget = QtWidgets.QWidget()
    ui = Ui_Newface()
    ui.setupUi(Widget)
    Widget.show()
    sys.exit(app.exec_())
75 |
--------------------------------------------------------------------------------
/web_edge.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 | from selenium.webdriver.common.keys import Keys
3 | from selenium.webdriver.common.by import By
4 | import time
5 | import speech2text as s2t
6 | import text2speech as t2s
7 | import pyttsx3
8 | import pyautogui
9 | engine = pyttsx3.init()
10 |
11 | # Initialize global variables
12 | driver = None
13 | engine = pyttsx3.init()
14 |
# Function to activate the Edge window
def activate_edge_window():
    """Give Edge keyboard focus so the pyautogui hotkeys below reach it."""
    pyautogui.click(x=300, y=300)  # Click somewhere on the screen to activate Edge

def initialize_edge_driver():
    """Start a new Edge session via Selenium, stored in the module-global `driver`."""
    global driver
    driver = webdriver.Edge()

def web_search(query):
    """Type *query* into the current page's search box (input named "q") and submit.

    Uses the module-global `driver`; failures are reported by voice and
    print instead of being raised so the command loop keeps running.
    """
    try:
        t2s.text2speech(engine, "Yes sir")
        search_box = driver.find_element(By.NAME,"q")
        search_box.clear()
        search_box.send_keys(query)
        search_box.submit()
    except Exception as e:
        print("An error occurred during web search:", e)
        t2s.text2speech(engine, "Sorry, I couldn't perform the search.")

def navigate_to_website(url):
    """Load *url* in the module-global `driver`; report failures by voice."""
    try:
        driver.get(url)
    except Exception as e:
        print("An error occurred during website navigation:", e)
        t2s.text2speech(engine, "Sorry, I couldn't navigate to the website.")
40 |
def close_current_tab():
    """Close the focused tab (Ctrl+W)."""
    activate_edge_window()
    pyautogui.hotkey('ctrl', 'w')

def open_new_tab():
    """Open a new tab (Ctrl+T)."""
    activate_edge_window()
    pyautogui.hotkey('ctrl', 't')

# Function to move to the next tab
def move_to_next_tab():
    """Cycle focus to the next tab (Ctrl+Tab)."""
    activate_edge_window()
    pyautogui.hotkey('ctrl', 'tab')

def close_tab():
    """Close the focused tab (Ctrl+W); duplicate of close_current_tab."""
    activate_edge_window()
    pyautogui.hotkey('ctrl', 'w')

def close_previous_tab():
    """Switch to the previous tab (Ctrl+Shift+Tab), then close it (Ctrl+W)."""
    activate_edge_window()
    pyautogui.hotkey('ctrl', 'shift', 'tab')
    pyautogui.hotkey('ctrl', 'w')

# Function to move to the previous tab for Windows
def previous_tab():
    """Cycle focus to the previous tab (Ctrl+Shift+Tab)."""
    activate_edge_window()
    pyautogui.hotkey('ctrl', 'shift', 'tab')

# Function to close all tabs
def close_all_tabs():
    """Close the whole window and all its tabs (Ctrl+Shift+W)."""
    activate_edge_window()
    pyautogui.hotkey('ctrl', 'shift', 'w')

def open_new_window():
    """Open a new browser window (Ctrl+N)."""
    activate_edge_window()
    pyautogui.hotkey('ctrl', 'n')
76 |
77 | # Driver program
def main(ip):
    """Voice-controlled Edge command loop.

    Opens Edge on Google (module-global `driver`), then dispatches spoken
    commands to the tab helpers above.  Returns the unrecognised command
    string so the outer dispatcher can handle it, or None after
    "exit"/"close all tabs".  *ip* is unused but kept for compatibility.
    """
    initialize_edge_driver()
    t2s.text2speech(engine, "opening edge")
    driver.get("https://www.google.com/")
    while True:
        print("edge control")
        command = (s2t.voice2text("en")).lower()
        print("edge control"+command)
        if command == "exit" or "close all tabs" in command:
            driver.quit()
            print("Exiting program...")
            break

        elif "search" in command:
            query = command.replace("search", "")
            web_search(query)
        elif command == "navigate_to_website":
            url = input("Enter website URL: ")
            navigate_to_website(url)
        elif "new tab" in command:
            open_new_tab()
        # BUGFIX: "close previous tab" must be tested before "previous tab";
        # the original order made close_previous_tab() unreachable because
        # "previous tab" is a substring of the longer phrase.
        elif "close previous tab" in command:
            close_previous_tab()
        elif "close tab" in command:
            close_tab()
        elif "previous tab" in command:
            previous_tab()
        elif "next tab" in command:
            move_to_next_tab()
        elif "new window" in command:
            open_new_window()
        else:
            return command
111 |
--------------------------------------------------------------------------------
/JarvisGUI/faceRecogGUI.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'form.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.15.10
6 | #
7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is
8 | # run again. Do not edit this file unless you know what you are doing.
9 |
10 |
11 | from PyQt5 import QtCore, QtGui, QtWidgets
12 |
13 |
class Ui_Widget(object):
    """pyuic5-generated UI for the face-recognition login screen.

    NOTE: generated output -- manual edits are lost when pyuic5 runs again.
    NOTE(review): image paths are absolute "D:/..." paths from the author's
    machine and will silently fail to load elsewhere.
    """
    def setupUi(self, Widget):
        """Create and position every widget on *Widget* (a 1080x720 QWidget)."""
        Widget.setObjectName("Widget")
        Widget.resize(1080, 720)
        Widget.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        Widget.setStyleSheet("background-color: rgb(0, 0, 0);")
        # Logo banner.
        self.label = QtWidgets.QLabel(Widget)
        self.label.setGeometry(QtCore.QRect(275, 10, 531, 81))  # Adjusted alignment
        self.label.setText("")
        # BUGFIX: these two pixmap paths used raw backslashes ("D:\BSc ..."),
        # producing deprecated invalid escape sequences in the string
        # literals; forward slashes denote the same Windows path and match
        # every other path in this file.
        self.label.setPixmap(QtGui.QPixmap("D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/logo.png"))
        self.label.setScaledContents(True)
        self.label.setObjectName("label")
        # Decorative side image.
        self.label_2 = QtWidgets.QLabel(Widget)
        self.label_2.setGeometry(QtCore.QRect(750, 100, 321, 511))
        self.label_2.setText("")
        self.label_2.setPixmap(QtGui.QPixmap("D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/ironmanSidePose.jpg"))
        self.label_2.setScaledContents(True)
        self.label_2.setObjectName("label_2")
        # White-bordered placeholder that hosts the camera preview.
        self.videoBack = QtWidgets.QLabel(Widget)
        self.videoBack.setGeometry(QtCore.QRect(59, 116, 631, 401))
        self.videoBack.setStyleSheet("border-color: rgb(255, 255, 255);\n"
                                     "color: rgb(255, 255, 255);\n"
                                     "border-style: solid;\n"
                                     "border-width: 1px;")
        self.videoBack.setText("")
        self.videoBack.setObjectName("videoBack")
        # Login button.
        self.loginBtn = QtWidgets.QPushButton(Widget)
        self.loginBtn.setGeometry(QtCore.QRect(110, 570, 171, 61))
        self.loginBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.loginBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/loginButton.png);")
        self.loginBtn.setText("")
        self.loginBtn.setObjectName("loginBtn")
        self.newUserBtn = QtWidgets.QPushButton(Widget)  # Add the New User button
        self.newUserBtn.setGeometry(QtCore.QRect(295, 570, 171, 61))  # Adjusted position
        self.newUserBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.newUserBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/newUser.png);")  # Set the style
        self.newUserBtn.setText("")
        self.newUserBtn.setObjectName("newUserBtn")  # Set the object name
        # Exit button.
        self.exitBtn = QtWidgets.QPushButton(Widget)
        self.exitBtn.setGeometry(QtCore.QRect(480, 570, 171, 61))  # Adjusted position
        self.exitBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/exitButton.png);")
        self.exitBtn.setText("")
        self.exitBtn.setObjectName("exitBtn")

        self.retranslateUi(Widget)
        QtCore.QMetaObject.connectSlotsByName(Widget)

    def retranslateUi(self, Widget):
        """Apply translatable strings (only the window title here)."""
        _translate = QtCore.QCoreApplication.translate
        Widget.setWindowTitle(_translate("Widget", "Widget"))
64 |
65 |
if __name__ == "__main__":
    # Standalone preview: show this generated form in its own QApplication.
    import sys

    app = QtWidgets.QApplication(sys.argv)
    Widget = QtWidgets.QWidget()
    ui = Ui_Widget()
    ui.setupUi(Widget)
    Widget.show()
    sys.exit(app.exec_())
75 |
--------------------------------------------------------------------------------
/web_firefox.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 | from selenium.webdriver.common.keys import Keys
3 | from selenium.webdriver.common.by import By
4 | import time
5 | import speech2text as s2t
6 | import text2speech as t2s
7 | import pyttsx3
8 | engine = pyttsx3.init()
9 | import pyautogui
10 |
11 | # Initialize global variables
12 | driver = None
13 | engine = pyttsx3.init()
14 |
# Function to activate the Firefox window
def activate_firefox_window():
    """Give Firefox keyboard focus so the pyautogui hotkeys below reach it."""
    pyautogui.click(x=300, y=300)  # Click somewhere on the screen to activate Firefox

def initialize_firefox_driver():
    """Start a new Firefox session via Selenium, stored in the module-global `driver`."""
    global driver
    driver = webdriver.Firefox()

def web_search(query):
    """Type *query* into the current page's search box (input named "q") and submit.

    Uses the module-global `driver`; failures are reported by voice and
    print instead of being raised so the command loop keeps running.
    """
    try:
        t2s.text2speech(engine, "Yes sir")
        search_box = driver.find_element(By.NAME,"q")
        search_box.clear()
        search_box.send_keys(query)
        search_box.submit()
    except Exception as e:
        print("An error occurred during web search:", e)
        t2s.text2speech(engine, "Sorry, I couldn't perform the search.")

def navigate_to_website(url):
    """Load *url* in the module-global `driver`; report failures by voice."""
    try:
        driver.get(url)
    except Exception as e:
        print("An error occurred during website navigation:", e)
        t2s.text2speech(engine, "Sorry, I couldn't navigate to the website.")
40 |
def close_current_tab():
    """Close the focused tab (Ctrl+W)."""
    activate_firefox_window()
    pyautogui.hotkey('ctrl', 'w')

def open_new_tab():
    """Open a new tab (Ctrl+T)."""
    activate_firefox_window()
    pyautogui.hotkey('ctrl', 't')

# Function to move to the next tab
def move_to_next_tab():
    """Cycle focus to the next tab (Ctrl+Tab)."""
    activate_firefox_window()
    pyautogui.hotkey('ctrl', 'tab')

def close_tab():
    """Close the focused tab (Ctrl+W); duplicate of close_current_tab."""
    activate_firefox_window()
    pyautogui.hotkey('ctrl', 'w')

def close_previous_tab():
    """Switch to the previous tab (Ctrl+Shift+Tab), then close it (Ctrl+W)."""
    activate_firefox_window()
    pyautogui.hotkey('ctrl', 'shift', 'tab')
    pyautogui.hotkey('ctrl', 'w')

# Function to move to the previous tab for Windows
def previous_tab():
    """Cycle focus to the previous tab (Ctrl+Shift+Tab)."""
    activate_firefox_window()
    pyautogui.hotkey('ctrl', 'shift', 'tab')

# Function to close all tabs
def close_all_tabs():
    """Close the whole window and all its tabs (Ctrl+Shift+W)."""
    activate_firefox_window()
    pyautogui.hotkey('ctrl', 'shift', 'w')

def open_new_window():
    """Open a new browser window (Ctrl+N)."""
    activate_firefox_window()
    pyautogui.hotkey('ctrl', 'n')
76 |
77 | # Driver program
def main(ip):
    """Voice-controlled Firefox command loop.

    Opens Firefox on Google (module-global `driver`), then dispatches
    spoken commands to the tab helpers above.  Returns the unrecognised
    command string so the outer dispatcher can handle it, or None after
    "exit"/"close all tabs".  *ip* is unused but kept for compatibility.
    """
    initialize_firefox_driver()
    t2s.text2speech(engine, "opening firefox")
    driver.get("https://www.google.com/")
    while True:
        print("firefox control")
        command = (s2t.voice2text("en")).lower()
        print("firefox control"+command)
        if command == "exit" or "close all tabs" in command:
            driver.quit()
            print("Exiting program...")
            break

        elif "search" in command:
            query = command.replace("search", "")
            web_search(query)
        elif command == "navigate_to_website":
            url = input("Enter website URL: ")
            navigate_to_website(url)
        elif "new tab" in command:
            open_new_tab()
        # BUGFIX: "close previous tab" must be tested before "previous tab";
        # the original order made close_previous_tab() unreachable because
        # "previous tab" is a substring of the longer phrase.
        elif "close previous tab" in command:
            close_previous_tab()
        elif "close tab" in command:
            close_tab()
        elif "previous tab" in command:
            previous_tab()
        elif "next tab" in command:
            move_to_next_tab()
        elif "new window" in command:
            open_new_window()
        else:
            return command
111 |
112 |
--------------------------------------------------------------------------------
/JarvisGUI/FaceRecogForNewUser/form.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | Widget
4 |
5 |
6 |
7 | 0
8 | 0
9 | 1080
10 | 720
11 |
12 |
13 |
14 | Widget
15 |
16 |
17 | background-color: rgb(0, 0, 0);
18 |
19 |
20 |
21 |
22 | 29
23 | 116
24 | 631
25 | 401
26 |
27 |
28 |
29 | border-color: rgb(255, 255, 255);
30 | color: rgb(255, 255, 255);
31 | border-style: solid;
32 | border-width: 1px;
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 | 260
42 | 570
43 | 171
44 | 61
45 |
46 |
47 |
48 | PointingHandCursor
49 |
50 |
51 | border-image: url(:/JarvisImages/backButton.png);
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 | 260
61 | 10
62 | 531
63 | 81
64 |
65 |
66 |
67 |
68 |
69 |
70 | ../JarvisImages/logo.png
71 |
72 |
73 | true
74 |
75 |
76 |
77 |
78 |
79 | 720
80 | 100
81 | 321
82 | 511
83 |
84 |
85 |
86 |
87 |
88 |
89 | ../JarvisImages/ironmanSidePose.jpg
90 |
91 |
92 | true
93 |
94 |
95 |
96 |
97 |
98 | 500
99 | 570
100 | 171
101 | 61
102 |
103 |
104 |
105 | border-image: url(:/JarvisImages/exitButton.png);
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 | 30
115 | 570
116 | 171
117 | 61
118 |
119 |
120 |
121 | PointingHandCursor
122 |
123 |
124 | border-image: url(:/JarvisImages/capture.png);
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
--------------------------------------------------------------------------------
/FaceRecognition.py:
--------------------------------------------------------------------------------
1 | import tkinter as tk
2 | from tkinter import messagebox
3 | import cv2
4 | import os
5 |
class FaceRecognitionApp:
    """Tiny Tk front end: capture a reference face image, then "log in" by
    comparing a live face against it via grayscale-histogram correlation.

    NOTE(review): histogram correlation is a very weak face check -- it
    tracks lighting/exposure more than identity.  Kept as-is functionally.
    """

    def __init__(self, master):
        """Build the two-button window ("New User" / "Login") on *master*."""
        self.master = master
        self.master.title("Face Recognition App")

        self.new_user_button = tk.Button(master, text="New User", command=self.capture_image)
        self.new_user_button.pack()

        self.login_button = tk.Button(master, text="Login", command=self.login)
        self.login_button.pack()

    def capture_image(self):
        """Show the webcam feed; on 'c' save the single detected face ROI to
        captured_image.jpg, on 'q' abort.  Always releases the camera."""
        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

        cam = cv2.VideoCapture(0)
        try:
            while True:
                ret, frame = cam.read()
                if not ret:
                    break

                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                faces = face_cascade.detectMultiScale(gray, 1.3, 5)

                for (x, y, w, h) in faces:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

                cv2.imshow('Capture Image', frame)
                key = cv2.waitKey(1)
                if key == ord('q'):
                    break
                elif key == ord('c'):
                    if len(faces) == 1:
                        # BUGFIX: save the face ROI only on a confirmed
                        # capture.  Previously the file was rewritten on
                        # every frame, so aborting with 'q' still left an
                        # unconfirmed image behind that login() would use.
                        (x, y, w, h) = faces[0]
                        cv2.imwrite('captured_image.jpg', frame[y:y + h, x:x + w])
                        messagebox.showinfo("Success", "Image Captured Successfully!")
                    else:
                        messagebox.showerror("Error", "Please make sure your face is clearly visible in the camera!")
                    return
        finally:
            # BUGFIX: the original leaked the camera and left windows open
            # when the loop ended via 'q' or a failed read.
            cam.release()
            cv2.destroyAllWindows()

    def histogram_distance(self, image1, image2):
        """Return the correlation of the two images' grayscale histograms.

        Despite the name this is a *similarity* score (1.0 means identical
        histograms); the name is kept for API compatibility.
        """
        hist1 = cv2.calcHist([image1], [0], None, [256], [0, 256])
        hist2 = cv2.calcHist([image2], [0], None, [256], [0, 256])
        return cv2.compareHist(hist1, hist2, cv2.HISTCMP_CORREL)

    def login(self):
        """Match live camera faces against captured_image.jpg; succeed when
        the histogram correlation exceeds 0.8 ('q' gives up)."""
        if not os.path.exists('captured_image.jpg'):
            messagebox.showerror("Error", "No image available. Please capture an image first!")
            return

        known_image = cv2.imread("captured_image.jpg", cv2.IMREAD_GRAYSCALE)
        # Hoisted: the original rebuilt the Haar cascade on every frame.
        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

        cam = cv2.VideoCapture(0)
        try:
            while True:
                ret, frame = cam.read()
                if not ret:
                    break

                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                faces = face_cascade.detectMultiScale(gray, 1.3, 5)

                if len(faces) == 1:
                    (x, y, w, h) = faces[0]
                    roi_gray = gray[y:y + h, x:x + w]
                    roi_gray_resized = cv2.resize(roi_gray, (known_image.shape[1], known_image.shape[0]))

                    similarity = self.histogram_distance(known_image, roi_gray_resized)
                    print("Similarity:", similarity)
                    if similarity > 0.8:
                        print("Login Successful!")
                        return

                cv2.imshow('Login', frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            messagebox.showerror("Error", "Failed to recognize face!")
        finally:
            cam.release()
            cv2.destroyAllWindows()
98 |
def main():
    """Create the Tk root window and start the face-recognition app."""
    root = tk.Tk()
    app = FaceRecognitionApp(root)
    root.mainloop()

if __name__ == "__main__":
    main()
106 |
107 |
108 |
--------------------------------------------------------------------------------
/JarvisGUI/NewSignUP/signUpGUI.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'form.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.15.10
6 | #
7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is
8 | # run again. Do not edit this file unless you know what you are doing.
9 |
10 |
11 | from PyQt5 import QtCore, QtGui, QtWidgets
12 |
13 |
14 | class Ui_Widget(object):
    def setupUi(self, Widget):
        """Create and position every widget of the sign-up form on *Widget*.

        pyuic5-generated code -- manual edits are lost on regeneration.
        NOTE(review): the password fields have no setEchoMode(Password), so
        typed passwords are shown in clear text; "logo" sits at y=-90, i.e.
        off-screen, and only "logo_2" is actually visible.
        """
        Widget.setObjectName("Widget")
        Widget.resize(1080, 720)
        Widget.setStyleSheet("background-color: rgb(0, 0, 0);")
        # Rounded, white-bordered frame that holds the form controls.
        self.frame = QtWidgets.QFrame(Widget)
        self.frame.setGeometry(QtCore.QRect(210, 130, 681, 551))
        self.frame.setStyleSheet("color: rgb(255, 255, 255);\n"
                                 "border-color: rgb(255, 255, 255);\n"
                                 "border-radius: 30px;\n"
                                 "border-width: 5px 5px 5px 5px;\n"
                                 "border-style: solid;")
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        # Exit button (image from the compiled Qt resource file).
        self.exitBtn = QtWidgets.QPushButton(self.frame)
        self.exitBtn.setGeometry(QtCore.QRect(490, 420, 141, 61))
        self.exitBtn.setStyleSheet("border-image: url(:/JarvisImages/exitButton.png);")
        self.exitBtn.setText("")
        self.exitBtn.setObjectName("exitBtn")
        # Back button.
        self.backBtn = QtWidgets.QPushButton(self.frame)
        self.backBtn.setGeometry(QtCore.QRect(310, 420, 141, 61))
        self.backBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.backBtn.setStyleSheet("border-image: url(:/JarvisImages/backButton.png);")
        self.backBtn.setText("")
        self.backBtn.setObjectName("backBtn")
        # Username field.
        self.userNameEntry = QtWidgets.QLineEdit(self.frame)
        self.userNameEntry.setGeometry(QtCore.QRect(110, 40, 481, 81))
        self.userNameEntry.setStyleSheet("font: 15pt \"Segoe UI\";\n"
                                         "padding-left: 10px;")
        self.userNameEntry.setObjectName("userNameEntry")
        # Password field (clear text -- see class note above).
        self.passwordEntry = QtWidgets.QLineEdit(self.frame)
        self.passwordEntry.setGeometry(QtCore.QRect(110, 150, 481, 81))
        self.passwordEntry.setStyleSheet("font: 15pt \"Segoe UI\";\n"
                                         "padding-left: 10px;\n"
                                         "background-color: rgb(0, 0, 0);")
        self.passwordEntry.setObjectName("passwordEntry")
        # Sign-up button.
        self.SignupBtn = QtWidgets.QPushButton(self.frame)
        self.SignupBtn.setGeometry(QtCore.QRect(100, 420, 171, 61))
        self.SignupBtn.setStyleSheet("border-image: url(:/JarvisImages/signUp.png);")
        self.SignupBtn.setText("")
        self.SignupBtn.setObjectName("SignupBtn")
        # Confirm-password field.
        self.ConfirmpasswordEntry = QtWidgets.QLineEdit(self.frame)
        self.ConfirmpasswordEntry.setGeometry(QtCore.QRect(110, 270, 481, 81))
        self.ConfirmpasswordEntry.setStyleSheet("font: 15pt \"Segoe UI\";\n"
                                                "padding-left: 10px;\n"
                                                "background-color: rgb(0, 0, 0);")
        self.ConfirmpasswordEntry.setObjectName("ConfirmpasswordEntry")
        # "logo" is placed off-screen (y=-90); "logo_2" is the visible banner.
        self.logo = QtWidgets.QLabel(Widget)
        self.logo.setGeometry(QtCore.QRect(90, -90, 561, 81))
        self.logo.setText("")
        self.logo.setPixmap(QtGui.QPixmap("../JarvisImages/logo.png"))
        self.logo.setScaledContents(True)
        self.logo.setObjectName("logo")
        self.logo_2 = QtWidgets.QLabel(Widget)
        self.logo_2.setGeometry(QtCore.QRect(280, 40, 561, 81))
        self.logo_2.setText("")
        self.logo_2.setPixmap(QtGui.QPixmap("../JarvisImages/logo.png"))
        self.logo_2.setScaledContents(True)
        self.logo_2.setObjectName("logo_2")

        self.retranslateUi(Widget)
        QtCore.QMetaObject.connectSlotsByName(Widget)
77 |
78 | def retranslateUi(self, Widget):
79 | _translate = QtCore.QCoreApplication.translate
80 | Widget.setWindowTitle(_translate("Widget", "Widget"))
81 | self.userNameEntry.setPlaceholderText(_translate("Widget", "USERNAME"))
82 | self.passwordEntry.setPlaceholderText(_translate("Widget", "PASSWORD"))
83 | self.ConfirmpasswordEntry.setPlaceholderText(_translate("Widget", "CONFIRM PASSWORD"))
84 |
85 |
if __name__ == "__main__":
    # Standalone preview: show the sign-up form without the rest of the app.
    import sys
    qt_app = QtWidgets.QApplication(sys.argv)
    container = QtWidgets.QWidget()
    form = Ui_Widget()
    form.setupUi(container)
    container.show()
    sys.exit(qt_app.exec_())
94 |
--------------------------------------------------------------------------------
/JarvisGUI/LoginUI/loginWindowGUI.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'form.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.15.10
6 | #
7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is
8 | # run again. Do not edit this file unless you know what you are doing.
9 |
10 |
11 | from PyQt5 import QtCore, QtGui, QtWidgets
12 |
13 |
class Ui_Widget(object):
    """pyuic5-generated login form: username/password entries, login and
    new-user buttons, retry/back/exit buttons and a disabled "login failed"
    image placeholder.

    Fixes over the generated output:
    * passwordEntry now masks its input (it was plain text on screen);
    * loginBtn's stylesheet assigned border-image twice (retryButton.png,
      immediately overridden by loginButton.png) — the dead rule is removed.
    Mirror both in form.ui so a pyuic5 re-run does not revert them.
    """

    def setupUi(self, Widget):
        Widget.setObjectName("Widget")
        Widget.resize(1080, 720)
        Widget.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
        Widget.setStyleSheet("background-color: rgb(0, 0, 0);")
        self.logo = QtWidgets.QLabel(Widget)
        self.logo.setGeometry(QtCore.QRect(260, 0, 561, 81))
        self.logo.setText("")
        self.logo.setPixmap(QtGui.QPixmap("../JarvisImages/logo.png"))
        self.logo.setScaledContents(True)
        self.logo.setObjectName("logo")
        # Rounded white-bordered frame hosting all form controls.
        self.frame = QtWidgets.QFrame(Widget)
        self.frame.setGeometry(QtCore.QRect(210, 90, 681, 601))
        self.frame.setStyleSheet("color: rgb(255, 255, 255);\n"
"border-color: rgb(255, 255, 255);\n"
"border-radius: 30px;\n"
"border-width: 5px 5px 5px 5px;\n"
"border-style: solid;")
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.retryBtn = QtWidgets.QPushButton(self.frame)
        self.retryBtn.setGeometry(QtCore.QRect(60, 490, 141, 61))
        self.retryBtn.setStyleSheet("border-image: url(:/JarvisImages/retryButton.png);")
        self.retryBtn.setText("")
        self.retryBtn.setObjectName("retryBtn")
        self.backBtn = QtWidgets.QPushButton(self.frame)
        self.backBtn.setGeometry(QtCore.QRect(270, 490, 141, 61))
        self.backBtn.setStyleSheet("border-image: url(:/JarvisImages/backButton.png);")
        self.backBtn.setText("")
        self.backBtn.setObjectName("backBtn")
        self.exitBtn = QtWidgets.QPushButton(self.frame)
        self.exitBtn.setGeometry(QtCore.QRect(490, 490, 141, 61))
        self.exitBtn.setStyleSheet("border-image: url(:/JarvisImages/exitButton.png);")
        self.exitBtn.setText("")
        self.exitBtn.setObjectName("exitBtn")
        self.userNameEntry = QtWidgets.QLineEdit(self.frame)
        self.userNameEntry.setGeometry(QtCore.QRect(112, 84, 481, 81))
        self.userNameEntry.setStyleSheet("font: 15pt \"Segoe UI\";\n"
"padding-left: 10px;")
        self.userNameEntry.setObjectName("userNameEntry")
        self.passwordEntry = QtWidgets.QLineEdit(self.frame)
        self.passwordEntry.setGeometry(QtCore.QRect(110, 220, 481, 81))
        self.passwordEntry.setStyleSheet("font: 15pt \"Segoe UI\";\n"
"padding-left: 10px;")
        self.passwordEntry.setEchoMode(QtWidgets.QLineEdit.Password)  # mask typed password
        self.passwordEntry.setObjectName("passwordEntry")
        self.loginBtn = QtWidgets.QPushButton(self.frame)
        self.loginBtn.setGeometry(QtCore.QRect(140, 390, 141, 61))
        self.loginBtn.setStyleSheet("border-image: url(:/JarvisImages/loginButton.png);")
        self.loginBtn.setText("")
        self.loginBtn.setObjectName("loginBtn")
        # Disabled "login failed" animation placeholder over the entries.
        self.label = QtWidgets.QLabel(self.frame)
        self.label.setEnabled(False)
        self.label.setGeometry(QtCore.QRect(110, 50, 511, 301))
        self.label.setStyleSheet("border: none;")
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap("../JarvisImages/loginFailed.gif"))
        self.label.setScaledContents(False)
        self.label.setObjectName("label")
        self.NewUserBtn = QtWidgets.QPushButton(self.frame)
        self.NewUserBtn.setGeometry(QtCore.QRect(390, 390, 171, 61))
        self.NewUserBtn.setStyleSheet("border-image: url(:/JarvisImages/newUser.png);")
        self.NewUserBtn.setText("")
        self.NewUserBtn.setObjectName("NewUserBtn")

        self.retranslateUi(Widget)
        QtCore.QMetaObject.connectSlotsByName(Widget)

    def retranslateUi(self, Widget):
        """Set translatable texts (window title and entry placeholders)."""
        _translate = QtCore.QCoreApplication.translate
        Widget.setWindowTitle(_translate("Widget", "Widget"))
        self.userNameEntry.setPlaceholderText(_translate("Widget", "USERNAME"))
        self.passwordEntry.setPlaceholderText(_translate("Widget", "PASSWORD"))
89 |
90 |
if __name__ == "__main__":
    # Standalone preview: show the login form on its own.
    import sys
    qt_app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QWidget()
    login_form = Ui_Widget()
    login_form.setupUi(window)
    window.show()
    sys.exit(qt_app.exec_())
99 |
--------------------------------------------------------------------------------
/JarvisGUI/newUserPyFaceRecogFile.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import cv2
4 | import face_recognition
5 | from PyQt5.QtWidgets import QWidget, QApplication, QMessageBox
6 | from PyQt5.QtGui import QImage, QPixmap
7 | from PyQt5.QtCore import pyqtSlot, QTimer
8 | from newUserFaceRecGUI import Ui_Newface
9 | from signUpImpl import mainFileNew # Import the SignUpDialog class
10 |
class newFaceRecog(QWidget):
    """Webcam window for enrolling a new user's face.

    Streams frames from the default camera into the GUI, draws a box around
    each detected face, and on "capture" saves a snapshot named after the
    most recently signed-up user (last row of user_data.csv).
    """

    # Folder where enrolment snapshots are written. Raw string so the
    # backslashes are literal instead of (invalid) escape sequences, which
    # the original relied on ('\B', '\P', ... triggering DeprecationWarning).
    FACE_IMAGE_DIR = r'D:\BSc CSD Sem 6\Project\JarvisGUI\FaceRecogGUI\faceImages'

    def __init__(self):
        super(newFaceRecog, self).__init__()
        self.faceNewUI = Ui_Newface()
        self.faceNewUI.setupUi(self)
        self.faceNewUI.exitBtn.clicked.connect(self.close)
        self.faceNewUI.loginBtn.clicked.connect(self.connectToLoginPage)
        self.faceNewUI.captureBtn.clicked.connect(self.captureImage)

        # Create an instance of the SignUpDialog class (kept alive on self
        # so Qt does not garbage-collect it).
        self.signup_dialog = mainFileNew()

        self.startVideo()

    @pyqtSlot()
    def startVideo(self):
        """Open the default webcam and start the 10 ms frame-refresh timer."""
        try:
            self.capture = cv2.VideoCapture(0)
            if not self.capture.isOpened():
                print("Failed to open webcam")
                return
            self.timer = QTimer(self)
            self.timer.timeout.connect(self.updateFrames)
            self.timer.start(10)
        except Exception as e:
            print("Error occurred during video capture:", e)

    def updateFrames(self):
        """Grab one camera frame and push it to the preview label."""
        try:
            ret, self.image = self.capture.read()
            if ret:
                self.displayImage(self.image)
            else:
                print("Failed to capture frame")
        except Exception as e:
            print("Error occurred during frame update:", e)

    def displayImage(self, image):
        """Resize, annotate and display *image* in the video preview label."""
        try:
            image = cv2.resize(image, (681, 491))
            image, _ = self.faceRec(image)
            qformat = QImage.Format_RGB888
            outImage = QImage(image, image.shape[1], image.shape[0], image.strides[0], qformat)
            # OpenCV delivers BGR; swap channels for Qt's RGB format.
            outImage = outImage.rgbSwapped()
            self.faceNewUI.videBack.setPixmap(QPixmap.fromImage(outImage))
            self.faceNewUI.videBack.setScaledContents(True)
        except Exception as e:
            print("Error occurred during image display:", e)

    def faceRec(self, image):
        """Draw a green rectangle around each detected face.

        Returns (annotated image, face_locations). On error it now returns
        (image, []) — the original returned None, which crashed the caller's
        tuple-unpacking in displayImage().
        """
        try:
            face_locations = face_recognition.face_locations(image)
            for (top, right, bottom, left) in face_locations:
                cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
            return image, face_locations
        except Exception as e:
            print("Error occurred during face recognition:", e)
            return image, []  # keep the caller's unpack working

    def captureImage(self):
        """Save the current frame as <latest username>.jpg in FACE_IMAGE_DIR."""
        try:
            # The newest sign-up is the last row of user_data.csv.
            with open('user_data.csv', 'r') as file:
                lines = file.readlines()
            last_line = lines[-1].strip()
            last_username = last_line.split(',')[0]

            image_path = os.path.join(self.FACE_IMAGE_DIR, f'{last_username}.jpg')
            cv2.imwrite(image_path, self.image)
            print("Image captured and saved successfully!")

            # Display a message box indicating successful capture and save.
            self.showMessageBox("Success", "Image captured and saved successfully!")

        except Exception as e:
            print("Error occurred while capturing and saving image:", e)

    def connectToLoginPage(self):
        """Stop the camera, close this window and open the login window."""
        from loginWindowMain import loginWindow
        self.showLoginWindow = loginWindow()
        self.timer.stop()  # Stop the timer
        self.capture.release()  # Release the capture object
        self.close()  # Close the current dialog
        self.showLoginWindow.show()

    def showMessageBox(self, title, message):
        """Show a dark-themed informational message box."""
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setWindowTitle(title)
        msg.setStyleSheet("color: white; background-color: #333333; font-size: 12pt;")
        msg.setText(message)
        msg.exec_()
103 |
if __name__ == "__main__":
    # Launch the new-user face-capture window standalone.
    application = QApplication(sys.argv)
    try:
        capture_window = newFaceRecog()
        capture_window.show()
        sys.exit(application.exec_())
    except Exception as e:
        print("Error occurred:", e)
112 |
--------------------------------------------------------------------------------
/JarvisGUI/signUpGUI.py:
--------------------------------------------------------------------------------
1 | from PyQt5 import QtCore, QtGui, QtWidgets
2 |
3 |
class Ui_SignUp(object):
    """Hand-maintained sign-up form: username, masked password and masked
    confirm-password fields, plus sign-up / back / exit buttons.

    NOTE(review): all button and logo images use hard-coded absolute
    "D:/BSc CSD Sem 6/..." paths, so they presumably only render on the
    author's machine — consider the :/JarvisImages resource prefix used by
    the other forms; verify before changing.
    """

    def setupUi(self, Widget):
        # Black 1080x720 widget hosting a rounded, white-bordered frame.
        Widget.setObjectName("Widget")
        Widget.resize(1080, 720)
        Widget.setStyleSheet("background-color: rgb(0, 0, 0);")
        self.frame = QtWidgets.QFrame(Widget)
        self.frame.setGeometry(QtCore.QRect(210, 130, 681, 551))
        self.frame.setStyleSheet("color: rgb(255, 255, 255);\n"
"border-color: rgb(255, 255, 255);\n"
"border-radius: 30px;\n"
"border-width: 5px 5px 5px 5px;\n"
"border-style: solid;")
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        # Image-skinned buttons (no text); pointing-hand cursor on hover.
        self.exitBtn = QtWidgets.QPushButton(self.frame)
        self.exitBtn.setGeometry(QtCore.QRect(490, 420, 141, 61))
        self.exitBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/exitButton.png);")
        self.exitBtn.setText("")
        self.exitBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.exitBtn.setObjectName("exitBtn")
        self.backBtn = QtWidgets.QPushButton(self.frame)
        self.backBtn.setGeometry(QtCore.QRect(310, 420, 141, 61))
        self.backBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.backBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/backButton.png);")
        self.backBtn.setText("")
        self.backBtn.setObjectName("backBtn")
        self.userNameEntry = QtWidgets.QLineEdit(self.frame)
        self.userNameEntry.setGeometry(QtCore.QRect(110, 40, 481, 81))
        self.userNameEntry.setStyleSheet("font: 15pt \"Segoe UI\";\n"
"padding-left: 10px;")
        self.userNameEntry.setObjectName("userNameEntry")
        # Password entry — input masked via EchoMode below.
        self.passwordEntry = QtWidgets.QLineEdit(self.frame)
        self.passwordEntry.setGeometry(QtCore.QRect(110, 150, 481, 81))
        self.passwordEntry.setStyleSheet("font: 15pt \"Segoe UI\";\n"
"padding-left: 10px;\n"
"background-color: rgb(0, 0, 0);")
        self.passwordEntry.setObjectName("passwordEntry")
        self.passwordEntry.setEchoMode(QtWidgets.QLineEdit.Password) # Set echo mode to Password
        self.SignupBtn = QtWidgets.QPushButton(self.frame)
        self.SignupBtn.setGeometry(QtCore.QRect(100, 420, 171, 61))
        self.SignupBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/signUp.png);")
        self.SignupBtn.setText("")
        self.SignupBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.SignupBtn.setObjectName("SignupBtn")
        # Confirmation entry — also masked.
        self.ConfirmpasswordEntry = QtWidgets.QLineEdit(self.frame)
        self.ConfirmpasswordEntry.setGeometry(QtCore.QRect(110, 270, 481, 81))
        self.ConfirmpasswordEntry.setStyleSheet("font: 15pt \"Segoe UI\";\n"
"padding-left: 10px;\n"
"background-color: rgb(0, 0, 0);")
        self.ConfirmpasswordEntry.setObjectName("ConfirmpasswordEntry")
        self.ConfirmpasswordEntry.setEchoMode(QtWidgets.QLineEdit.Password) # Set echo mode to Password
        # Two logo labels; the first sits at y=-90 (mostly off-screen).
        self.logo = QtWidgets.QLabel(Widget)
        self.logo.setGeometry(QtCore.QRect(90, -90, 561, 81))
        self.logo.setText("")
        self.logo.setPixmap(QtGui.QPixmap("D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/logo.png"))
        self.logo.setScaledContents(True)
        self.logo.setObjectName("logo")
        self.logo_2 = QtWidgets.QLabel(Widget)
        self.logo_2.setGeometry(QtCore.QRect(280, 40, 561, 81))
        self.logo_2.setText("")
        self.logo_2.setPixmap(QtGui.QPixmap("D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/logo.png"))
        self.logo_2.setScaledContents(True)
        self.logo_2.setObjectName("logo_2")

        self.retranslateUi(Widget)
        QtCore.QMetaObject.connectSlotsByName(Widget)

    def retranslateUi(self, Widget):
        # User-visible strings are routed through Qt's translation system.
        _translate = QtCore.QCoreApplication.translate
        Widget.setWindowTitle(_translate("Widget", "Widget"))
        self.userNameEntry.setPlaceholderText(_translate("Widget", "USERNAME"))
        self.passwordEntry.setPlaceholderText(_translate("Widget", "PASSWORD"))
        self.ConfirmpasswordEntry.setPlaceholderText(_translate("Widget", "CONFIRM PASSWORD"))
78 |
79 |
if __name__ == "__main__":
    # Standalone preview of the sign-up form.
    import sys

    qt_app = QtWidgets.QApplication(sys.argv)
    container = QtWidgets.QWidget()
    signup_form = Ui_SignUp()
    signup_form.setupUi(container)
    container.show()
    sys.exit(qt_app.exec_())
89 |
--------------------------------------------------------------------------------
/JarvisGUI/NewSignUP/form.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | Widget
4 |
5 |
6 |
7 | 0
8 | 0
9 | 1080
10 | 720
11 |
12 |
13 |
14 | Widget
15 |
16 |
17 | background-color: rgb(0, 0, 0);
18 |
19 |
20 |
21 |
22 | 210
23 | 130
24 | 681
25 | 551
26 |
27 |
28 |
29 | color: rgb(255, 255, 255);
30 | border-color: rgb(255, 255, 255);
31 | border-radius: 30px;
32 | border-width: 5px 5px 5px 5px;
33 | border-style: solid;
34 |
35 |
36 | QFrame::StyledPanel
37 |
38 |
39 | QFrame::Raised
40 |
41 |
42 |
43 |
44 | 490
45 | 420
46 | 141
47 | 61
48 |
49 |
50 |
51 | border-image: url(:/JarvisImages/exitButton.png);
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 | 310
61 | 420
62 | 141
63 | 61
64 |
65 |
66 |
67 | PointingHandCursor
68 |
69 |
70 | border-image: url(:/JarvisImages/backButton.png);
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 | 110
80 | 40
81 | 481
82 | 81
83 |
84 |
85 |
86 | font: 15pt "Segoe UI";
87 | padding-left: 10px;
88 |
89 |
90 | USERNAME
91 |
92 |
93 |
94 |
95 |
96 | 110
97 | 150
98 | 481
99 | 81
100 |
101 |
102 |
103 | font: 15pt "Segoe UI";
104 | padding-left: 10px;
105 | background-color: rgb(0, 0, 0);
106 |
107 |
108 | PASSWORD
109 |
110 |
111 |
112 |
113 |
114 | 100
115 | 420
116 | 171
117 | 61
118 |
119 |
120 |
121 | border-image: url(:/JarvisImages/signUp.png);
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 | 110
131 | 270
132 | 481
133 | 81
134 |
135 |
136 |
137 | font: 15pt "Segoe UI";
138 | padding-left: 10px;
139 | background-color: rgb(0, 0, 0);
140 |
141 |
142 | CONFIRM PASSWORD
143 |
144 |
145 |
146 |
147 |
148 |
149 | 90
150 | -90
151 | 561
152 | 81
153 |
154 |
155 |
156 |
157 |
158 |
159 | ../JarvisImages/logo.png
160 |
161 |
162 | true
163 |
164 |
165 |
166 |
167 |
168 | 280
169 | 40
170 | 561
171 | 81
172 |
173 |
174 |
175 |
176 |
177 |
178 | ../JarvisImages/logo.png
179 |
180 |
181 | true
182 |
183 |
184 |
185 |
186 |
187 |
188 |
--------------------------------------------------------------------------------
/JarvisGUI/SignUpUI/jarvisMainGUI.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'form.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.15.10
6 | #
7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is
8 | # run again. Do not edit this file unless you know what you are doing.
9 |
10 |
11 | from PyQt5 import QtCore, QtGui, QtWidgets
12 |
13 |
class Ui_Dialog(object):
    """pyuic5-generated main Jarvis dashboard: logo, status animations,
    Iron-Man side art, a terminal-style input/output frame and an exit button.

    NOTE(review): image sources are mixed — some use relative
    "../JarvisImages/..." file paths, others the ":/JarvisImages/..." Qt
    resource prefix; the relative ones only resolve from a matching working
    directory. Generated code: re-running pyuic5 discards these comments.
    """

    def setupUi(self, Dialog):
        Dialog.setObjectName("Dialog")
        Dialog.resize(1280, 720)
        Dialog.setStyleSheet("background-color: rgb(0, 0, 0);")
        # Top-centre Jarvis logo.
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(370, 10, 601, 91))
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap("../JarvisImages/logo.png"))
        self.label.setScaledContents(True)
        self.label.setObjectName("label")
        # Voice-recognition animation, created disabled.
        self.listeningImg = QtWidgets.QLabel(Dialog)
        self.listeningImg.setEnabled(False)
        self.listeningImg.setGeometry(QtCore.QRect(10, 100, 361, 261))
        self.listeningImg.setText("")
        self.listeningImg.setPixmap(QtGui.QPixmap("../JarvisImages/voicerecog.gif"))
        self.listeningImg.setScaledContents(True)
        self.listeningImg.setObjectName("listeningImg")
        # Iron-Man artwork along the right edge.
        self.label_3 = QtWidgets.QLabel(Dialog)
        self.label_3.setGeometry(QtCore.QRect(1020, 100, 261, 481))
        self.label_3.setStyleSheet("background-color:transparent;")
        self.label_3.setText("")
        self.label_3.setPixmap(QtGui.QPixmap("../JarvisImages/ironman.webp"))
        self.label_3.setScaledContents(True)
        self.label_3.setObjectName("label_3")
        # Central arc-reactor animation.
        self.arc_reactor = QtWidgets.QLabel(Dialog)
        self.arc_reactor.setGeometry(QtCore.QRect(390, 110, 511, 291))
        self.arc_reactor.setText("")
        self.arc_reactor.setPixmap(QtGui.QPixmap("../JarvisImages/arcreactor.gif"))
        self.arc_reactor.setScaledContents(True)
        self.arc_reactor.setObjectName("arc_reactor")
        # Two black patches overlapping the arc-reactor area — presumably
        # masking the gif's square corners; verify visually before removing.
        self.label_5 = QtWidgets.QLabel(Dialog)
        self.label_5.setGeometry(QtCore.QRect(380, 110, 111, 91))
        self.label_5.setText("")
        self.label_5.setPixmap(QtGui.QPixmap("../JarvisImages/blkimg.png"))
        self.label_5.setScaledContents(True)
        self.label_5.setObjectName("label_5")
        self.label_6 = QtWidgets.QLabel(Dialog)
        self.label_6.setGeometry(QtCore.QRect(800, 310, 111, 91))
        self.label_6.setText("")
        self.label_6.setPixmap(QtGui.QPixmap("../JarvisImages/blkimg.png"))
        self.label_6.setScaledContents(True)
        self.label_6.setObjectName("label_6")
        # Microphone animation occupying the same rect as listeningImg.
        self.micImg = QtWidgets.QLabel(Dialog)
        self.micImg.setGeometry(QtCore.QRect(10, 100, 361, 261))
        self.micImg.setText("")
        self.micImg.setPixmap(QtGui.QPixmap("../JarvisImages/listening.gif"))
        self.micImg.setScaledContents(True)
        self.micImg.setObjectName("micImg")
        # Terminal frame: output box, command input line and submit button.
        self.frame = QtWidgets.QFrame(Dialog)
        self.frame.setGeometry(QtCore.QRect(30, 420, 1041, 271))
        self.frame.setStyleSheet("color: rgb(255, 255, 255);\n"
"border-color: rgb(255, 255, 255);\n"
"border-radius: 30px;\n"
"border-width: 5px 5px 5px 5px;\n"
"border-style: solid;")
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.submitBtn = QtWidgets.QPushButton(self.frame)
        self.submitBtn.setGeometry(QtCore.QRect(880, 200, 141, 51))
        self.submitBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.submitBtn.setStyleSheet("border-image: url(:/JarvisImages/startButton.png);")
        self.submitBtn.setText("")
        self.submitBtn.setObjectName("submitBtn")
        self.terminalInputBox = QtWidgets.QLineEdit(self.frame)
        self.terminalInputBox.setGeometry(QtCore.QRect(20, 200, 841, 51))
        self.terminalInputBox.setStyleSheet("font: 15pt \"Segoe UI\";\n"
"padding-left: 10px;")
        self.terminalInputBox.setObjectName("terminalInputBox")
        self.terminalOutputBox = QtWidgets.QPlainTextEdit(self.frame)
        self.terminalOutputBox.setGeometry(QtCore.QRect(20, 20, 1001, 161))
        self.terminalOutputBox.setObjectName("terminalOutputBox")
        # Exit button in the dialog's bottom-right corner.
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(1110, 620, 131, 61))
        self.pushButton.setStyleSheet("border-image: url(:/JarvisImages/exitButton.png);")
        self.pushButton.setText("")
        self.pushButton.setObjectName("pushButton")

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        # User-visible strings routed through Qt's translation system.
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.terminalInputBox.setPlaceholderText(_translate("Dialog", "Enter your command"))
100 |
101 |
if __name__ == "__main__":
    # Standalone preview of the main Jarvis dashboard dialog.
    import sys
    qt_app = QtWidgets.QApplication(sys.argv)
    dashboard = QtWidgets.QDialog()
    dashboard_ui = Ui_Dialog()
    dashboard_ui.setupUi(dashboard)
    dashboard.show()
    sys.exit(qt_app.exec_())
110 |
--------------------------------------------------------------------------------
/JarvisGUI/faceRecog.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import cv2
4 | import face_recognition
5 | from PyQt5.QtWidgets import QWidget, QApplication
6 | from PyQt5.QtGui import QImage, QPixmap
7 | from PyQt5.QtCore import pyqtSlot, QTimer
8 | from faceRecogGUI import Ui_Widget
9 |
class faceRecog(QWidget):
    """Webcam face-login window.

    Perf fix: the original re-listed the faceImages directory and re-encoded
    every enrolled face on EVERY camera frame, which made the preview crawl.
    Enrolled faces are now loaded and encoded once at startup and cached;
    each frame only encodes the faces visible in that frame. (Trade-off:
    images added to the folder while the window is open are not picked up
    until restart.) When the face "hari" is recognised, jarvisMAIN.py is
    launched.
    """

    # Folder of enrolled face images, one <username>.jpg per user.
    KNOWN_FACES_DIR = r'D:\BSc CSD Sem 6\Project\JarvisGUI\faceRecogGUI\faceImages'

    def __init__(self):
        super(faceRecog, self).__init__()
        print("Setting up GUI")
        self.faceUI = Ui_Widget()
        self.faceUI.setupUi(self)

        self.timer = QTimer(self)
        self.timer.timeout.connect(self.updateFrames)

        self.faceUI.exitBtn.clicked.connect(self.close)
        self.faceUI.loginBtn.clicked.connect(self.connectToLoginPage)
        self.faceUI.newUserBtn.clicked.connect(self.goToSignUpPage)

        self.capture = None  # webcam handle, created in startVideo()

        # One-time encoding of all enrolled faces (was done per frame).
        self.known_face_encodings = []
        self.known_face_names = []
        self._loadKnownFaces()

        self.startVideo()

    def _loadKnownFaces(self):
        """Encode every image in KNOWN_FACES_DIR once and cache the results."""
        try:
            for filename in os.listdir(self.KNOWN_FACES_DIR):
                image_path = os.path.join(self.KNOWN_FACES_DIR, filename)
                img = face_recognition.load_image_file(image_path)
                face_encoding_list = face_recognition.face_encodings(img)
                if face_encoding_list:
                    self.known_face_encodings.append(face_encoding_list[0])
                    self.known_face_names.append(os.path.splitext(filename)[0])
        except Exception as e:
            print("Error occurred while loading known faces:", e)

    def startVideo(self):
        """Open the default webcam and start the 10 ms frame-refresh timer."""
        print("Encoding started")
        try:
            self.capture = cv2.VideoCapture(0)  # Open webcam
            if not self.capture.isOpened():
                print("Failed to open webcam")
                return
            self.timer.start(10)
        except Exception as e:
            print("Error occurred during video capture:", e)

    @pyqtSlot()
    def updateFrames(self):
        """Grab one camera frame and push it to the preview label."""
        try:
            ret, self.image = self.capture.read()
            if ret:
                self.displayImage()
            else:
                print("Failed to capture frame")
        except Exception as e:
            print("Error occurred during frame update:", e)

    def displayImage(self):
        """Annotate and show the current frame; log in when "hari" is seen."""
        try:
            image = cv2.resize(self.image, (681, 491))
            image, detected_name = self.faceRec(image)

            qformat = QImage.Format_RGB888
            outImage = QImage(image, image.shape[1], image.shape[0], image.strides[0], qformat)
            # OpenCV delivers BGR; swap channels for Qt's RGB format.
            outImage = outImage.rgbSwapped()

            self.faceUI.videoBack.setPixmap(QPixmap.fromImage(outImage))
            self.faceUI.videoBack.setScaledContents(True)

            # Auto-login trigger for the recognised user "hari".
            if detected_name == "hari":
                self.connectToJarvisMainFile()
                self.timer.stop()

        except Exception as e:
            print("Error occurred during image display:", e)

    def faceRec(self, image):
        """Label faces in *image* against the cached enrolled encodings.

        Returns (annotated image, name of the last recognised face) or
        (image, "Unknown") when nothing matched or an error occurred.
        """
        try:
            if not self.known_face_encodings:
                print("No face encodings found.")
                return image, "Unknown"

            face_locations = face_recognition.face_locations(image)
            face_encodings = face_recognition.face_encodings(image, face_locations)

            detected_name = "Unknown"

            for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
                matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
                name = "Unknown"

                if True in matches:
                    first_match_index = matches.index(True)
                    name = self.known_face_names[first_match_index]
                    detected_name = name

                cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
                cv2.putText(image, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

            return image, detected_name

        except Exception as e:
            print("Error occurred during face recognition:", e)
            return image, "Unknown"

    def connectToJarvisMainFile(self):
        """Release the camera and launch the main Jarvis application."""
        from subprocess import call
        python_executable = sys.executable
        self.timer.stop()  # Stop the timer
        self.capture.release()  # Release the capture object
        call([python_executable, "jarvisMAIN.py"])
        self.close()

    def connectToLoginPage(self):
        """Release the camera and switch to the password login window."""
        from loginWindowMain import loginWindow
        self.timer.stop()  # Stop the timer
        self.capture.release()  # Release the capture object
        self.showLoginWindow = loginWindow()
        self.close()
        self.showLoginWindow.show()

    def goToSignUpPage(self):
        """Release the camera and switch to the sign-up window."""
        from signUpImpl import mainFileNew  # Import the sign-up window class
        self.timer.stop()  # Stop the timer
        self.capture.release()  # Release the capture object
        self.showSignUp = mainFileNew()
        self.close()  # Close the current dialog
        self.showSignUp.show()
134 |
if __name__ == "__main__":
    # Launch the face-login window standalone.
    application = QApplication(sys.argv)
    try:
        login_window = faceRecog()
        login_window.show()
        sys.exit(application.exec_())
    except Exception as e:
        print("Error occurred:", e)
143 |
--------------------------------------------------------------------------------
/MAIN.py:
--------------------------------------------------------------------------------
1 | from sklearn.feature_extraction.text import CountVectorizer
2 | from sklearn.metrics.pairwise import cosine_similarity
3 | import winsearch as ws
4 | import pandas as pd
5 | import spacy
6 | import text2speech as t2s
7 | import speech2text as s2t
8 | import string
9 | import Email
10 | import WeatherUpdates
11 | import utubeVideoDownloader
12 | import gptIntegration
13 | import ScheduleGmeet
14 | import pyttsx3
15 | import searchFile
16 | import os
17 |
18 |
19 | ln = "en"
20 |
def preprocess_text(text):
    """Lower-case *text* and drop stop words and punctuation tokens.

    Relies on the module-level spaCy pipeline ``nlp``.
    """
    kept = []
    for token in nlp(text):
        if token.is_stop or token.text in string.punctuation:
            continue
        kept.append(token.text.lower())
    return " ".join(kept)
26 |
def predict_label(input_text, df, vectorizer):
    """Return (best-matching label, its cosine similarity) for *input_text*.

    The input is preprocessed, vectorized with *vectorizer*, and compared
    against every phrase in df['text']; the label of the most similar row
    is returned together with its similarity score.
    """
    cleaned = preprocess_text(input_text)
    print(cleaned)
    query_vec = vectorizer.transform([cleaned])
    corpus_vecs = vectorizer.transform(df['text'])
    scores = cosine_similarity(query_vec, corpus_vecs)
    best = scores.argmax()
    return df['label'][best], scores[0, best]
35 |
# --- one-time setup --------------------------------------------------------
engine = pyttsx3.init()  # shared text-to-speech engine
nlp = spacy.load('en_core_web_sm')  # spaCy pipeline used by preprocess_text()
df = pd.read_csv('os_dataset.csv')  # phrase -> "open/close <app>" label dataset
df.dropna(subset=['text'], inplace=True)
df.reset_index(drop=True, inplace=True)
vectorizer = CountVectorizer().fit(df['text'])

# --- main voice-command loop (exits on "stop listening") -------------------
while(True):

    input_text = s2t.voice2text(ln)
    input_text = input_text.lower()

    if "send email" in input_text:
        # Dictate recipient, subject and body; re-prompt until non-empty.
        print("Recipient's Email:")

        t2s.text2speech(engine, "tell Recipient's Email address")
        receiver_email = s2t.voice2text(ln).lower().replace(" ", "")
        while not receiver_email:
            receiver_email = s2t.voice2text(ln).lower().replace(" ", "")
        print("Receiver:", receiver_email)
        print("Subject:")
        t2s.text2speech(engine, "Subject of the email")
        subject = s2t.voice2text(ln)
        while not subject:
            subject = s2t.voice2text(ln)
        print("Body:")
        t2s.text2speech(engine, "Body of the Email")
        body = s2t.voice2text(ln)
        while not body:
            body = s2t.voice2text(ln)
        Email.send_email(receiver_email, subject, body)

        # Fix: spoken confirmation had a typo ("successufully").
        t2s.text2speech(engine, "Email Sent successfully")

    elif "check weather" in input_text:
        print("Which city do you want to check weather for?")
        t2s.text2speech(engine, "Which city do you want to check weather for?")
        city_name = s2t.voice2text(ln)
        while not city_name:
            print("Which city do you want to check weather for?")
            t2s.text2speech(engine, "Which city do you want to check weather for?")
            city_name = s2t.voice2text(ln)
        # Fix: str.strip() returns a new string; the original discarded it.
        city_name = city_name.strip()
        if city_name.lower() == 'exit':
            break
        else:
            weather = WeatherUpdates.get_weather(city_name)
            t2s.text2speech(engine, f"Todays weather in {city_name} is {weather} degree celcius")

    elif "download youtube video" in input_text:
        # URL and destination are typed, not dictated.
        t2s.text2speech(engine, "Enter the URL of the video: ")
        url = input("Enter the URL of the video: ")
        username = os.environ['USERNAME']
        # save_path = r"C:\Users\{username}\Downloads"
        t2s.text2speech(engine, "Enter the path to save the video ")
        save_path = input("Enter the path")
        utubeVideoDownloader.download_video(url, save_path)
        t2s.text2speech(engine, "The video is saved to your downloads")

    elif "go to interactive mode" in input_text:
        t2s.text2speech(engine, "Interactive mode activated")
        gptIntegration.chat()

    elif "schedule meeting" in input_text:
        ScheduleGmeet.main()
        t2s.text2speech(engine, "Yes sir")

    elif "search file" in input_text:
        t2s.text2speech(engine, "What the name of the file do you want to search")
        name = s2t.voice2text(ln)
        searchFile.open_windows_search(name)

    elif "change to tamil" in input_text:
        ln = "ta"
        t2s.text2speech(engine, "neengal ippoludhu tamilaum pesalam")
    elif "change to english" in input_text:
        ln = "en"
        t2s.text2speech(engine, "You can speak in english")

    # Fix: registry paths are now raw strings so every backslash is literal
    # (the originals relied on '\S', '\M', ... not being valid escapes).
    elif "switch to jarvis" in input_text:
        t2s.switch_voice(engine, r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_DAVID_11.0')

    elif "switch to friday" in input_text:
        t2s.switch_voice(engine, r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0')

    elif input_text == "stop listening":
        t2s.text2speech(engine, "Sure")
        break

    else:
        # No keyword matched: fall back to dataset similarity search and,
        # above a 0.5 similarity, run the predicted open/close action.
        predicted_label, max_similarity = predict_label(input_text, df, vectorizer)
        print("Predicted label:", predicted_label)
        print("Maximum similarity score:", max_similarity)

        if (max_similarity > 0.5):
            if "open" in predicted_label:
                app = predicted_label.replace("open ", "")
                ws.os_open(app)
                t2s.text2speech(engine, "Yes sir")
            elif "close" in predicted_label:
                app = predicted_label.replace("close ", "")
                ws.close_application(app)
                t2s.text2speech(engine, "Yes sir")
142 |
143 |
144 |
--------------------------------------------------------------------------------
/JarvisGUI/loginWindowGUI.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'form.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.15.10
6 | #
7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is
8 | # run again. Do not edit this file unless you know what you are doing.
9 |
10 |
11 | from PyQt5 import QtCore, QtGui, QtWidgets
12 |
13 |
class Ui_Widget(object):
    """Login-window UI generated by pyuic5 from form.ui.

    Lays out the username/password entries, the login / retry / back /
    exit / new-user buttons and a disabled "login failed" image label
    on a black 1080x720 widget.

    NOTE(review): pixmap/border-image paths are absolute local paths
    ("D:/BSc CSD Sem 6/...") and will silently render nothing on other
    machines; the Qt resource system (.qrc) would be more portable.
    """

    def __init__(self):
        # Pre-declared so the attribute exists (and is seen by static
        # analysis) before setupUi() creates the real button.
        self.NewUserBtn = None

    def setupUi(self, Widget):
        """Create, position and style every child widget of *Widget*."""
        Widget.setObjectName("Widget")
        Widget.resize(1080, 720)
        Widget.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
        Widget.setStyleSheet("background-color: rgb(0, 0, 0);")
        self.logo = QtWidgets.QLabel(Widget)
        self.logo.setGeometry(QtCore.QRect(260, 0, 561, 81))
        self.logo.setText("")
        self.logo.setPixmap(QtGui.QPixmap("D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/logo.png"))
        self.logo.setScaledContents(True)
        self.logo.setObjectName("logo")
        self.frame = QtWidgets.QFrame(Widget)
        self.frame.setGeometry(QtCore.QRect(210, 90, 681, 601))
        self.frame.setStyleSheet("color: rgb(255, 255, 255);\n"
                                 "border-color: rgb(255, 255, 255);\n"
                                 "border-radius: 30px;\n"
                                 "border-width: 5px 5px 5px 5px;\n"
                                 "border-style: solid;")
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.retryBtn = QtWidgets.QPushButton(self.frame)
        self.retryBtn.setGeometry(QtCore.QRect(60, 490, 141, 61))
        self.retryBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/retryButton.png);")
        self.retryBtn.setText("")
        self.retryBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.retryBtn.setObjectName("retryBtn")
        self.backBtn = QtWidgets.QPushButton(self.frame)
        self.backBtn.setGeometry(QtCore.QRect(270, 490, 141, 61))
        self.backBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/backButton.png);")
        self.backBtn.setText("")
        self.backBtn.setObjectName("backBtn")
        self.backBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.exitBtn = QtWidgets.QPushButton(self.frame)
        self.exitBtn.setGeometry(QtCore.QRect(490, 490, 141, 61))
        self.exitBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/exitButton.png);")
        self.exitBtn.setText("")
        self.exitBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.exitBtn.setObjectName("exitBtn")
        self.userNameEntry = QtWidgets.QLineEdit(self.frame)
        self.userNameEntry.setGeometry(QtCore.QRect(112, 84, 481, 81))
        self.userNameEntry.setStyleSheet("font: 15pt \"Segoe UI\";\n"
                                         "padding-left: 10px;")
        self.userNameEntry.setObjectName("userNameEntry")
        self.passwordEntry = QtWidgets.QLineEdit(self.frame)
        self.passwordEntry.setGeometry(QtCore.QRect(110, 220, 481, 81))
        self.passwordEntry.setStyleSheet("font: 15pt \"Segoe UI\";\n"
                                         "padding-left: 10px;")
        self.passwordEntry.setObjectName("passwordEntry")
        self.loginBtn = QtWidgets.QPushButton(self.frame)
        self.loginBtn.setGeometry(QtCore.QRect(140, 390, 141, 61))
        # FIX: the generated code declared border-image twice
        # (retryButton.png then loginButton.png). In Qt style sheets the
        # last declaration wins, so only loginButton.png was ever shown;
        # the dead retryButton.png declaration has been removed.
        self.loginBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/loginButton.png);")
        self.loginBtn.setText("")
        self.loginBtn.setObjectName("loginBtn")
        self.loginBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))

        # Disabled overlay label carrying the "login failed" image.
        self.label = QtWidgets.QLabel(self.frame)
        self.label.setEnabled(False)
        self.label.setGeometry(QtCore.QRect(110, 50, 511, 301))
        self.label.setStyleSheet("border: none;")
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap("D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/loginFailed.gif"))
        self.label.setScaledContents(False)
        self.label.setObjectName("label")
        self.NewUserBtn = QtWidgets.QPushButton(self.frame)
        self.NewUserBtn.setGeometry(QtCore.QRect(390, 390, 171, 61))
        self.NewUserBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/newUser.png);")
        self.NewUserBtn.setText("")
        self.NewUserBtn.setObjectName("NewUserBtn")
        self.NewUserBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.retranslateUi(Widget)
        QtCore.QMetaObject.connectSlotsByName(Widget)

    def retranslateUi(self, Widget):
        """Install translatable strings (window title, entry placeholders)."""
        _translate = QtCore.QCoreApplication.translate
        Widget.setWindowTitle(_translate("Widget", "Widget"))
        self.userNameEntry.setPlaceholderText(_translate("Widget", "USERNAME"))
        self.passwordEntry.setPlaceholderText(_translate("Widget", "PASSWORD"))
97 |
98 |
if __name__ == "__main__":
    import sys

    # Stand-alone preview: build the generated login form in a bare
    # QWidget and run the Qt event loop until the window is closed.
    application = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QWidget()
    form = Ui_Widget()
    form.setupUi(window)
    window.show()
    sys.exit(application.exec_())
108 |
--------------------------------------------------------------------------------
/JarvisGUI/LoginUI/form.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | Widget
4 |
5 |
6 |
7 | 0
8 | 0
9 | 1080
10 | 720
11 |
12 |
13 |
14 | ArrowCursor
15 |
16 |
17 | Widget
18 |
19 |
20 | background-color: rgb(0, 0, 0);
21 |
22 |
23 |
24 |
25 | 260
26 | 0
27 | 561
28 | 81
29 |
30 |
31 |
32 |
33 |
34 |
35 | ../JarvisImages/logo.png
36 |
37 |
38 | true
39 |
40 |
41 |
42 |
43 |
44 | 210
45 | 90
46 | 681
47 | 601
48 |
49 |
50 |
51 | color: rgb(255, 255, 255);
52 | border-color: rgb(255, 255, 255);
53 | border-radius: 30px;
54 | border-width: 5px 5px 5px 5px;
55 | border-style: solid;
56 |
57 |
58 | QFrame::StyledPanel
59 |
60 |
61 | QFrame::Raised
62 |
63 |
64 |
65 |
66 | 60
67 | 490
68 | 141
69 | 61
70 |
71 |
72 |
73 | border-image: url(:/JarvisImages/retryButton.png);
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 | 270
83 | 490
84 | 141
85 | 61
86 |
87 |
88 |
89 | border-image: url(:/JarvisImages/backButton.png);
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 | 490
99 | 490
100 | 141
101 | 61
102 |
103 |
104 |
105 | border-image: url(:/JarvisImages/exitButton.png);
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 | 112
115 | 84
116 | 481
117 | 81
118 |
119 |
120 |
121 | font: 15pt "Segoe UI";
122 | padding-left: 10px;
123 |
124 |
125 | USERNAME
126 |
127 |
128 |
129 |
130 |
131 | 110
132 | 220
133 | 481
134 | 81
135 |
136 |
137 |
138 | font: 15pt "Segoe UI";
139 | padding-left: 10px;
140 |
141 |
142 | PASSWORD
143 |
144 |
145 |
146 |
147 |
148 | 140
149 | 390
150 | 141
151 | 61
152 |
153 |
154 |
155 | border-image: url(:/JarvisImages/retryButton.png);
156 | border-image: url(:/JarvisImages/loginButton.png);
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 | false
165 |
166 |
167 |
168 | 110
169 | 50
170 | 511
171 | 301
172 |
173 |
174 |
175 | border: none;
176 |
177 |
178 |
179 |
180 |
181 | ../JarvisImages/loginFailed.gif
182 |
183 |
184 | false
185 |
186 |
187 |
188 |
189 |
190 | 390
191 | 390
192 | 171
193 | 61
194 |
195 |
196 |
197 | border-image: url(:/JarvisImages/newUser.png);
198 |
199 |
200 |
201 |
202 |
203 |
204 |
205 |
206 |
207 |
208 |
--------------------------------------------------------------------------------
/JarvisGUI/jarvisMainGUI.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'form.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.15.10
6 | #
7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is
8 | # run again. Do not edit this file unless you know what you are doing.
9 |
10 |
11 | from PyQt5 import QtCore, QtGui, QtWidgets
12 |
13 |
class Ui_JarvisMainGUI(object):
    """Main Jarvis dashboard UI generated by pyuic5 from form.ui.

    Hosts the logo, the listening / arc-reactor image labels, an Iron Man
    side image, a terminal frame (output pane, command line edit, submit
    button) and an exit button on a black 1280x720 dialog.

    NOTE(review): pixmap paths are absolute local Windows paths and will
    render nothing on other machines; a .qrc resource file would be
    portable.
    """

    def setupUi(self, Dialog):
        """Create, position and style every child widget of *Dialog*.

        FIX: the backslash path literals were plain strings containing
        invalid escape sequences ("\\B", "\\P", "\\i", ...). They are now raw
        strings, which produce byte-identical path values while avoiding
        Python's invalid-escape-sequence warnings (a future SyntaxError).
        """
        Dialog.setObjectName("Dialog")
        Dialog.resize(1280, 720)
        Dialog.setStyleSheet("background-color: rgb(0, 0, 0);")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(370, 10, 601, 91))
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap(r"D:\BSc CSD Sem 6\Project\JarvisGUI\JarvisImages\logo.png"))
        self.label.setScaledContents(True)
        self.label.setObjectName("label")
        self.listeningImg = QtWidgets.QLabel(Dialog)
        self.listeningImg.setEnabled(False)
        self.listeningImg.setGeometry(QtCore.QRect(10, 100, 361, 261))
        self.listeningImg.setText("")
        self.listeningImg.setPixmap(QtGui.QPixmap(r"D:\BSc CSD Sem 6\Project\JarvisGUI\JarvisImages\voicerecog.gif"))
        self.listeningImg.setScaledContents(True)
        self.listeningImg.setObjectName("listeningImg")
        self.label_3 = QtWidgets.QLabel(Dialog)
        self.label_3.setGeometry(QtCore.QRect(1020, 100, 261, 481))
        self.label_3.setStyleSheet("background-color:transparent;")
        self.label_3.setText("")
        self.label_3.setPixmap(QtGui.QPixmap(r"D:\BSc CSD Sem 6\Project\JarvisGUI\JarvisImages\ironman.webp"))
        self.label_3.setScaledContents(True)
        self.label_3.setObjectName("label_3")
        self.arc_reactor = QtWidgets.QLabel(Dialog)
        self.arc_reactor.setGeometry(QtCore.QRect(390, 110, 511, 291))
        self.arc_reactor.setText("")
        self.arc_reactor.setPixmap(QtGui.QPixmap(r"D:\BSc CSD Sem 6\Project\JarvisGUI\JarvisImages\arcreactor.gif"))
        self.arc_reactor.setScaledContents(True)
        self.arc_reactor.setObjectName("arc_reactor")
        self.label_5 = QtWidgets.QLabel(Dialog)
        self.label_5.setGeometry(QtCore.QRect(380, 110, 111, 91))
        self.label_5.setText("")
        self.label_5.setPixmap(QtGui.QPixmap(r"D:\BSc CSD Sem 6\Project\JarvisGUI\JarvisImages\blkimg.png"))
        self.label_5.setScaledContents(True)
        self.label_5.setObjectName("label_5")
        self.label_6 = QtWidgets.QLabel(Dialog)
        self.label_6.setGeometry(QtCore.QRect(800, 310, 111, 91))
        self.label_6.setText("")
        self.label_6.setPixmap(QtGui.QPixmap(r"D:\BSc CSD Sem 6\Project\JarvisGUI\JarvisImages\blkimg.png"))
        self.label_6.setScaledContents(True)
        self.label_6.setObjectName("label_6")
        self.micImg = QtWidgets.QLabel(Dialog)
        self.micImg.setGeometry(QtCore.QRect(10, 100, 361, 261))
        self.micImg.setText("")
        self.micImg.setPixmap(QtGui.QPixmap(r"D:\BSc CSD Sem 6\Project\JarvisGUI\JarvisImages\listening.gif"))
        self.micImg.setScaledContents(True)
        self.micImg.setObjectName("micImg")
        self.frame = QtWidgets.QFrame(Dialog)
        self.frame.setGeometry(QtCore.QRect(30, 420, 1041, 271))
        self.frame.setStyleSheet("color: rgb(255, 255, 255);\n"
                                 "border-color: rgb(255, 255, 255);\n"
                                 "border-radius: 30px;\n"
                                 "border-width: 5px 5px 5px 5px;\n"
                                 "border-style: solid;")
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.submitBtn = QtWidgets.QPushButton(self.frame)
        self.submitBtn.setGeometry(QtCore.QRect(880, 200, 141, 51))
        self.submitBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.submitBtn.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/startButton.png);")
        self.submitBtn.setText("")
        self.submitBtn.setObjectName("submitBtn")
        self.terminalInputBox = QtWidgets.QLineEdit(self.frame)
        self.terminalInputBox.setGeometry(QtCore.QRect(20, 200, 841, 51))
        self.terminalInputBox.setStyleSheet("font: 15pt \"Segoe UI\";\n"
                                            "padding-left: 10px;")
        self.terminalInputBox.setObjectName("terminalInputBox")
        self.terminalOutputBox = QtWidgets.QPlainTextEdit(self.frame)
        self.terminalOutputBox.setGeometry(QtCore.QRect(20, 20, 1001, 161))
        self.terminalOutputBox.setObjectName("terminalOutputBox")
        self.terminalOutputBox.setStyleSheet("font: 12pt \"Segoe UI\";\n"
                                             "padding-left: 10px;")
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(1110, 620, 131, 61))
        self.pushButton.setStyleSheet("border-image: url(D:/BSc CSD Sem 6/Project/JarvisGUI/JarvisImages/exitButton.png);")
        self.pushButton.setText("")
        self.pushButton.setObjectName("pushButton")

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Install translatable strings (window title, input placeholder)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.terminalInputBox.setPlaceholderText(_translate("Dialog", "Enter your command"))
107 |
108 |
if __name__ == "__main__":
    import sys

    # Stand-alone preview: mount the generated dashboard on a bare
    # QDialog and run the Qt event loop until it is closed.
    application = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    dashboard = Ui_JarvisMainGUI()
    dashboard.setupUi(Dialog)
    Dialog.show()
    sys.exit(application.exec_())
118 |
--------------------------------------------------------------------------------
/JarvisGUI/SignUpUI/form.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | Dialog
4 |
5 |
6 |
7 | 0
8 | 0
9 | 1280
10 | 720
11 |
12 |
13 |
14 | Dialog
15 |
16 |
17 | background-color: rgb(0, 0, 0);
18 |
19 |
20 |
21 |
22 | 370
23 | 10
24 | 601
25 | 91
26 |
27 |
28 |
29 |
30 |
31 |
32 | ../JarvisImages/logo.png
33 |
34 |
35 | true
36 |
37 |
38 |
39 |
40 | false
41 |
42 |
43 |
44 | 10
45 | 100
46 | 361
47 | 261
48 |
49 |
50 |
51 |
52 |
53 |
54 | ../JarvisImages/voicerecog.gif
55 |
56 |
57 | true
58 |
59 |
60 |
61 |
62 |
63 | 1020
64 | 100
65 | 261
66 | 481
67 |
68 |
69 |
70 | background-color:transparent;
71 |
72 |
73 |
74 |
75 |
76 | ../JarvisImages/ironman.webp
77 |
78 |
79 | true
80 |
81 |
82 |
83 |
84 |
85 | 390
86 | 110
87 | 511
88 | 291
89 |
90 |
91 |
92 |
93 |
94 |
95 | ../JarvisImages/arcreactor.gif
96 |
97 |
98 | true
99 |
100 |
101 |
102 |
103 |
104 | 380
105 | 110
106 | 111
107 | 91
108 |
109 |
110 |
111 |
112 |
113 |
114 | ../JarvisImages/blkimg.png
115 |
116 |
117 | true
118 |
119 |
120 |
121 |
122 |
123 | 800
124 | 310
125 | 111
126 | 91
127 |
128 |
129 |
130 |
131 |
132 |
133 | ../JarvisImages/blkimg.png
134 |
135 |
136 | true
137 |
138 |
139 |
140 |
141 |
142 | 10
143 | 100
144 | 361
145 | 261
146 |
147 |
148 |
149 |
150 |
151 |
152 | ../JarvisImages/listening.gif
153 |
154 |
155 | true
156 |
157 |
158 |
159 |
160 |
161 | 30
162 | 420
163 | 1041
164 | 271
165 |
166 |
167 |
168 | color: rgb(255, 255, 255);
169 | border-color: rgb(255, 255, 255);
170 | border-radius: 30px;
171 | border-width: 5px 5px 5px 5px;
172 | border-style: solid;
173 |
174 |
175 | QFrame::StyledPanel
176 |
177 |
178 | QFrame::Raised
179 |
180 |
181 |
182 |
183 | 880
184 | 200
185 | 141
186 | 51
187 |
188 |
189 |
190 | PointingHandCursor
191 |
192 |
193 | border-image: url(:/JarvisImages/startButton.png);
194 |
195 |
196 |
197 |
198 |
199 |
200 |
201 |
202 | 20
203 | 200
204 | 841
205 | 51
206 |
207 |
208 |
209 | font: 15pt "Segoe UI";
210 | padding-left: 10px;
211 |
212 |
213 | Enter your command
214 |
215 |
216 |
217 |
218 |
219 | 20
220 | 20
221 | 1001
222 | 161
223 |
224 |
225 |
226 |
227 |
228 |
229 |
230 | 1110
231 | 620
232 | 131
233 | 61
234 |
235 |
236 |
237 | border-image: url(:/JarvisImages/exitButton.png);
238 |
239 |
240 |
241 |
242 |
243 |
244 |
245 |
246 |
247 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # JARVIS - Personal AI Assistant
2 |
3 | A comprehensive Python-based AI assistant inspired by Tony Stark's JARVIS, featuring voice recognition, face recognition, GUI interface, and various automation capabilities.
4 |
5 | ## 🌟 Features
6 |
7 | ### Core Functionality
8 | - **Voice Recognition & Speech Synthesis** - Natural language interaction with text-to-speech capabilities
9 | - **Face Recognition** - Secure user authentication and identification
10 | - **GUI Interface** - Modern PyQt-based graphical user interface
11 | - **Email Integration** - Send and manage emails through voice commands
12 | - **Web Automation** - Control various web applications and services
13 |
14 | ### Smart Capabilities
15 | - **Weather Updates** - Real-time weather information
16 | - **News Integration** - Latest news updates through API
17 | - **YouTube Integration** - Search, play, and download videos
18 | - **Google Maps Integration** - Navigation and location services
19 | - **WhatsApp Automation** - Send messages through voice commands
20 | - **Gmail Integration** - Email management and composition
21 |
22 | ### Advanced Features
23 | - **Gesture Control** - Hand gesture recognition for system control
24 | - **Object Detection** - Computer vision for object identification
25 | - **File Search** - Intelligent file searching and management
26 | - **System Control** - OS-level operations and application management
27 | - **Brightness & Volume Control** - System parameter adjustments
28 | - **Scheduled Tasks** - Reminders and alarms functionality
29 |
30 | ### Security & User Management
31 | - **Multi-user Support** - Individual user profiles and authentication
32 | - **Voice Authentication** - Speaker verification for added security
33 | - **Face Recognition Login** - Biometric authentication system
34 | - **User Registration** - New user signup and profile creation
35 |
36 | ## 🛠️ Installation
37 |
38 | ### Prerequisites
39 | - Python 3.7 or higher
40 | - Webcam (for face recognition)
41 | - Microphone (for voice commands)
42 | - Internet connection (for web services)
43 |
44 | ### Dependencies
45 | Install the required packages using pip:
46 |
47 | ```bash
48 | pip install -r requirements.txt
49 | ```
50 |
51 | Key dependencies include:
52 | - PyQt5/PyQt6 (GUI framework)
53 | - OpenCV (Computer vision)
54 | - SpeechRecognition (Voice processing)
55 | - pyttsx3 (Text-to-speech)
56 | - face_recognition (Face detection and recognition)
57 | - requests (API communication)
58 | - selenium (Web automation)
59 | - pywhatkit (WhatsApp automation)
60 |
61 | ## 🚀 Quick Start
62 |
63 | ### 1. Initial Setup
64 | ```bash
65 | # Clone the repository
git clone <repository-url>
67 | cd jarvis-ai-assistant
68 |
69 | # Install dependencies
70 | pip install -r requirements.txt
71 |
72 | # Run the main application
73 | python jarvisMAIN.py
74 | ```
75 |
76 | ### 2. First Time User Setup
77 | 1. Launch the application
78 | 2. Click "New User" to register
79 | 3. Complete face registration for biometric login
80 | 4. Configure voice settings and preferences
81 |
82 | ### 3. Login
83 |
84 |
85 |
- Use face recognition for quick login
- Alternative: Username/password authentication
- Voice authentication for enhanced security
95 |
96 |
97 |
98 |
99 | ## 📁 Project Structure
100 |
101 | ```
102 | jarvis-ai-assistant/
103 | ├── JarvisGUI/ # Main GUI application
104 | │ ├── jarvisMAIN.py # Primary application entry point
105 | │ ├── loginWindowGUI.py # Login interface
106 | │ ├── signUpGUI.py # User registration
107 | │ └── FaceRecogGUI/ # Face recognition modules
108 | ├── Gesture Control/ # Hand gesture recognition
109 | ├── FaceRecognition.py # Core face recognition
110 | ├── AdvancedSpeech.py # Speech processing
111 | ├── WeatherUpdates.py # Weather API integration
112 | ├── NewsApi.py # News service integration
113 | ├── web_*.py # Web automation modules
114 | ├── Email.py # Email functionality
115 | └── requirements.txt # Project dependencies
116 | ```
117 |
118 | ## 🎯 Usage
119 |
120 | ### Voice Commands
121 | - "Hello Jarvis" - Wake up command
122 | - "What's the weather?" - Get weather updates
123 | - "Send email to [contact]" - Email composition
124 | - "Play [song] on YouTube" - Music playback
125 | - "Open [application]" - Launch applications
126 | - "Set reminder for [time]" - Schedule reminders
127 |
128 | ### GUI Features
129 | - **Dashboard** - Central control panel
130 | - **Settings** - Customize preferences and configurations
131 | - **User Management** - Add/remove users and manage profiles
132 | - **Voice Training** - Improve speech recognition accuracy
133 | - **System Monitor** - View system status and performance
134 |
135 | ### Gesture Controls
136 | - Hand gestures for volume control
137 | - Navigation gestures for presentations
138 | - System control through hand movements
139 |
140 | ## 🔧 Configuration
141 |
142 | ### API Keys
143 | Configure the following API keys in your environment or config files:
144 | - Weather API (OpenWeatherMap)
145 | - News API
146 | - Google Maps API
147 | - Email service credentials
148 |
149 | ### Voice Settings
150 | - Adjust speech rate and volume
151 | - Select voice gender and accent
152 | - Configure wake word sensitivity
153 |
154 | ### Face Recognition
155 | - Register multiple faces per user
156 | - Adjust recognition threshold
157 | - Configure fallback authentication
158 |
159 | ## 🤝 Contributing
160 |
161 | 1. Fork the repository
162 | 2. Create a feature branch (`git checkout -b feature/AmazingFeature`)
163 | 3. Commit your changes (`git commit -m 'Add some AmazingFeature'`)
164 | 4. Push to the branch (`git push origin feature/AmazingFeature`)
165 | 5. Open a Pull Request
166 |
167 | ## 📝 License
168 |
169 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
170 |
171 | ## 🙏 Acknowledgments
172 |
173 | - Inspired by Marvel's JARVIS AI assistant
174 | - Built with Python and open-source libraries
175 | - Thanks to the computer vision and speech recognition communities
176 |
177 | ## 🔮 Future Enhancements
178 |
179 | - [ ] Smart home integration (IoT devices)
180 | - [ ] Natural language processing improvements
181 | - [ ] Mobile app companion
182 | - [ ] Cloud synchronization
183 | - [ ] Advanced machine learning capabilities
184 | - [ ] Multi-language support
185 |
186 | ---
187 |
188 | *"Sometimes you gotta run before you can walk."* - Tony Stark
189 |
--------------------------------------------------------------------------------
/JarvisGUI/jarvisMAIN.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from PyQt5.QtWidgets import QWidget, QApplication
3 | from PyQt5.QtCore import QThread, pyqtSignal, Qt
4 | from PyQt5.QtGui import QMovie
5 | from jarvisMainGUI import Ui_JarvisMainGUI
6 | from sklearn.feature_extraction.text import CountVectorizer
7 | from sklearn.metrics.pairwise import cosine_similarity
8 | import pandas as pd
9 | import pyautogui as pg
10 | import spacy
11 | import text2speech as t2s
12 | import speech2text as s2t
13 | import string
14 | import Email
15 | import WeatherUpdates
16 | import utubeVideoDownloader
17 | import gptIntegration
18 | import ScheduleGmeet
19 | import pyttsx3
20 | import searchFile
21 | import winsearch as ws
22 | import web_function as wb
23 | import increaseBrightness as iB
24 | import increaseVolume as iV
25 |
class VoiceListener(QThread):
    """Background thread that continuously polls speech-to-text.

    Emits ``new_text_signal`` with the lower-cased transcript whenever
    ``s2t.voice2text`` returns a non-empty result.
    """

    new_text_signal = pyqtSignal(str)

    def __init__(self, parent=None):
        super(VoiceListener, self).__init__(parent)

    def run(self):
        """Poll the recognizer forever, emitting each recognized phrase."""
        ln = "en"  # language code handed to voice2text
        while True:
            input_text = s2t.voice2text(ln)
            # BUG FIX: voice2text can return a falsy result when nothing
            # was recognized (callers elsewhere in this file retry on
            # falsy returns). The original lower-cased unconditionally,
            # so a None result raised AttributeError and killed the
            # listener thread. Guard first, then lower-case.
            if input_text:
                self.new_text_signal.emit(input_text.lower())
39 |
40 |
41 | class LoginWindow(QWidget):
    def __init__(self):
        """Build the Jarvis main window and start background listening.

        Side effects: initialises the pyttsx3 TTS engine, loads the spaCy
        English model, reads two intent CSV datasets from the working
        directory, speaks an activation message, and starts the
        VoiceListener thread.
        """
        super(LoginWindow, self).__init__()
        print("Setting up GUI")
        self.jarvis_ui = Ui_JarvisMainGUI()
        self.jarvis_ui.setupUi(self)
        self.jarvis_ui.pushButton.clicked.connect(self.close)

        # Initialize TTS engine and spaCy NLP model
        self.engine = pyttsx3.init()
        self.nlp = spacy.load('en_core_web_sm')

        # OS-command dataset: drop rows with missing 'text', reindex,
        # then fit a bag-of-words vectorizer over the command phrases.
        self.df = pd.read_csv('os_dataset.csv')
        self.df.dropna(subset=['text'], inplace=True)
        self.df.reset_index(drop=True, inplace=True)
        self.vectorizer = CountVectorizer().fit(self.df['text'])

        # Web-command dataset, prepared the same way with its own vectorizer.
        self.dfw = pd.read_csv('web_Dataset.csv')
        self.dfw.dropna(subset=['text'], inplace=True)
        self.dfw.reset_index(drop=True, inplace=True)
        self.vectorizerw = CountVectorizer().fit(self.dfw['text'])

        t2s.text2speech(self.engine, "Jarvis Activated")
        # Connect voice recognition function to GUI button
        self.jarvis_ui.submitBtn.clicked.connect(self.run_jarvis)

        # Connect text box return-pressed signal to execute the typed command
        self.jarvis_ui.terminalInputBox.returnPressed.connect(self.run_jarvis)

        # Start continuous listening in a separate thread so the GUI
        # stays responsive while the microphone is being polled.
        self.listener_thread = VoiceListener()
        self.listener_thread.new_text_signal.connect(self.execute_command)
        self.listener_thread.start()
        self.runAllMovies()
77 |
78 | def terminalPrint(self, text):
79 | self.jarvis_ui.terminalOutputBox.appendPlainText(text)
80 |
81 | def runAllMovies(self):
82 | self.jarvis_ui.listeningMovie = QMovie("D:\BSc CSD Sem 6\Project\JarvisGUI\JarvisImages\\voicerecog.gif")
83 | self.jarvis_ui.micImg.setMovie(self.jarvis_ui.listeningMovie)
84 | self.jarvis_ui.listeningMovie.start()
85 |
86 | self.jarvis_ui.arcreactorMovie = QMovie("D:\BSc CSD Sem 6\Project\JarvisGUI\JarvisImages\\arcreactor.gif")
87 | self.jarvis_ui.arc_reactor.setMovie(self.jarvis_ui.arcreactorMovie)
88 | self.jarvis_ui.arcreactorMovie.start()
89 |
90 | def preprocess_text(self, text):
91 | doc = self.nlp(text)
92 | tokens = [token.text.lower() for token in doc if not token.is_stop and token.text not in string.punctuation]
93 | processed_text = " ".join(tokens)
94 | return processed_text
95 |
96 | def predict_label(self, input_text, vectorizer, df):
97 | preprocessed_input_text = self.preprocess_text(input_text)
98 | input_vector = vectorizer.transform([preprocessed_input_text])
99 | similarities = cosine_similarity(input_vector, vectorizer.transform(df['text']))
100 | max_index = similarities.argmax()
101 | max_similarity = similarities[0, max_index]
102 | return df['label'][max_index], max_similarity
103 |
104 | def run_jarvis(self):
105 | input_text = self.jarvis_ui.terminalInputBox.text().lower()
106 | ln = "en"
107 | if input_text:
108 | self.terminalPrint(f"You: {input_text}")
109 | self.execute_command(input_text)
110 | self.jarvis_ui.terminalInputBox.clear()
111 |
112 | def execute_command(self, input_text):
113 | ln = "en"
114 |
115 | if "send email" in input_text:
116 | # Email sending logic
117 | t2s.text2speech(self.engine, "Tell recipient's email address")
118 | receiver_email = s2t.voice2text("en").lower().replace(" ", "")
119 | while not receiver_email:
120 | receiver_email = s2t.voice2text("en").lower().replace(" ", "")
121 | t2s.text2speech(self.engine, "Subject of the email")
122 | subject = s2t.voice2text("en")
123 | while not subject:
124 | subject = s2t.voice2text("en")
125 | t2s.text2speech(self.engine, "Body of the email")
126 | body = s2t.voice2text("en")
127 | while not body:
128 | body = s2t.voice2text("en")
129 | Email.send_email(receiver_email, subject, body)
130 | self.terminalPrint("Jarvis: Email sent successfully")
131 |
132 | elif "activate type mode" in input_text:
133 | t2s.text2speech(self.engine, "ok sir i am ready")
134 | while True:
135 | st = s2t.voice2text(ln)
136 | st1 = st.lower()
137 | if "stop typing" in st1:
138 | t2s.text2speech(self.engine, "Typing Stopped")
139 | break
140 | elif "enter" in st1:
141 | pg.press('enter')
142 | elif "backspace" in st1:
143 | pg.press('backspace')
144 | elif "tab" in st1:
145 | pg.press('tab')
146 | else:
147 | pg.write(st + " ")
148 |
149 | elif "save" in input_text and "file" in input_text:
150 | # Extracting the file name from the input text
151 | file_name_index = input_text.find("as") + 3 # Index after "as" keyword
152 | file_name = input_text[file_name_index:].strip() # Extracting the file name
153 | file_path = file_name + ".txt" # Appending .txt extension
154 |
155 | # Sending keyboard shortcuts to save the file
156 | pg.hotkey('ctrl', 's')
157 | pg.write(file_path) # Typing the file path
158 | pg.press('enter') # Pressing Enter to save
159 | pg.press('enter') # Pressing Enter again to confirm if needed
160 |
161 | t2s.text2speech(self.engine, "File saved successfully")
162 |
163 | elif "close app" in input_text or "close current app" in input_text or "close it" in input_text:
164 | pg.click(300, 300)
165 | pg.hotkey('alt', 'F4')
166 |
167 | elif "switch next" in input_text:
168 | pg.click(300, 300)
169 | pg.hotkey('alt', 'tab')
170 |
171 | elif "switch previous" in input_text:
172 | pg.click(300, 300)
            # Tail of the preceding voice-command branch (its `elif` header is
            # above this excerpt): cycle backwards through open windows.
            pg.hotkey('alt', 'shift', 'tab')

        elif "check weather" in input_text:
            # Weather check: prompt (and re-prompt until a non-empty reply)
            # for a city name, then report today's temperature for it.
            t2s.text2speech(self.engine, "Which city do you want to check weather for?")
            city_name = s2t.voice2text("en")
            while not city_name:
                t2s.text2speech(self.engine, "Which city do you want to check weather for?")
                city_name = s2t.voice2text("en")
            # NOTE(review): str.strip() returns a new string; the result is
            # discarded here, so surrounding whitespace is NOT removed.
            city_name.strip()
            if city_name.lower() != 'exit':
                weather = WeatherUpdates.get_weather(city_name)
                self.terminalPrint(f"Jarvis: Todays weather in {city_name} is {weather} degree celcius")
        elif "download youtube video" in input_text:
            # YouTube download: dictate the URL and destination path, then
            # hand off to the downloader helper.
            t2s.text2speech(self.engine, "Enter the URL of the video:")
            url = s2t.voice2text("en")
            t2s.text2speech(self.engine, "Enter the path to save the video:")
            save_path = s2t.voice2text("en")
            utubeVideoDownloader.download_video(url, save_path)
            self.terminalPrint("Jarvis: The video is saved to your downloads")
        elif "go to interactive mode" in input_text:
            # Free-form conversation via the GPT integration helper.
            gptIntegration.chat()
        elif "schedule meeting" in input_text:
            # Delegate Google Meet scheduling to the helper module.
            ScheduleGmeet.main()
            self.terminalPrint("Jarvis: Yes sir")

        elif "search file" in input_text:
            # File search: dictate a file name (recognized in the current
            # language code `ln`, set further up/below in this chain) and
            # open Windows search for it.
            t2s.text2speech(self.engine, "What is the name of the file you want to search?")
            name = s2t.voice2text(ln)
            searchFile.open_windows_search(name)
        elif "change to tamil" in input_text:
            # Switch the speech-recognition language code to Tamil.
            ln = "ta"
            self.terminalPrint("Jarvis: Now, you can speak in Tamil")
        elif "change to english" in input_text:
            # Switch the speech-recognition language code back to English.
            ln = "en"
            self.terminalPrint("Jarvis: Now, you can speak in English")
        elif "switch to jarvis" in input_text:
            # Male TTS voice (Windows SAPI registry token for "David").
            # NOTE(review): non-raw string — `\S`, `\M`, `\V`, `\T` are
            # invalid escape sequences that CPython currently keeps literally
            # but warns about; consider a raw string r'...'.
            t2s.switch_voice(self.engine, 'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_DAVID_11.0')
            self.terminalPrint("Jarvis: Voice changed to Male")
        elif "switch to friday" in input_text:
            # Female TTS voice (Windows SAPI "Zira"); same raw-string caveat
            # as the "switch to jarvis" branch above.
            t2s.switch_voice(self.engine, 'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0')
            self.terminalPrint("Jarvis: Voice changed to female")
        elif "increase brightness" in input_text:
            iB.increase_brightness()
            t2s.text2speech(self.engine, "Brightness increased")
        elif "decrease brightness" in input_text:
            iB.decrease_brightness()
            t2s.text2speech(self.engine, "Brightness decreased")
        elif "increase volume" in input_text:
            iV.process_command("increase")
            t2s.text2speech(self.engine, "Volume increased")
        elif "decrease volume" in input_text:
            iV.process_command("decrease")
            t2s.text2speech(self.engine, "Volume decreased")
        else:
            # No hard-coded phrase matched: fall back to two similarity
            # classifiers and act on whichever scores higher.
            # (Presumably one is trained on OS commands and the other on
            # web commands, given the branches below — verify against the
            # data behind self.df / self.dfw.)
            predicted_label, max_similarity = self.predict_label(input_text, self.vectorizer, self.df)
            print("Predicted label:", predicted_label)
            print("Maximum similarity score:", max_similarity)

            predicted_labelw, max_similarityw = self.predict_label(input_text, self.vectorizerw, self.dfw)
            print("Predicted label:", predicted_labelw)
            print("Maximum similarity score:", max_similarityw)
            # Ad-hoc score adjustments to disambiguate frequent collisions.
            # NOTE(review): the firefox penalty (-0.5) is much larger than
            # the powerpoint penalty (-0.1) — confirm the asymmetry is
            # intentional and not a typo.
            if predicted_label == "open powerpoint" and "powerpoint" in input_text:
                max_similarity += 0.1
            elif predicted_label == "open powerpoint":
                max_similarity -= 0.1
            if predicted_label == "open firefox" and "firefox" in input_text:
                max_similarityw += 0.1
            elif predicted_label == "open firefox":
                max_similarityw -= 0.5

            if max_similarity < max_similarityw:
                # Web-model prediction wins.
                # NOTE(review): pausing the listener via a private flag and
                # later calling start() again assumes the thread object is
                # restartable — a plain threading.Thread cannot be started
                # twice; confirm the listener_thread implementation.
                self.listener_thread._is_running = False
                if "open" in predicted_labelw:
                    app = predicted_labelw.replace("open ", "")
                    ip = wb.web_open(app, input_text)
                    print(app)
                    t2s.text2speech(self.engine, "Yes sir")
                    self.listener_thread.start()
                elif "close" in predicted_labelw:
                    app = predicted_labelw.replace("close ", "")
                    # Click to focus the window at screen position (300, 300),
                    # then Ctrl+W closes the active tab/window. `app` itself
                    # is unused after this point.
                    pg.click(300, 300)
                    pg.hotkey('ctrl', 'w')
                    t2s.text2speech(self.engine, "Yes sir")
            else:
                # OS-model prediction wins.
                if "open" in predicted_label:
                    app = predicted_label.replace("open ", "")
                    print("App: " + app)
                    ws.os_open(app)

                elif "close" in predicted_label:
                    app = predicted_label.replace("close ", "")
                    ws.close_application(app)
                    t2s.text2speech(self.engine, "Yes sir")
                elif "date" in predicted_label:
                    # NOTE(review): stripping "date " and passing the rest to
                    # os_open looks copy-pasted from the "open" branch —
                    # verify this is the intended handling of date queries.
                    app = predicted_label.replace("date ", "")
                    ws.os_open(app)
                elif "lock the screen" in predicted_label:
                    # Win+L locks the workstation.
                    pg.hotkey('win', 'l')
                    self.terminalPrint("Jarvis: Screen locked")
                    t2s.text2speech(self.engine, "Screen locked")
                elif "go to sleep" in predicted_label or "screen off" in predicted_label:
                    # Win+X opens the Windows quick-link menu; the 'u', 's'
                    # chord is meant to pick shut-down/sleep options.
                    # NOTE(review): this is keyboard-shortcut dependent and
                    # fragile — confirm it actually sleeps the machine.
                    pg.hotkey('win', 'x')
                    pg.hotkey('u', 's')
                    self.terminalPrint("Jarvis: Going to sleep")
                    t2s.text2speech(self.engine, "Going to sleep")

289 |
if __name__ == "__main__":
    # Bootstrap the Qt application: the login window is the entry screen,
    # and the process exits with the event loop's return code.
    qt_app = QApplication(sys.argv)
    login_window = LoginWindow()
    login_window.show()
    sys.exit(qt_app.exec_())
295 |
--------------------------------------------------------------------------------