├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   └── workflows
│       └── codeql.yml
├── .gitignore
├── .idea
│   ├── .gitignore
│   ├── Python-Voice-Assistant.iml
│   ├── inspectionProfiles
│   │   ├── Project_Default.xml
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   ├── modules.xml
│   └── vcs.xml
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Documentation
│   ├── Picture1.png
│   ├── Picture2.png
│   ├── Picture3.png
│   ├── Picture4.png
│   ├── Picture5.png
│   ├── Picture6.png
│   └── USEGPT.md
├── LICENSE
├── README.md
├── SECURITY.md
├── Setup.bat
├── Setup.sh
├── jarvis.py
├── memories.txt
├── profile.txt
├── requirements.txt
└── tempCodeRunnerFile.py
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Browser [e.g. chrome, safari]
29 | - Version [e.g. 22]
30 |
31 | **Smartphone (please complete the following information):**
32 | - Device: [e.g. iPhone6]
33 | - OS: [e.g. iOS8.1]
34 | - Browser [e.g. stock browser, safari]
35 | - Version [e.g. 22]
36 |
37 | **Additional context**
38 | Add any other context about the problem here.
39 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/workflows/codeql.yml:
--------------------------------------------------------------------------------
1 | name: 'CodeQL'
2 |
3 | on:
4 | push:
5 | branches: ['main']
6 | pull_request:
7 | branches: ['main']
8 | schedule:
9 | - cron: '37 16 * * 3'
10 |
11 | jobs:
12 | analyze:
13 | name: Analyze
14 |
15 | runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
16 | timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }}
17 | permissions:
18 | actions: read
19 | contents: read
20 | security-events: write
21 |
22 | strategy:
23 | fail-fast: false
24 | matrix:
25 | language: ['python']
26 |
27 | steps:
28 | - name: Checkout repository
29 | uses: actions/checkout@v4
30 |
31 | # Initializes the CodeQL tools for scanning.
32 | - name: Initialize CodeQL
33 | uses: github/codeql-action/init@v3
34 | with:
35 | languages: ${{ matrix.language }}
36 |
37 | - name: Autobuild
38 | uses: github/codeql-action/autobuild@v3
39 |
40 | - name: Perform CodeQL Analysis
41 | uses: github/codeql-action/analyze@v3
42 | with:
43 | category: '/language:${{matrix.language}}'
44 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | env/
2 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 |
--------------------------------------------------------------------------------
/.idea/Python-Voice-Assistant.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | .
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | [homepage]: https://www.contributor-covenant.org
125 |
126 | For answers to common questions about this code of conduct, see the FAQ at
127 | https://www.contributor-covenant.org/faq. Translations are available at
128 | https://www.contributor-covenant.org/translations.
129 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Documentation/Picture1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/subh05sus/Python-Voice-Assistant/8531a761d64b53e2c0f53a2af5effee7abb6493b/Documentation/Picture1.png
--------------------------------------------------------------------------------
/Documentation/Picture2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/subh05sus/Python-Voice-Assistant/8531a761d64b53e2c0f53a2af5effee7abb6493b/Documentation/Picture2.png
--------------------------------------------------------------------------------
/Documentation/Picture3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/subh05sus/Python-Voice-Assistant/8531a761d64b53e2c0f53a2af5effee7abb6493b/Documentation/Picture3.png
--------------------------------------------------------------------------------
/Documentation/Picture4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/subh05sus/Python-Voice-Assistant/8531a761d64b53e2c0f53a2af5effee7abb6493b/Documentation/Picture4.png
--------------------------------------------------------------------------------
/Documentation/Picture5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/subh05sus/Python-Voice-Assistant/8531a761d64b53e2c0f53a2af5effee7abb6493b/Documentation/Picture5.png
--------------------------------------------------------------------------------
/Documentation/Picture6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/subh05sus/Python-Voice-Assistant/8531a761d64b53e2c0f53a2af5effee7abb6493b/Documentation/Picture6.png
--------------------------------------------------------------------------------
/Documentation/USEGPT.md:
--------------------------------------------------------------------------------
1 | ## Using ChatGPT
2 | To use ChatGPT, you need an OpenAI API key. To get one, follow the steps below.
3 | ### Getting API Keys
4 | 1) First go to https://openai.com/
5 |
6 | 
7 |
8 | 2) Click on API and sign up, making sure to state that the account is for personal use.
9 | 3) Go to Manage Accounts, then Usage, to make sure you have some free credits.
10 |
11 | 
12 |
13 | 
14 |
15 | 4) Now go to API Keys and generate an API key.
16 |
17 | 
18 |
19 | 
20 |
21 | 5) Copy the API key and paste it into the code.
22 |
23 | 
24 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Subhadip Saha
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Python-Voice-Assistant
2 |
3 | A Python-based voice assistant that can greet you and perform a variety of tasks, including searching the web, opening social media sites, creating a to-do list, and much more.
4 |
5 | 
6 |
7 |
8 |
9 |
10 |
11 |
12 | [](https://github.com/ellerbrock/open-source-badges/)
13 | 
14 | 
15 | 
16 | 
17 | 
18 | 
19 |
20 | 
21 |
22 | 
23 | 
24 | 
25 | 
26 | 
27 |
28 |
29 |
30 | ## How does this code work?
31 |
32 | This voice assistant is built from a combination of Python packages: pyttsx3 for text-to-speech, SpeechRecognition for voice input, and libraries such as wikipedia, pywhatkit, and openai for individual commands, making a wide range of functionality available from a single script.
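
At its core, `jarvis.py` runs a listen, recognize, respond loop: it records a phrase from the microphone, transcribes it with Google's speech recognition, matches keywords in the text, and speaks a reply with pyttsx3. The snippet below is a minimal, simplified sketch of that pattern (not the full assistant), assuming a working microphone and an internet connection:

```python
import datetime

import pyttsx3
import speech_recognition as sr

engine = pyttsx3.init()

def speak(text):
    # Print and speak the assistant's reply
    print(text)
    engine.say(text)
    engine.runAndWait()

def take_command():
    # Capture one phrase from the microphone and transcribe it
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        audio = recognizer.listen(source)
    try:
        return recognizer.recognize_google(audio, language="en-in").lower()
    except (sr.UnknownValueError, sr.RequestError):
        return "none"  # could not understand the audio or reach the service

while True:
    query = take_command()
    if "the time" in query:
        speak(datetime.datetime.now().strftime("%H:%M:%S"))
    elif "stop" in query:
        speak("Goodbye!")
        break
```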
33 |
34 | ## Installation
35 |
36 | To install the required dependencies, use pip by running:
37 |
38 | ```sh
39 | pip install -r requirements.txt
40 | ```
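
If you prefer to keep the dependencies isolated, you can install them inside a virtual environment first (the repository's `.gitignore` already ignores an `env/` folder). A typical setup, assuming Python 3 is on your PATH, looks like this:

```sh
python -m venv env
source env/bin/activate   # on Windows use: env\Scripts\activate
pip install -r requirements.txt
```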
41 |
42 | ### Script-based Installation
43 |
44 | An alternative method for easy installation is to use the provided scripts.
45 | To install all necessary dependencies, run the script corresponding to your operating system:
46 |
47 | #### Windows
48 |
49 | ```sh
50 | cd /your_path/Python-Voice-Assistant
51 | ./Setup.bat
52 | ```
53 |
54 | #### Unix-based/Linux
55 |
56 | ```sh
57 | cd /your_path/Python-Voice-Assistant
58 | ./Setup.sh
59 | ```
60 |
61 | ## Using ChatGPT
62 |
63 | To use ChatGPT, you need an OpenAI API key. To get one, follow the steps below.
64 |
65 | ### Getting API Keys
66 |
67 | 1. First go to [https://openai.com/](https://openai.com/)
68 |
69 | 
70 |
71 | 2. Click on API and sign up, making sure to state that the account is for personal use.
72 | 3. Go to Manage Accounts, then Usage, to make sure you have some free credits.
73 |
74 | 
75 |
76 | 
77 |
78 | 4. Now go to API Keys and generate an API key.
79 |
80 | 
81 |
82 | 
83 |
84 | 5. Copy the API key and paste it into the code (see the sketch below).
85 |
86 | 
87 |
88 | ## Contributing
89 |
90 | If you have a suggestion that would make this better, please fork the repo and create a pull request. You can also open an issue with the tag `enhancement`.
91 |
92 | 1. Fork the Repository.
93 | 2. Create your Feature Branch. `git checkout -b feature/feature-name`
94 | 3. Commit your Changes. `git commit -s -m "Add some AmazingFeature"`
95 | 4. Push to the Branch. `git push origin feature/feature-name`
96 | 5. Open a Pull Request.
97 |
98 | ## Contributors
99 |
100 | [](https://github.com/subhadip-saha-05/PythOn-voice-assistant/graphs/contributors)
101 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Supported Versions
4 |
5 | Use this section to tell people about which versions of your project are
6 | currently being supported with security updates.
7 |
8 | | Version | Supported |
9 | | ------- | ------------------ |
10 | | 5.1.x | :white_check_mark: |
11 | | 5.0.x | :x: |
12 | | 4.0.x | :white_check_mark: |
13 | | < 4.0 | :x: |
14 |
15 | ## Reporting a Vulnerability
16 |
17 | Use this section to tell people how to report a vulnerability.
18 |
19 | Tell them where to go, how often they can expect to get an update on a
20 | reported vulnerability, what to expect if the vulnerability is accepted or
21 | declined, etc.
22 |
--------------------------------------------------------------------------------
/Setup.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | title Installing Pre-requisites
4 | type requirements.txt
5 | echo:
6 |
7 | set /p yn="Do you want to proceed? [Y/n] "
8 |
9 | if /i "%yn%"=="y" goto install
10 | if /i "%yn%"=="yes" goto install
11 | if /i "%yn%"=="n" goto exit
12 | if /i "%yn%"=="no" goto exit
13 | echo invalid response
14 | goto end
15 |
16 | :install
17 | echo installing...
18 | start pip install -r requirements.txt
19 | goto end
20 |
21 | :exit
22 | echo exiting...
23 | exit
24 |
25 | :end
--------------------------------------------------------------------------------
/Setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cat requirements.txt
4 |
5 | echo
6 | read -p "Do you want to proceed? [Y/n] " yn
7 |
8 | case $yn in
9 |     [Yy] ) echo installing...;;
10 |     [Yy][Ee][Ss] ) echo installing...;;
11 |     [Nn] ) echo exiting...;
12 |         exit;;
13 |     [Nn][Oo] ) echo exiting...;
14 |         exit;;
15 |     * ) echo invalid response;
16 |         exit 1;;
17 | esac
18 |
19 | pip install -r requirements.txt || pip3 install -r requirements.txt
20 | sudo apt-get install libasound-dev
--------------------------------------------------------------------------------
/jarvis.py:
--------------------------------------------------------------------------------
1 | import wmi  # Windows Management Instrumentation, for querying system information
2 | import os # provides functions for interacting with the operating system
3 | import requests # for making HTTP requests to a specified URL
4 | from time import strftime
5 | import pyttsx3 # text-to-speech conversion library
6 | import sys
7 | import datetime
8 | import speech_recognition as sr
9 | import wikipedia  # for searching and summarizing Wikipedia articles
10 | import webbrowser
11 | import random
12 | import pyautogui # used to take screenshots and simulate key presses
13 | import psutil # used to track resource utilization in the system
14 | import subprocess # used to run other programs
15 | import speedtest as speedtest
16 | from ecapture import ecapture as ec
17 | import pyautogui # to take screenshot
18 | from time import sleep
19 | import screen_brightness_control as sbc
20 | import pyjokes
21 | import pywhatkit # to send whatsapp msg
22 | import googletrans
23 | from bs4 import BeautifulSoup # to pull data out of html or XML files
24 | import openai
25 | import time
26 | from playsound import playsound
27 | from pywikihow import search_wikihow
28 | from PyDictionary import PyDictionary
29 | import turtle
30 | import smtplib #library to send email
31 | import PyPDF2
32 | from PIL import Image
33 |
34 | engine = pyttsx3.init()
35 | voices = engine.getProperty('voices')
36 | engine.setProperty('voice', voices[0].id)
37 |
38 |
39 | list_of_jokes = ["The three most well known languages in India are English, Hindi, and... JavaScript",
40 |                  "Interviewer: Where were you born? Me: In India. Interviewer: Oh, which part? Me: What 'which part'? My whole body was born in India",
41 |                  "How many Indians does it take to fix a lightbulb? Two. One to do the task and the other to explain how lightbulbs were actually invented in ancient India",
42 |                  "What do you call bread from India? It's naan of your business",
43 |                  "Britain: Drive on the left side... Europe and America: Drive on the right side... India: lol, what's a 'traffic law'?"]
44 | jokes = len(list_of_jokes) - 1
45 | ran_joke = random.randint(0, jokes)
46 | global name
47 |
48 |
49 | def speak(audio): # speak audio
50 | print(audio)
51 | engine.say(audio)
52 | engine.runAndWait()
53 |
54 |
55 | def bytes_to_mb(bytes):
56 | KB = 1024 # One Kilobyte is 1024 bytes
57 | MB = KB * 1024 # One MB is 1024 KB
58 | return int(bytes / MB)
59 |
60 |
61 | def wishMe(): # wishes me
62 | speak("Hey Jarvis here,Whats your name?")
63 | name = takeCommand().lower()
64 |
65 | hour = int(datetime.datetime.now().hour)
66 | if hour >= 0 and hour <= 3:
67 | speak("I am Your Personal assistant, Jarvis! version 1.0!")
68 | speak(f"As its too late {name}, better if you sleep early ...")
69 |
70 | elif hour >= 4 and hour < 12:
71 | speak(f"Good Morning {name}!")
72 | speak("I am Your Personal assistant, Jarvis! version 1.0!")
73 | elif hour >= 12 and hour < 17:
74 | speak(f"Good Afternoon {name} !")
75 | speak("I am Your Personal assistant, Jarvis! version 1.0!")
76 | elif hour >= 17 and hour < 19:
77 | speak(f"Good Evening {name}!")
78 | speak("I am Your Personal assistant, Jarvis! version 1.0!")
79 | elif hour >= 19 and hour < 24:
80 | speak(f"Hello {name} ,I am Your Personal assistant, Jarvis! version 1.0!")
81 | # good night will be greeted after the task is performed and exit command is given
82 | return name
83 |
84 |
85 | def takeCommand():  # takes microphone input and returns the recognized text
86 | r = sr.Recognizer()
87 | with sr.Microphone() as source:
88 | print("Listening...")
89 | r.pause_threshold = 1
90 | audio = r.listen(source)
91 |
92 | try:
93 | print("Recognizing...")
94 | # Using google for voice recognition
95 | query = r.recognize_google(audio, language='en-in')
96 | print(f"User said: {query}\n") # User query will be printed
97 | except Exception as e:
98 |         # Ask the user to repeat if the speech could not be recognized
99 |         speak("Say that again please...")
100 | return "None" # None string will be returned
101 | return query
102 |
103 |
104 | with open('profile.txt', 'r') as f:
105 | email = f.readline().strip()
106 | password = f.readline().strip()
107 |
108 | def sendemail(to, content):
109 | server = smtplib.SMTP('smtp.gmail.com', 587)
110 | server.ehlo()
111 | server.starttls()
112 | server.login(email, password)
113 | server.sendmail(email, to, content)
114 | server.close()
115 |
116 | def readBooks():
117 | speak("Enter the path of the file including it's name.")
118 | filePath = input("Enter the path of the file (including it's name): ")
119 | try:
120 | os.startfile(filePath)
121 | book = open(filePath, 'rb')
122 | pdfreader = PyPDF2.PdfReader(book)
123 | pages = len(pdfreader.pages)
124 | speak(f"Number of pages in this books are {pages}")
125 | speak("From Which Page I Have To Start Reading ?")
126 | try:
127 | Page = takeCommand()
128 | numPage = int(Page)
129 | except:
130 | speak("Sorry Sir, Please Write The Page Number.")
131 | numPage = int(input("Enter The Page Number: "))
132 | page = pdfreader.pages[numPage-1]
133 | text = page.extract_text()
134 | speak(text)
135 | except:
136 | speak("This Book is not Present!")
137 |
138 | def NasaNews(API_KEY):
139 | speak("On which day would you like to know ?")
140 | Date = input("Enter date as (2022-10-21): ")
141 |
142 | speak("Extracting Data From Nasa...")
143 | Url = "https://api.nasa.gov/planetary/apod?api_key=" + str(API_KEY)
144 | Params = {'date':str(Date)}
145 | r = requests.get(Url, params = Params)
146 |
147 | Data = r.json()
148 | print("\n")
149 | copyR = Data['copyright']
150 | Info = Data['explanation']
151 | Title = Data['title']
152 | Image_Url = Data['url']
153 | Image_r = requests.get(Image_Url)
154 | FileName = str(Date) + '.jpg'
155 |
156 | with open(FileName, 'wb') as f:
157 | f.write(Image_r.content)
158 | img = Image.open(FileName)
159 | img.show()
160 |
161 | speak(f"{Title} is copyright by {copyR}\n")
162 | speak(f"Acoording To Nasa : {Info}")
163 |
164 | print(f"CopyRight by {copyR}\n")
165 | print(f"Title: {Title}\n")
166 | print(f"FileName: {FileName}\n")
167 |
168 | if __name__ == "__main__":
169 |     name = wishMe()
170 |     count = 0  # tracks the mute toggle state used by the 'mute' command
171 |     speak("How May I Help You?")
171 | while True:
172 | query = takeCommand().lower()
173 |
174 | if 'wikipedia' in query:
175 | speak('What you wanna search on it?')
176 | lookfor = takeCommand()
177 | results = wikipedia.summary(lookfor, sentences=5)
178 | source = wikipedia.page(lookfor).url
179 | speak("According to Wikipedia")
180 | speak(results)
181 | speak("You may refer to this url for more info")
182 | print(source)
183 |
184 | elif 'read books' in query:
185 | readBooks()
186 |
187 | elif 'nasa news' in query:
188 | speak('Provide the path of text file having API KEY of NASA Organization.')
189 | filePath = input('Enter the path of API KEY text file: ')
190 | try:
191 | with open(filePath, 'r') as file:
192 | API_KEY = file.read().strip()
193 | if API_KEY and API_KEY != "None":
194 | NasaNews(API_KEY)
195 | except FileNotFoundError:
196 | print(f"Error: {filePath} not found.")
197 |
198 | elif 'internet speed' in query:
199 | st = speedtest.Speedtest()
200 | dl = bytes_to_mb(st.download())
201 | up = bytes_to_mb(st.upload())
202 | speak(
203 | f'{name} we have {dl} MB per second of DOWNLOAD SPEED and {up} MB per second of UPLOAD SPEED')
204 |
205 | elif 'stop' in query or 'shut up' in query or 'sleep' in query:
206 | speak('Alright Sir! Ping me up when you need me again')
207 | sys.exit(0)
208 |
209 | elif 'thank you' in query or 'appreciate' in query:
210 | speak("It's my duty to assist you anytime sir")
211 |
212 |
213 | elif 'open youtube' in query:
214 | speak("Here We Go")
215 | webbrowser.open("youtube.com")
216 |
217 | elif 'youtube' in query and 'search' in query:
218 | speak(f"What Should I Search {name}?")
219 | search_yt = takeCommand()
220 | search_yt = search_yt.replace(" ", "+")
221 | speak("Here We Go")
222 | webbrowser.open(
223 | f"https://www.youtube.com/results?search_query={search_yt}")
224 |
225 | elif 'open google' in query:
226 | speak("Here We Go")
227 | webbrowser.open("google.com")
228 |
229 | elif 'google' in query and 'search' in query:
230 | speak(f"What Should I Search {name} ?")
231 | search_go = takeCommand()
232 | search_go = search_go.replace(" ", "+")
233 | speak("Here We Go")
234 | webbrowser.open(f"https://www.google.com/search?q={search_go}")
235 |
236 | elif 'open instagram' in query:
237 | speak("Here We Go")
238 | webbrowser.open("instagram.com")
239 |
240 | elif 'relax' in query:
241 | speak("Relaxing........................")
242 | w = 500
243 | h = 500
244 | food_size = 10
245 | delay = 100
246 |
247 | offsets = {
248 | "up": (0, 20),
249 | "down": (0, -20),
250 | "left": (-20, 0),
251 | "right": (20, 0)
252 | }
253 |
254 | def reset():
255 | global snake, snake_dir, food_position, pen
256 | snake = [[0, 0], [0, 20], [0, 40], [0, 60], [0, 80]]
257 | snake_dir = "up"
258 | food_position = get_random_food_position()
259 | food.goto(food_position)
260 | move_snake()
261 |
262 | def move_snake():
263 | global snake_dir
264 |
265 | new_head = snake[-1].copy()
266 | new_head[0] = snake[-1][0] + offsets[snake_dir][0]
267 | new_head[1] = snake[-1][1] + offsets[snake_dir][1]
268 |
269 |
270 | if new_head in snake[:-1]:
271 | reset()
272 | else:
273 | snake.append(new_head)
274 |
275 |
276 | if not food_collision():
277 | snake.pop(0)
278 |
279 |
280 | if snake[-1][0] > w / 2:
281 | snake[-1][0] -= w
282 | elif snake[-1][0] < - w / 2:
283 | snake[-1][0] += w
284 | elif snake[-1][1] > h / 2:
285 | snake[-1][1] -= h
286 | elif snake[-1][1] < -h / 2:
287 | snake[-1][1] += h
288 |
289 |
290 | pen.clearstamps()
291 |
292 |
293 | for segment in snake:
294 | pen.goto(segment[0], segment[1])
295 | pen.stamp()
296 |
297 |
298 | screen.update()
299 |
300 | turtle.ontimer(move_snake, delay)
301 |
302 | def food_collision():
303 | global food_position
304 | if get_distance(snake[-1], food_position) < 20:
305 | food_position = get_random_food_position()
306 | food.goto(food_position)
307 | return True
308 | return False
309 |
310 | def get_random_food_position():
311 |                 x = random.randint(int(-w / 2 + food_size), int(w / 2 - food_size))
312 |                 y = random.randint(int(-h / 2 + food_size), int(h / 2 - food_size))
313 | return (x, y)
314 |
315 | def get_distance(pos1, pos2):
316 | x1, y1 = pos1
317 | x2, y2 = pos2
318 | distance = ((y2 - y1) ** 2 + (x2 - x1) ** 2) ** 0.5
319 | return distance
320 | def go_up():
321 | global snake_dir
322 | if snake_dir != "down":
323 | snake_dir = "up"
324 |
325 | def go_right():
326 | global snake_dir
327 | if snake_dir != "left":
328 | snake_dir = "right"
329 |
330 | def go_down():
331 | global snake_dir
332 | if snake_dir!= "up":
333 | snake_dir = "down"
334 |
335 | def go_left():
336 | global snake_dir
337 | if snake_dir != "right":
338 | snake_dir = "left"
339 |
340 |
341 | screen = turtle.Screen()
342 | screen.setup(w, h)
343 | screen.title("Snake")
344 | screen.bgcolor("blue")
345 | screen.setup(500, 500)
346 | screen.tracer(0)
347 |
348 |
349 | pen = turtle.Turtle("square")
350 | pen.penup()
351 |
352 |
353 | food = turtle.Turtle()
354 | food.shape("square")
355 | food.color("yellow")
356 | food.shapesize(food_size / 20)
357 | food.penup()
358 |
359 |
360 | screen.listen()
361 | screen.onkey(go_up, "Up")
362 | screen.onkey(go_right, "Right")
363 | screen.onkey(go_down, "Down")
364 | screen.onkey(go_left, "Left")
365 |
366 |
367 | reset()
368 | turtle.done()
369 |
370 | # code by PK284---------
371 | elif 'search flight' in query:
372 | speak("What is the source of the Flight Sir!!")
373 | source= takeCommand()
374 | speak("What is the Destination of the Flight Sir!!")
375 | destination = takeCommand()
376 | # speak("What is the Travel date sir Please speak in numberic format")
377 | # traveldate = takeCommand()
378 | # webbrowser.open(f"https://www.google.com/search?q={search_go}")
379 | # webbrowser.open(f"https://www.makemytrip.com/flight/search?itinerary={source}-{destination}-25/01/2023-&tripType=O&paxType=A-1_C-0_I-0&intl=false&=&cabinClass=E")
380 | webbrowser.open(f"https://www.makemytrip.com/flight/search?itinerary={source}-{destination}-26/01/2023&tripType=O&paxType=A-2_C-0_I-0&intl=false&cabinClass=E&ccde=IN&lang=eng")
381 |
382 |
383 |
384 | elif 'open facebook' in query:
385 | speak("Here We Go")
386 | webbrowser.open("facebook.com")
387 |
388 | elif 'open twitter' in query:
389 | speak("Here We Go")
390 | webbrowser.open("twitter.com")
391 |
392 | elif 'download youtube videos' in query:
393 | speak("Here We Go")
394 | webbrowser.open("en.onlinevideoconverter.pro")
395 |
396 | elif 'open whatsapp' in query:
397 | speak("Here We Go")
398 | webbrowser.open("web.whatsapp.com")
399 |
400 | elif 'open reddit' in query:
401 | speak("Here We Go")
402 | webbrowser.open("reddit.com")
403 |
404 | elif 'open linkedin' in query:
405 | speak("Here We Go")
406 | webbrowser.open("linkedin.com")
407 |
408 | elif 'open pinterest' in query:
409 | speak("Here We Go")
410 | webbrowser.open("pinterest.com")
411 |
412 | elif 'open quora' in query:
413 | speak("Here We Go")
414 | webbrowser.open("quora.com")
415 |
416 | elif 'open discord' in query:
417 | speak("Here We Go")
418 | webbrowser.open("discord.com")
419 |
420 |         elif 'open prime video' in query or 'open amazon prime video' in query:
421 | speak("Here We Go")
422 | webbrowser.open("primevideo.com")
423 |
424 | elif ('open netflix') in query:
425 | speak("Here We Go")
426 | webbrowser.open("netflix.com")
427 |
428 | elif ('open hotstar') in query:
429 | speak("Here We Go")
430 | webbrowser.open("hotstar.com")
431 |
432 | elif 'the time' in query:
433 | strTime = datetime.datetime.now().strftime("%H:%M:%S")
434 | speak(strTime)
435 |
436 | elif 'the date' in query:
437 | today = datetime.date.today()
438 | speak(today)
439 |
440 | elif query == 'jarvis':
441 | speak(f"At Your Service {name}, How can I help you")
442 |
443 | elif 'joke' in query:
444 | URL = 'https://v2.jokeapi.dev/joke/Any'
445 | response = requests.get(URL)
446 | data = response.json()
447 | if response.status_code == 200:
448 | speak(data['setup'])
449 | speak(data['delivery'])
450 | else:
451 | speak(list_of_jokes[ran_joke])
452 |
453 | elif "volume up" in query:
454 | pyautogui.press("volumeup")
455 | speak("volume upped")
456 | sleep(1)
457 | speak("anything else for which I may assist you!")
458 |
459 | elif "volume down" in query:
460 | pyautogui.press("volumedown")
461 | speak("volume lowered")
462 | sleep(1)
463 |
464 | speak("anything else for which i may assist you")
465 |
466 | elif 'battery' in query:
467 | battery = psutil.sensors_battery()
468 | percentage = battery.percent
469 | speak(f'{name} our System still has {percentage} percent battery')
470 | if percentage >= 75:
471 | print("\U0001F601")
472 | speak(f'{name} we have enough power to continue our work!')
473 | elif percentage >= 40 and percentage < 75:
474 | speak(
475 | f'{name} we should think of connecting our system to the battery supply!')
476 | elif percentage <= 40 and percentage >= 15:
477 | speak(
478 | f"{name} we don't have enough power to work through!... Connect now sir!")
479 | elif percentage < 15:
480 | speak(
481 | f'{name} we have very low power!... Our System may Shutdown anytime soon!...')
482 |
483 | elif "mute" in query:
484 |
485 | if count==0:
486 | pyautogui.press("volumemute")
487 | speak("volume muted")
488 | sleep(1)
489 | count = 1
490 |
491 | elif count == 1:
492 | pyautogui.press("volumemute")
493 | speak("Voluble Now")
494 | sleep(1)
495 | count = 0
496 |
497 | speak("anything else for which i may assist you")
498 |
499 | elif "brightness" in query:
500 | try:
501 | current = sbc.get_brightness()
502 | bright = int(takeCommand())
503 |                 sbc.set_brightness(bright)
504 |                 speak(f"brightness set to {bright} percent")
505 | sleep(1)
506 | speak("anything else for which i may assist you...")
507 | except Exception as e:
508 | print(e)
509 | speak("error")
510 |
511 | elif 'todo' in query or 'to do' in query:
512 | if 'add' in query or 'create' in query:
513 | with open('todo.txt', 'a') as f:
514 | todo_w = takeCommand()
515 | f.write(f"{todo_w}\n")
516 | speak("To Do is updated successfully !")
517 |
518 | elif 'read' in query or 'tell' in query:
519 | with open('todo.txt', 'r') as f:
520 | todo_r = f.read()
521 | if todo_r == "":
522 | todo_r = "No Pendning Tasks "
523 | speak(todo_r)
524 |
525 | elif 'erase' in query or 'remove all' in query or 'clear' in query:
526 | with open("todo.txt", "w") as f:
527 | f.write("")
528 | speak("All Tasks has been cleared!")
529 |
530 | elif 'open spotify' in query:
531 | speak("Opening spotify")
532 | webbrowser.open("spotify.com")
533 |
534 | elif 'screenshot' in query:
535 | sc = pyautogui.screenshot()
536 | sc.save('pa_ss.png')
537 | speak("Screenshot taken successfully.")
538 |
539 | elif "translate" in query:
540 | translator = googletrans.Translator()
541 | lang = ['en', 'ta', 'te', 'kn', 'ml']
542 | # To Print all the languages that Google Translator Support
543 | # Command to print Languages Supported
544 | # print(googletrans.LANGUAGES)
545 | speak(f"{name} please tell me the Sentence that you want me to translate")
546 | text = takeCommand().lower()
547 | speak(
548 | "Please choose a Source Language by pressing a number from the following List!")
549 | print(
550 | " english ---> 1 Tamil ---> 2 Telugu ---> 3 Kannada ----> 4 Malayalam ---> 5")
551 | numberS = int(input("Enter here: "))
552 | speak(
553 | "Please choose a Destination Language by pressing a number from the following List!")
554 | print(
555 | " english ---> 1 Tamil ---> 2 Telugu ---> 3 Kannada ----> 4 Malayalam ---> 5")
556 | numberD = int(input("Enter here: "))
557 | translated = translator.translate(
558 | text, src=lang[numberS - 1], dest=lang[numberD - 1])
559 | print(translated.text)
560 | print("Legibility is:",
561 | (translated.extra_data['confidence']) * 100, "%")
562 |
563 | elif "log off" in query or "sign out" in query:
564 |             speak(
565 |                 "Ok, your pc will log off in 10 seconds! Make sure you exit from all applications")
566 |             sleep(10)
567 |             subprocess.call(["shutdown", "/l"])
567 |
568 | elif "camera" in query or "take a photo" in query:
569 | ec.capture(0, "Jarvis-camera", "img.jpg")
570 |
571 | elif 'play' in query:
572 | song = query.replace('play', '')
573 | speak('playing ' + song)
574 | pywhatkit.playonyt(song)
575 |
576 | elif "weather" in query:
577 | api_key = "8ef61edcf1c576d65d836254e11ea420"
578 | base_url = "https://api.openweathermap.org/data/2.5/weather?"
579 | speak("What is the name of the city?")
580 | city_name = takeCommand()
581 |
582 | print(f"{city_name} whether conditions : ")
583 |
584 | complete_url = base_url + "appid=" + api_key + "&q=" + city_name
585 | response = requests.get(complete_url)
586 | x = response.json()
587 | if x["cod"] != "404":
588 | y = x["main"]
589 | current_temperature = y["temp"] - 273.15
590 | current_temperature = float('%.2f' % current_temperature)
591 | current_humidiy = y["humidity"]
592 | z = x["weather"]
593 | weather_description = z[0]["description"]
594 | speak(" Temperature in Celcius unit is " +
595 | str(current_temperature) +
596 | "\n humidity in percentage is " +
597 | str(current_humidiy) +
598 | "\n description " +
599 | str(weather_description))
600 | print(" Temperature in Celcius unit = " +
601 | str(current_temperature) +
602 | "\n humidity (in percentage) = " +
603 | str(current_humidiy) +
604 | "\n description = " +
605 | str(weather_description))
606 | else:
607 | speak("Can't find details about this city")
608 |
609 | elif "current news" in query or "latest news" in query:
610 | url = "https://www.indiatoday.in/india"
611 | page = requests.get(url)
612 | soup = BeautifulSoup(page.content, 'html.parser')
613 |
614 | # Find all the headlines on the page
615 | headlines = soup.find_all("h2")
616 | for headline in headlines[:4]:
617 | print(headline.text)
618 | speak(headline.text)
619 |
620 | elif "who made you" in query or "who created you" in query or "who discovered you" in query:
621 | speak("I am a human creation built by all sets of knowledge of humans.I am nothing without humans")
622 |
623 |
624 | elif "initiate" in query or "chat" in query or "Veronica" in query or "gpt" in query:
625 | def GPT():
626 | speak("Connecting to Veronica")
627 |
628 | # Enter API KEY or Leave blank if you don't want to use this function
629 | API_KEY = ""
630 | openai.api_key = API_KEY
631 | if API_KEY == "":
632 | print("Please Enter the API Key!")
633 | speak("Please Enter the API Key!")
634 | while API_KEY != "":
635 | engine1 = pyttsx3.init()
636 | voices = engine1.getProperty('voices')
637 | engine1.setProperty('voice', voices[1].id)
638 | r = sr.Recognizer()
639 | mic = sr.Microphone(device_index=1)
640 |
641 | conversation = ""
642 |
643 | user_name = str(input("Enter your name: "))
644 | bot_name = "Veronica"
645 | print("Hey," + user_name)
646 |
647 | while True:
648 | with mic as source:
649 | print("\nlistening...")
650 | r.adjust_for_ambient_noise(source, duration=0.2)
651 | audio = r.listen(source)
652 | print("no longer listening.\n")
653 |
654 | try:
655 | user_input = r.recognize_google(audio)
656 | except:
657 | continue
658 |
659 | prompt = user_name + ": " + user_input + "\n" + bot_name + ": "
660 |
661 | conversation += prompt # allows for context
662 | # fetch response from open AI api
663 | response = openai.Completion.create(engine='text-davinci-003', prompt=conversation,
664 | max_tokens=50)
665 | response_str = response["choices"][0]["text"].replace("\n", "")
666 | response_str = response_str.split(user_name + ": ", 1)[0].split(bot_name + ": ", 1)[0]
667 |
668 | conversation += response_str + "\n"
669 | print(response_str)
670 | engine1.say(response_str)
671 |
672 | prompt = user_name + ": " + user_input + "\n" + bot_name + ": "
673 |
674 | conversation += prompt # allows for context
675 | # fetch response from open AI api
676 | response = openai.Completion.create(
677 | engine='text-davinci-003', prompt=conversation, max_tokens=50)
678 | response_str = response["choices"][0]["text"].replace(
679 | "\n", "")
680 | response_str = response_str.split(
681 | user_name + ": ", 1)[0].split(bot_name + ": ", 1)[0]
682 |
683 | conversation += response_str + "\n"
684 | print(response_str)
685 | engine1.say(response_str)
686 | engine1.runAndWait()
687 |
688 |
689 | GPT()
690 |
691 | elif 'news' in query:
692 | api_key = '9bb9b456bf124f80aba6a0e09cc2f811'
693 | URL = 'https://newsapi.org/v2/top-headlines?country=us&apiKey=' + api_key
694 |
695 | resp = requests.get(URL)
696 | if resp.status_code == 200:
697 | data = resp.json()
698 | news = data['articles'][0]
699 | speak(news['title'])
700 | speak(news['description'])
701 | else:
702 | speak("Cannot find a news at this moment")
703 |
704 |
705 | elif "ip address" in query:
706 | ip = requests.get('https://api.ipify.org').text
707 | print(ip)
708 | speak(f"Your ip address is {ip}")
709 |
710 | elif "switch the window" in query or "switch window" in query:
711 | speak(f"Okay {name}, Switching the window")
712 | pyautogui.keyDown("alt")
713 | pyautogui.press("tab")
714 | pyautogui.keyUp("alt")
715 | elif 'screenshot' in query:
716 | speak("Taking screenshot")
717 | times = time.time()
718 | name_img = r"{}.png".format(str(times))
719 | img = pyautogui.screenshot(name_img)
720 | speak("Done!")
721 | img.show()
722 |
723 | elif "system" in query:
724 |
725 | c = wmi.WMI()
726 | my_system = c.Win32_ComputerSystem()[0]
727 | speak(f"Manufacturer: {my_system.Manufacturer}")
728 | speak(f"Model: {my_system.Model}")
729 | speak(f"Name: {my_system.Name}")
730 | speak(f"NumberOfProcessors: {my_system.NumberOfProcessors}")
731 | speak(f"SystemType: {my_system.SystemType}")
732 | speak(f"SystemFamily: {my_system.SystemFamily}")
733 |
734 | elif 'how to' in query:
735 | try:
736 | # query = query.replace('how to', '')
737 | max_results = 1
738 | data = search_wikihow(query, max_results)
739 | # assert len(data) == 1
740 | data[0].print()
741 | speak(data[0].summary)
742 | except Exception as e:
743 | speak('Sorry, I am unable to find the answer for your query.')
744 |
745 |
746 | elif 'set alarm' in query:
747 | speak(
748 | "Tell me the time to set an Alarm. ")
749 | speak("How do you want to set time in ,like hours/minutes/second")
750 | a_info = takeCommand()
751 | if('hours' in a_info):
752 | speak("Tell me time in hours!")
753 | a_info=int(input("Type it"))
754 | # a_info = int(takeCommand())
755 | speak(f"Alarm set for {a_info} hours")
756 | time.sleep(a_info *3600)
757 | elif('minutes' in a_info):
758 | speak("Tell me time in minutes!")
759 | a_info = int(input("Type it"))
760 | # a_info = int(takeCommand())
761 | time.sleep(a_info * 60)
762 | else:
763 | speak("Tell me time in seconds!")
764 | a_info = int(input("Type it"))
765 | # a_info = int(takeCommand())
766 | time.sleep(a_info)
767 |
768 | # playsound('Alarm.mp3')
769 | speak("Hi I am back!!! Wake Up Wake Up Wake Up Wake Up Wake Up Wake Up!!")
770 |
771 | elif 'meaning' in query:
772 | speak(f"Which word do you want me to define {name}?")
773 | queryword = takeCommand().lower()
774 |
775 |
776 |             meaning = PyDictionary().meaning(queryword)
777 |
778 | for i in meaning:
779 | print(meaning[i])
780 | speak("Sir the meaning is ", str(meaning[i]))
781 |
782 | elif 'generate image' in query or 'image with ai' in query or 'image with artificial intelligence' in query:
783 | speak("What kind of photo do you want to generate?")
784 | imageinfo = takeCommand()
785 | if imageinfo == "":
786 | pass
787 | else:
788 | speak("just wait a bit! I'm processing it!")
789 | response = openai.Image.create(
790 | prompt=imageinfo, n=1, size="1024x1024")
791 | image_url = response['data'][0]['url']
792 | webbrowser.open(image_url)
793 | speak(f"Here is is!! {imageinfo}")
794 | print(f"Here is is!! {imageinfo}")
795 |
796 | elif 'quit' in query or 'exit' in query or 'close' in query or 'bye' in query:
797 | speak(f"Thank you for using Jarvis {name}")
798 | if 19 <= int(datetime.datetime.now().hour) < 24:
799 | speak(f"Have a very Good Night {name} and sweet dreams!")
800 | else:
801 | speak(f"See you soon,have a very Good Day {name}!")
802 | exit()
803 |
804 |
805 | elif 'send email' in query:
806 | try:
807 | speak("What should I say?")
808 | content = takeCommand()
809 | speak("What is the recipient's email address?")
810 | to = takeCommand()
811 | sendemail(to,content)
812 | speak("email has been sent.")
813 |
814 | except Exception as e:
815 | print(e)
816 | speak("Unable to send email.")
817 |
818 |
819 | speak("What do you want to continue with?")
820 |
--------------------------------------------------------------------------------
/memories.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/subh05sus/Python-Voice-Assistant/8531a761d64b53e2c0f53a2af5effee7abb6493b/memories.txt
--------------------------------------------------------------------------------
/profile.txt:
--------------------------------------------------------------------------------
1 | youremail@gmail.com
2 | yourpassword
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | WMI~=1.5.1
2 | requests~=2.28.1
3 | pyttsx3~=2.90
4 | DateTime~=4.9
5 | wikipedia~=1.4.0
6 | PyAutoGUI~=0.9.53
7 | psutil~=5.9.4
8 | speedtest~=0.0.1
9 | pyjokes~=0.6.0
10 | pywhatkit~=5.4
11 | googletrans~=3.0.0
12 | openai~=0.26.0
13 | ecapture~=2.0.2
14 | bs4~=0.0.1
15 | beautifulsoup4~=4.11.1
16 | playsound~=1.3.0
17 | pywikihow~=0.5.7
18 | PyDictionary~=2.0.1
19 | # Additional packages imported by jarvis.py
20 | SpeechRecognition
21 | PyAudio
22 | screen-brightness-control
23 | PyPDF2
24 | Pillow
--------------------------------------------------------------------------------
/tempCodeRunnerFile.py:
--------------------------------------------------------------------------------
1 |
2 | # speak("What is the Destination of the Flight Sir!!")
3 | # destination = takeCommand()
4 | # speak("What is the Travel date sir Please speak in numberic format")
5 | # traveldate = takeCommand()
--------------------------------------------------------------------------------