├── 9781484221662.jpg ├── LICENSE.txt ├── Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS └── Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS │ ├── Melissa-Core │ ├── GreyMatter │ │ ├── SenseCells │ │ │ ├── __init__.py │ │ │ └── tts.py │ │ └── __init__.py │ ├── LICENSE.md │ ├── main.py │ └── profile.yaml.default │ ├── audio_recorder.py │ ├── google_speech_recognition.py │ └── tts.py ├── Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module └── Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module │ ├── Melissa-Core-1 │ ├── GreyMatter │ │ ├── SenseCells │ │ │ ├── __init__.py │ │ │ └── tts.py │ │ ├── __init__.py │ │ └── general_conversations.py │ ├── LICENSE.md │ ├── brain.py │ ├── main.py │ └── profile.yaml.default │ └── Melissa-Core-2 │ ├── GreyMatter │ ├── SenseCells │ │ ├── __init__.py │ │ └── tts.py │ ├── __init__.py │ ├── general_conversations.py │ └── tell_time.py │ ├── LICENSE.md │ ├── brain.py │ ├── main.py │ └── profile.yaml.default ├── Pant_Ch04_Using_Internet_to_Gather_Information └── Pant_Ch04_Using_Internet_to_Gather_Information │ └── Melissa-Core │ ├── GreyMatter │ ├── SenseCells │ │ ├── __init__.py │ │ └── tts.py │ ├── __init__.py │ ├── business_news_reader.py │ ├── connect_proxy.py │ ├── define_subject.py │ ├── general_conversations.py │ ├── open_firefox.py │ ├── sleep.py │ ├── tell_time.py │ └── weather.py │ ├── LICENSE.md │ ├── brain.py │ ├── main.py │ └── profile.yaml.default ├── Pant_Ch05_Developing_a_Music_Player_for_Melissa └── Pant_Ch05_Developing_a_Music_Player_for_Melissa │ └── Melissa-Core │ ├── GreyMatter │ ├── SenseCells │ │ ├── __init__.py │ │ └── tts.py │ ├── __init__.py │ ├── business_news_reader.py │ ├── connect_proxy.py │ ├── define_subject.py │ ├── general_conversations.py │ ├── open_firefox.py │ ├── play_music.py │ ├── sleep.py │ ├── tell_time.py │ └── weather.py │ ├── LICENSE.md │ ├── brain.py │ ├── main.py │ └── profile.yaml.default ├── Pant_Ch06_Developing_a_Note_Taking_Application └── 
Pant_Ch06_Developing_a_Note_Taking_Application │ └── Melissa-Core │ ├── GreyMatter │ ├── SenseCells │ │ ├── __init__.py │ │ └── tts.py │ ├── __init__.py │ ├── business_news_reader.py │ ├── connect_proxy.py │ ├── define_subject.py │ ├── general_conversations.py │ ├── notes.py │ ├── open_firefox.py │ ├── play_music.py │ ├── sleep.py │ ├── tell_time.py │ └── weather.py │ ├── LICENSE.md │ ├── brain.py │ ├── main.py │ ├── memory.db.default │ └── profile.yaml.default ├── Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur └── Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur │ └── Melissa-Core │ ├── GreyMatter │ ├── SenseCells │ │ ├── __init__.py │ │ └── tts.py │ ├── __init__.py │ ├── business_news_reader.py │ ├── connect_proxy.py │ ├── define_subject.py │ ├── general_conversations.py │ ├── imgur_handler.py │ ├── notes.py │ ├── open_firefox.py │ ├── play_music.py │ ├── sleep.py │ ├── tell_time.py │ ├── twitter_interaction.py │ ├── twitter_pull.py │ └── weather.py │ ├── LICENSE.md │ ├── brain.py │ ├── main.py │ ├── memory.db.default │ └── profile.yaml.default ├── Pant_Ch08_Building_a_Web_Interface_for_Melissa └── Pant_Ch08_Building_a_Web_Interface_for_Melissa │ └── Melissa-Web │ ├── GreyMatter │ ├── SenseCells │ │ ├── __init__.py │ │ └── tts.py │ ├── __init__.py │ ├── connect_proxy.py │ ├── define_subject.py │ ├── general_conversations.py │ ├── notes.py │ ├── open_firefox.py │ ├── play_music.py │ ├── sleep.py │ ├── tell_time.py │ ├── twitter_pull.py │ └── weather.py │ ├── LICENSE.md │ ├── brain.py │ ├── main.py │ ├── memory.db.default │ ├── profile.yaml.default │ ├── static │ ├── audiodisplay.js │ ├── img │ │ ├── bg-sky.png │ │ ├── mic.png │ │ ├── save.png │ │ └── upload.png │ ├── main.js │ ├── recorder.js │ └── recorderWorker.js │ ├── templates │ └── index.html │ └── web-gateway.py ├── README.md └── contributing.md /9781484221662.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/9781484221662.jpg -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/LICENSE.txt -------------------------------------------------------------------------------- /Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Melissa-Core/GreyMatter/SenseCells/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Melissa-Core/GreyMatter/SenseCells/__init__.py -------------------------------------------------------------------------------- /Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Melissa-Core/GreyMatter/SenseCells/tts.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | def tts(message): 5 | """ 6 | This function takes a message as an argument and converts it to speech depending on the OS. 
7 | """ 8 | if sys.platform == 'darwin': 9 | tts_engine = 'say' 10 | return os.system(tts_engine + ' ' + message) 11 | elif sys.platform == 'linux2' or sys.platform == 'linux': 12 | tts_engine = 'espeak' 13 | return os.system(tts_engine + ' "' + message + '"') 14 | -------------------------------------------------------------------------------- /Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Melissa-Core/GreyMatter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Melissa-Core/GreyMatter/__init__.py -------------------------------------------------------------------------------- /Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Melissa-Core/LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Tanay Pant 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Melissa-Core/main.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import yaml 4 | import speech_recognition as sr 5 | 6 | from GreyMatter.SenseCells.tts import tts 7 | 8 | profile = open('profile.yaml') 9 | profile_data = yaml.safe_load(profile) 10 | profile.close() 11 | 12 | # Functioning Variables 13 | name = profile_data['name'] 14 | city_name = profile_data['city_name'] 15 | 16 | tts('Welcome ' + name + ', systems are now ready to run. 
How can I help you?') 17 | 18 | def main(): 19 | r = sr.Recognizer() 20 | with sr.Microphone() as source: 21 | print("Say something!") 22 | audio = r.listen(source) 23 | 24 | try: 25 | speech_text = r.recognize_google(audio).lower().replace("'", "") 26 | print("Melissa thinks you said '" + speech_text + "'") 27 | except sr.UnknownValueError: 28 | print("Melissa could not understand audio") 29 | except sr.RequestError as e: 30 | print("Could not request results from Google Speech Recognition service; {0}".format(e)) 31 | 32 | tts(speech_text) 33 | 34 | main() 35 | -------------------------------------------------------------------------------- /Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Melissa-Core/profile.yaml.default: -------------------------------------------------------------------------------- 1 | name: 2 | Tanay 3 | city_name: 4 | New Delhi -------------------------------------------------------------------------------- /Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/audio_recorder.py: -------------------------------------------------------------------------------- 1 | import speech_recognition as sr 2 | 3 | r = sr.Recognizer() 4 | with sr.Microphone() as source: 5 | print("Say something!") 6 | audio = r.listen(source) 7 | 8 | with open("recording.wav", "wb") as f: 9 | f.write(audio.get_wav_data()) 10 | -------------------------------------------------------------------------------- /Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/google_speech_recognition.py: -------------------------------------------------------------------------------- 1 | import speech_recognition as sr 2 | 3 | # obtain audio from the microphone 4 | r = sr.Recognizer() 5 | with sr.Microphone() as source: 6 | 
print("Say something!") 7 | audio = r.listen(source) 8 | 9 | # recognize speech using Google Speech Recognition 10 | try: 11 | # for testing purposes, we're just using the default API key 12 | # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")` 13 | # instead of `r.recognize_google(audio)` 14 | print("Google Speech Recognition thinks you said " + r.recognize_google(audio)) 15 | except sr.UnknownValueError: 16 | print("Google Speech Recognition could not understand audio") 17 | except sr.RequestError as e: 18 | print("Could not request results from Google Speech Recognition service; {0}".format(e)) 19 | -------------------------------------------------------------------------------- /Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/Pant_Ch02_Understanding_and_Building_an_Application_with_STT_and_TTS/tts.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | def tts(message): 5 | """ 6 | This function takes a message as an argument and converts it to speech depending on the OS. 
7 | """ 8 | if sys.platform == 'darwin': 9 | tts_engine = 'say' 10 | return os.system(tts_engine + ' ' + message) 11 | elif sys.platform == 'linux2' or sys.platform == 'linux': 12 | tts_engine = 'espeak' 13 | return os.system(tts_engine + ' "' + message + '"') 14 | 15 | tts('Hey there, greetings from Tanay') -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-1/GreyMatter/SenseCells/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-1/GreyMatter/SenseCells/__init__.py -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-1/GreyMatter/SenseCells/tts.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | def tts(message): 5 | """ 6 | This function takes a message as an argument and converts it to speech depending on the OS. 
7 | """ 8 | if sys.platform == 'darwin': 9 | tts_engine = 'say' 10 | return os.system(tts_engine + ' ' + message) 11 | elif sys.platform == 'linux2' or sys.platform == 'linux': 12 | tts_engine = 'espeak' 13 | return os.system(tts_engine + ' "' + message + '"') 14 | -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-1/GreyMatter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-1/GreyMatter/__init__.py -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-1/GreyMatter/general_conversations.py: -------------------------------------------------------------------------------- 1 | from SenseCells.tts import tts 2 | 3 | def who_are_you(): 4 | message = 'I am Melissa, your lovely personal assistant.' 
5 | tts(message) 6 | 7 | def undefined(): 8 | tts('I dont know what that means!') 9 | -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-1/LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Tanay Pant 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-1/brain.py: -------------------------------------------------------------------------------- 1 | from GreyMatter import general_conversations 2 | 3 | def brain(name, speech_text): 4 | def check_message(check): 5 | 6 | if speech_text == check: 7 | return True 8 | else: 9 | return False 10 | 11 | if check_message('who are you'): 12 | general_conversations.who_are_you() 13 | else: 14 | general_conversations.undefined() 15 | -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-1/main.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import yaml 4 | import speech_recognition as sr 5 | 6 | from brain import brain 7 | from GreyMatter.SenseCells.tts import tts 8 | 9 | profile = open('profile.yaml') 10 | profile_data = yaml.safe_load(profile) 11 | profile.close() 12 | 13 | # Functioning Variables 14 | name = profile_data['name'] 15 | city_name = profile_data['city_name'] 16 | 17 | tts('Welcome ' + name + ', systems are now ready to run. 
How can I help you?') 18 | 19 | def main(): 20 | r = sr.Recognizer() 21 | with sr.Microphone() as source: 22 | print("Say something!") 23 | audio = r.listen(source) 24 | 25 | try: 26 | speech_text = r.recognize_google(audio).lower().replace("'", "") 27 | print("Melissa thinks you said '" + speech_text + "'") 28 | except sr.UnknownValueError: 29 | print("Melissa could not understand audio") 30 | except sr.RequestError as e: 31 | print("Could not request results from Google Speech Recognition service; {0}".format(e)) 32 | 33 | brain(name, speech_text) 34 | 35 | main() 36 | -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-1/profile.yaml.default: -------------------------------------------------------------------------------- 1 | name: 2 | Tanay 3 | city_name: 4 | New Delhi -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-2/GreyMatter/SenseCells/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-2/GreyMatter/SenseCells/__init__.py -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-2/GreyMatter/SenseCells/tts.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | def tts(message): 5 | """ 6 | This function takes a message as an argument and converts it to 
speech depending on the OS. 7 | """ 8 | if sys.platform == 'darwin': 9 | tts_engine = 'say' 10 | return os.system(tts_engine + ' ' + message) 11 | elif sys.platform == 'linux2' or sys.platform == 'linux': 12 | tts_engine = 'espeak' 13 | return os.system(tts_engine + ' "' + message + '"') 14 | -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-2/GreyMatter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-2/GreyMatter/__init__.py -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-2/GreyMatter/general_conversations.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | from SenseCells.tts import tts 4 | 5 | def who_are_you(): 6 | messages = ['I am Melissa, your lovely personal assistant.', 7 | 'Melissa, didnt I tell you before?', 8 | 'You ask that so many times! I am Melissa.'] 9 | tts(random.choice(messages)) 10 | 11 | def how_am_i(): 12 | replies =['You are goddamn handsome!', 'My knees go weak when I see you.', 'You are sexy!', 'You look like the kindest person that I have met.'] 13 | tts(random.choice(replies)) 14 | 15 | def tell_joke(): 16 | jokes = ['What happens to a frogs car when it breaks down? It gets toad away.', 'Why was six scared of seven? 
Because seven ate nine.', 'No, I always forget the punch line.'] 17 | tts(random.choice(jokes)) 18 | 19 | def who_am_i(name): 20 | tts('You are ' + name + ', a brilliant person. I love you!') 21 | 22 | def where_born(): 23 | tts('I was created by a magician named Tanay, in India, the magical land of himalayas.') 24 | 25 | def how_are_you(): 26 | tts('I am fine, thank you.') 27 | 28 | def undefined(): 29 | tts('I dont know what that means!') 30 | -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-2/GreyMatter/tell_time.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | from SenseCells.tts import tts 4 | 5 | def what_is_time(): 6 | tts("The time is " + datetime.strftime(datetime.now(), '%H:%M:%S')) 7 | -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-2/LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Tanay Pant 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-2/brain.py: -------------------------------------------------------------------------------- 1 | from GreyMatter import tell_time, general_conversations 2 | 3 | def brain(name, speech_text): 4 | def check_message(check): 5 | """ 6 | This function checks if the items in the list (specified in argument) are present in the user's input speech. 
7 | """ 8 | 9 | words_of_message = speech_text.split() 10 | if set(check).issubset(set(words_of_message)): 11 | return True 12 | else: 13 | return False 14 | 15 | if check_message(['who','are', 'you']): 16 | general_conversations.who_are_you() 17 | 18 | elif check_message(['how', 'i', 'look']) or check_message(['how', 'am', 'i']): 19 | general_conversations.how_am_i() 20 | 21 | elif check_message(['tell', 'joke']): 22 | general_conversations.tell_joke() 23 | 24 | elif check_message(['who', 'am', 'i']): 25 | general_conversations.who_am_i(name) 26 | 27 | elif check_message(['where', 'born']): 28 | general_conversations.where_born() 29 | 30 | elif check_message(['how', 'are', 'you']): 31 | general_conversations.how_are_you() 32 | 33 | elif check_message(['time']): 34 | tell_time.what_is_time() 35 | 36 | else: 37 | general_conversations.undefined() 38 | -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-2/main.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import yaml 4 | import speech_recognition as sr 5 | 6 | from brain import brain 7 | from GreyMatter.SenseCells.tts import tts 8 | 9 | profile = open('profile.yaml') 10 | profile_data = yaml.safe_load(profile) 11 | profile.close() 12 | 13 | # Functioning Variables 14 | name = profile_data['name'] 15 | city_name = profile_data['city_name'] 16 | 17 | tts('Welcome ' + name + ', systems are now ready to run. 
How can I help you?') 18 | 19 | def main(): 20 | r = sr.Recognizer() 21 | with sr.Microphone() as source: 22 | print("Say something!") 23 | audio = r.listen(source) 24 | 25 | try: 26 | speech_text = r.recognize_google(audio).lower().replace("'", "") 27 | print("Melissa thinks you said '" + speech_text + "'") 28 | except sr.UnknownValueError: 29 | print("Melissa could not understand audio") 30 | except sr.RequestError as e: 31 | print("Could not request results from Google Speech Recognition service; {0}".format(e)) 32 | 33 | brain(name, speech_text) 34 | 35 | main() 36 | -------------------------------------------------------------------------------- /Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Pant_Ch03_Getting_Your_Hands_Dirty_Conversation_Module/Melissa-Core-2/profile.yaml.default: -------------------------------------------------------------------------------- 1 | name: 2 | Tanay 3 | city_name: 4 | New Delhi -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/SenseCells/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/SenseCells/__init__.py -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/SenseCells/tts.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | def tts(message): 5 | """ 6 | This function takes a message as an argument and converts it to speech depending on the OS. 
7 | """ 8 | if sys.platform == 'darwin': 9 | tts_engine = 'say' 10 | return os.system(tts_engine + ' ' + message) 11 | elif sys.platform == 'linux2' or sys.platform == 'linux': 12 | tts_engine = 'espeak' 13 | return os.system(tts_engine + ' "' + message + '"') 14 | -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/__init__.py -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/business_news_reader.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from bs4 import BeautifulSoup 3 | 4 | from SenseCells.tts import tts 5 | 6 | # NDTV News 7 | fixed_url = 'http://profit.ndtv.com/news/latest/' 8 | news_headlines_list = [] 9 | news_details_list = [] 10 | 11 | for i in range(1, 2): 12 | changing_slug = '/page-' + str(i) 13 | url = fixed_url + changing_slug 14 | r = requests.get(url) 15 | data = r.text 16 | 17 | soup = BeautifulSoup(data, "html.parser") 18 | 19 | for news_headlines in soup.find_all('h2'): 20 | news_headlines_list.append(news_headlines.get_text()) 21 | 22 | del news_headlines_list[-2:] 23 | 24 | for news_details in soup.find_all('p', 'intro'): 25 | news_details_list.append(news_details.get_text()) 26 | 27 | news_headlines_list_small = [element.lower().replace("(", "").replace(")", "").replace("'", "") for element in news_headlines_list] 28 | news_details_list_small = 
[element.lower().replace("(", "").replace(")", "").replace("'", "") for element in news_details_list] 29 | 30 | news_dictionary = dict(zip(news_headlines_list_small, news_details_list_small)) 31 | 32 | def news_reader(): 33 | for key, value in news_dictionary.items(): 34 | tts('Headline, ' + key) 35 | tts('News, ' + value) 36 | -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/connect_proxy.py: -------------------------------------------------------------------------------- 1 | from selenium import webdriver 2 | 3 | from SenseCells.tts import tts 4 | 5 | def connect_to_proxy(proxy_username, proxy_password): 6 | tts("Connecting to proxy server.") 7 | browser = webdriver.Firefox() 8 | browser.get('http://10.1.1.9:8090/httpclient.html') 9 | 10 | id_number = browser.find_element_by_name('username') 11 | password = browser.find_element_by_name('password') 12 | 13 | id_number.send_keys(proxy_username) 14 | password.send_keys(proxy_password) 15 | 16 | browser.find_element_by_name('btnSubmit').click() 17 | -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/define_subject.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | import wikipedia 4 | 5 | from SenseCells.tts import tts 6 | 7 | def define_subject(speech_text): 8 | words_of_message = speech_text.split() 9 | words_of_message.remove('define') 10 | cleaned_message = ' '.join(words_of_message) 11 | 12 | try: 13 | wiki_data = wikipedia.summary(cleaned_message, sentences=5) 14 | 15 | regEx = re.compile(r'([^\(]*)\([^\)]*\) *(.*)') 16 | m = regEx.match(wiki_data) 17 | while m: 18 | wiki_data = m.group(1) + m.group(2) 19 | m = regEx.match(wiki_data) 20 | 21 | wiki_data = 
wiki_data.replace("'", "") 22 | tts(wiki_data) 23 | except wikipedia.exceptions.DisambiguationError as e: 24 | tts('Can you please be more specific? You may choose something from the following.') 25 | print("Can you please be more specific? You may choose something from the following; {0}".format(e)) 26 | -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/general_conversations.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | from SenseCells.tts import tts 4 | 5 | def who_are_you(): 6 | messages = ['I am Melissa, your lovely personal assistant.', 7 | 'Melissa, didnt I tell you before?', 8 | 'You ask that so many times! I am Melissa.'] 9 | tts(random.choice(messages)) 10 | 11 | def how_am_i(): 12 | replies =['You are goddamn handsome!', 'My knees go weak when I see you.', 'You are sexy!', 'You look like the kindest person that I have met.'] 13 | tts(random.choice(replies)) 14 | 15 | def tell_joke(): 16 | jokes = ['What happens to a frogs car when it breaks down? It gets toad away.', 'Why was six scared of seven? Because seven ate nine.', 'No, I always forget the punch line.'] 17 | tts(random.choice(jokes)) 18 | 19 | def who_am_i(name): 20 | tts('You are ' + name + ', a brilliant person. 
I love you!') 21 | 22 | def where_born(): 23 | tts('I was created by a magician named Tanay, in India, the magical land of himalayas.') 24 | 25 | def how_are_you(): 26 | tts('I am fine, thank you.') 27 | 28 | def undefined(): 29 | tts('I dont know what that means!') 30 | -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/open_firefox.py: -------------------------------------------------------------------------------- 1 | from selenium import webdriver 2 | 3 | from SenseCells.tts import tts 4 | 5 | def open_firefox(): 6 | tts('Aye aye captain, opening Firefox') 7 | webdriver.Firefox() 8 | -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/sleep.py: -------------------------------------------------------------------------------- 1 | from SenseCells.tts import tts 2 | 3 | def go_to_sleep(): 4 | tts('Goodbye! 
Have a great day!') 5 | quit() 6 | -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/tell_time.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | from SenseCells.tts import tts 4 | 5 | def what_is_time(): 6 | tts("The time is " + datetime.strftime(datetime.now(), '%H:%M:%S')) 7 | -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/weather.py: -------------------------------------------------------------------------------- 1 | import pywapi 2 | 3 | from SenseCells.tts import tts 4 | 5 | def weather(city_name, city_code): 6 | weather_com_result = pywapi.get_weather_from_weather_com(city_code) 7 | weather_result = "Weather.com says: It is " + weather_com_result['current_conditions']['text'].lower() + " and " + weather_com_result['current_conditions']['temperature'] + "degree celcius now in " + city_name 8 | tts(weather_result) 9 | -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Tanay Pant 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the 
following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/brain.py: -------------------------------------------------------------------------------- 1 | from GreyMatter import tell_time, general_conversations, weather, define_subject, business_news_reader, open_firefox, connect_proxy, sleep 2 | 3 | def brain(name, speech_text, city_name, city_code, proxy_username, proxy_password): 4 | def check_message(check): 5 | """ 6 | This function checks if the items in the list (specified in argument) are present in the user's input speech. 
7 | """ 8 | 9 | words_of_message = speech_text.split() 10 | if set(check).issubset(set(words_of_message)): 11 | return True 12 | else: 13 | return False 14 | 15 | if check_message(['who','are', 'you']): 16 | general_conversations.who_are_you() 17 | 18 | elif check_message(['how', 'i', 'look']) or check_message(['how', 'am', 'i']): 19 | general_conversations.how_am_i() 20 | 21 | elif check_message(['tell', 'joke']): 22 | general_conversations.tell_joke() 23 | 24 | elif check_message(['who', 'am', 'i']): 25 | general_conversations.who_am_i(name) 26 | 27 | elif check_message(['where', 'born']): 28 | general_conversations.where_born() 29 | 30 | elif check_message(['how', 'are', 'you']): 31 | general_conversations.how_are_you() 32 | 33 | elif check_message(['how', 'weather']) or check_message(['hows', 'weather']): 34 | weather.weather(city_name, city_code) 35 | 36 | elif check_message(['business', 'news']): 37 | business_news_reader.news_reader() 38 | 39 | elif check_message(['define']): 40 | define_subject.define_subject(speech_text) 41 | 42 | elif check_message(['time']): 43 | tell_time.what_is_time() 44 | 45 | elif check_message(['connect', 'proxy']): 46 | connect_proxy.connect_to_proxy(proxy_username, proxy_password) 47 | 48 | elif check_message(['open', 'firefox']): 49 | open_firefox.open_firefox() 50 | 51 | elif check_message(['sleep']): 52 | sleep.go_to_sleep() 53 | 54 | else: 55 | general_conversations.undefined() 56 | -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/main.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import yaml 4 | import speech_recognition as sr 5 | 6 | from brain import brain 7 | from GreyMatter.SenseCells.tts import tts 8 | 9 | profile = open('profile.yaml') 10 | profile_data = yaml.safe_load(profile) 11 | profile.close() 12 | 13 | # 
Functioning Variables 14 | name = profile_data['name'] 15 | city_name = profile_data['city_name'] 16 | city_code = profile_data['city_code'] 17 | proxy_username = profile_data['proxy_username'] 18 | proxy_password = profile_data['proxy_password'] 19 | 20 | tts('Welcome ' + name + ', systems are now ready to run. How can I help you?') 21 | 22 | def main(): 23 | r = sr.Recognizer() 24 | with sr.Microphone() as source: 25 | print("Say something!") 26 | audio = r.listen(source) 27 | 28 | try: 29 | speech_text = r.recognize_google(audio).lower().replace("'", "") 30 | print("Melissa thinks you said '" + speech_text + "'") 31 | except sr.UnknownValueError: 32 | print("Melissa could not understand audio") 33 | except sr.RequestError as e: 34 | print("Could not request results from Google Speech Recognition service; {0}".format(e)) 35 | 36 | brain(name, speech_text, city_name, city_code, proxy_username, proxy_password) 37 | main() 38 | 39 | main() 40 | -------------------------------------------------------------------------------- /Pant_Ch04_Using_Internet_to_Gather_Information/Pant_Ch04_Using_Internet_to_Gather_Information/Melissa-Core/profile.yaml.default: -------------------------------------------------------------------------------- 1 | name: 2 | Tanay 3 | city_name: 4 | New Delhi 5 | city_code: 6 | INXX0096 7 | proxy_username: 8 | Something 9 | proxy_password: 10 | Something -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/SenseCells/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/SenseCells/__init__.py 
-------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/SenseCells/tts.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | def tts(message): 5 | """ 6 | This function takes a message as an argument and converts it to speech depending on the OS. 7 | """ 8 | if sys.platform == 'darwin': 9 | tts_engine = 'say' 10 | return os.system(tts_engine + ' ' + message) 11 | elif sys.platform == 'linux2' or sys.platform == 'linux': 12 | tts_engine = 'espeak' 13 | return os.system(tts_engine + ' "' + message + '"') 14 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/__init__.py -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/business_news_reader.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from bs4 import BeautifulSoup 3 | 4 | from SenseCells.tts import tts 5 | 6 | # NDTV News 7 | fixed_url = 'http://profit.ndtv.com/news/latest/' 8 | news_headlines_list = [] 9 | news_details_list = [] 10 | 11 | for i in range(1, 2): 12 | changing_slug = '/page-' + str(i) 13 | url = fixed_url + changing_slug 14 | r = requests.get(url) 15 | data = r.text 16 | 17 | soup = 
BeautifulSoup(data, "html.parser") 18 | 19 | for news_headlines in soup.find_all('h2'): 20 | news_headlines_list.append(news_headlines.get_text()) 21 | 22 | del news_headlines_list[-2:] 23 | 24 | for news_details in soup.find_all('p', 'intro'): 25 | news_details_list.append(news_details.get_text()) 26 | 27 | news_headlines_list_small = [element.lower().replace("(", "").replace(")", "").replace("'", "") for element in news_headlines_list] 28 | news_details_list_small = [element.lower().replace("(", "").replace(")", "").replace("'", "") for element in news_details_list] 29 | 30 | news_dictionary = dict(zip(news_headlines_list_small, news_details_list_small)) 31 | 32 | def news_reader(): 33 | for key, value in news_dictionary.items(): 34 | tts('Headline, ' + key) 35 | tts('News, ' + value) 36 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/connect_proxy.py: -------------------------------------------------------------------------------- 1 | from selenium import webdriver 2 | 3 | from SenseCells.tts import tts 4 | 5 | def connect_to_proxy(proxy_username, proxy_password): 6 | tts("Connecting to proxy server.") 7 | browser = webdriver.Firefox() 8 | browser.get('http://10.1.1.9:8090/httpclient.html') 9 | 10 | id_number = browser.find_element_by_name('username') 11 | password = browser.find_element_by_name('password') 12 | 13 | id_number.send_keys(proxy_username) 14 | password.send_keys(proxy_password) 15 | 16 | browser.find_element_by_name('btnSubmit').click() 17 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/define_subject.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | import wikipedia 4 | 5 | from 
SenseCells.tts import tts 6 | 7 | def define_subject(speech_text): 8 | words_of_message = speech_text.split() 9 | words_of_message.remove('define') 10 | cleaned_message = ' '.join(words_of_message) 11 | 12 | try: 13 | wiki_data = wikipedia.summary(cleaned_message, sentences=5) 14 | 15 | regEx = re.compile(r'([^\(]*)\([^\)]*\) *(.*)') 16 | m = regEx.match(wiki_data) 17 | while m: 18 | wiki_data = m.group(1) + m.group(2) 19 | m = regEx.match(wiki_data) 20 | 21 | wiki_data = wiki_data.replace("'", "") 22 | tts(wiki_data) 23 | except wikipedia.exceptions.DisambiguationError as e: 24 | tts('Can you please be more specific? You may choose something from the following.') 25 | print("Can you please be more specific? You may choose something from the following; {0}".format(e)) 26 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/general_conversations.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | from SenseCells.tts import tts 4 | 5 | def who_are_you(): 6 | messages = ['I am Melissa, your lovely personal assistant.', 7 | 'Melissa, didnt I tell you before?', 8 | 'You ask that so many times! I am Melissa.'] 9 | tts(random.choice(messages)) 10 | 11 | def how_am_i(): 12 | replies =['You are goddamn handsome!', 'My knees go weak when I see you.', 'You are sexy!', 'You look like the kindest person that I have met.'] 13 | tts(random.choice(replies)) 14 | 15 | def tell_joke(): 16 | jokes = ['What happens to a frogs car when it breaks down? It gets toad away.', 'Why was six scared of seven? Because seven ate nine.', 'No, I always forget the punch line.'] 17 | tts(random.choice(jokes)) 18 | 19 | def who_am_i(name): 20 | tts('You are ' + name + ', a brilliant person. 
I love you!') 21 | 22 | def where_born(): 23 | tts('I was created by a magician named Tanay, in India, the magical land of himalayas.') 24 | 25 | def how_are_you(): 26 | tts('I am fine, thank you.') 27 | 28 | def undefined(): 29 | tts('I dont know what that means!') 30 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/open_firefox.py: -------------------------------------------------------------------------------- 1 | from selenium import webdriver 2 | 3 | from SenseCells.tts import tts 4 | 5 | def open_firefox(): 6 | tts('Aye aye captain, opening Firefox') 7 | webdriver.Firefox() 8 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/play_music.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import random 4 | 5 | from SenseCells.tts import tts 6 | 7 | def mp3gen(music_path): 8 | """ 9 | This function finds all the mp3 files in a folder and it's subfolders and returns a list. 10 | """ 11 | music_list = [] 12 | for root, dirs, files in os.walk(music_path): 13 | for filename in files: 14 | if os.path.splitext(filename)[1] == ".mp3": 15 | music_list.append(os.path.join(root, filename.lower())) 16 | return music_list 17 | 18 | def music_player(file_name): 19 | """ 20 | This function takes the name of a music file as an argument and plays it depending on the OS. 
21 | """ 22 | if sys.platform == 'darwin': 23 | player = "afplay '" + file_name + "'" 24 | return os.system(player) 25 | elif sys.platform == 'linux2' or sys.platform == 'linux': 26 | player = "mpg123 '" + file_name + "'" 27 | return os.system(player) 28 | 29 | def play_random(music_path): 30 | try: 31 | music_listing = mp3gen(music_path) 32 | music_playing = random.choice(music_listing) 33 | tts("Now playing: " + music_playing) 34 | music_player(music_playing) 35 | except IndexError as e: 36 | tts('No music files found.') 37 | print("No music files found: {0}".format(e)) 38 | 39 | def play_specific_music(speech_text, music_path): 40 | words_of_message = speech_text.split() 41 | words_of_message.remove('play') 42 | cleaned_message = ' '.join(words_of_message) 43 | music_listing = mp3gen(music_path) 44 | 45 | for i in range(0, len(music_listing)): 46 | if cleaned_message in music_listing[i]: 47 | music_player(music_listing[i]) 48 | 49 | def play_shuffle(music_path): 50 | try: 51 | music_listing = mp3gen(music_path) 52 | random.shuffle(music_listing) 53 | for i in range(0, len(music_listing)): 54 | music_player(music_listing[i]) 55 | except IndexError as e: 56 | tts('No music files found.') 57 | print("No music files found: {0}".format(e)) 58 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/sleep.py: -------------------------------------------------------------------------------- 1 | from SenseCells.tts import tts 2 | 3 | def go_to_sleep(): 4 | tts('Goodbye! 
Have a great day!') 5 | quit() 6 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/tell_time.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | from SenseCells.tts import tts 4 | 5 | def what_is_time(): 6 | tts("The time is " + datetime.strftime(datetime.now(), '%H:%M:%S')) 7 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/GreyMatter/weather.py: -------------------------------------------------------------------------------- 1 | import pywapi 2 | 3 | from SenseCells.tts import tts 4 | 5 | def weather(city_name, city_code): 6 | weather_com_result = pywapi.get_weather_from_weather_com(city_code) 7 | weather_result = "Weather.com says: It is " + weather_com_result['current_conditions']['text'].lower() + " and " + weather_com_result['current_conditions']['temperature'] + "degree celcius now in " + city_name 8 | tts(weather_result) 9 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Tanay Pant 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject 
to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/brain.py: -------------------------------------------------------------------------------- 1 | from GreyMatter import define_subject, tell_time, general_conversations, play_music, weather, connect_proxy, open_firefox, sleep, business_news_reader 2 | 3 | def brain(name, speech_text, music_path, city_name, city_code, proxy_username, proxy_password): 4 | def check_message(check): 5 | """ 6 | This function checks if the items in the list (specified in argument) are present in the user's input speech. 
7 | """ 8 | 9 | words_of_message = speech_text.split() 10 | if set(check).issubset(set(words_of_message)): 11 | return True 12 | else: 13 | return False 14 | 15 | if check_message(['who','are', 'you']): 16 | general_conversations.who_are_you() 17 | 18 | elif check_message(['how', 'i', 'look']) or check_message(['how', 'am', 'i']): 19 | general_conversations.how_am_i() 20 | 21 | elif check_message(['tell', 'joke']): 22 | general_conversations.tell_joke() 23 | 24 | elif check_message(['who', 'am', 'i']): 25 | general_conversations.who_am_i(name) 26 | 27 | elif check_message(['where', 'born']): 28 | general_conversations.where_born() 29 | 30 | elif check_message(['how', 'are', 'you']): 31 | general_conversations.how_are_you() 32 | 33 | elif check_message(['how', 'weather']) or check_message(['hows', 'weather']): 34 | weather.weather(city_name, city_code) 35 | 36 | elif check_message(['business', 'news']): 37 | business_news_reader.news_reader() 38 | 39 | elif check_message(['define']): 40 | define_subject.define_subject(speech_text) 41 | 42 | elif check_message(['time']): 43 | tell_time.what_is_time() 44 | 45 | elif check_message(['connect', 'proxy']): 46 | connect_proxy.connect_to_proxy(proxy_username, proxy_password) 47 | 48 | elif check_message(['play', 'music']) or check_message(['music']): 49 | play_music.play_random(music_path) 50 | 51 | elif check_message(['play']): 52 | play_music.play_specific_music(speech_text, music_path) 53 | 54 | elif check_message(['party', 'time']) or check_message(['party', 'mix']): 55 | play_music.play_shuffle(music_path) 56 | 57 | elif check_message(['open', 'firefox']): 58 | open_firefox.open_firefox() 59 | 60 | elif check_message(['sleep']): 61 | sleep.go_to_sleep() 62 | 63 | else: 64 | general_conversations.undefined() 65 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/main.py: 
-------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import yaml 4 | import speech_recognition as sr 5 | 6 | from brain import brain 7 | from GreyMatter import play_music 8 | from GreyMatter.SenseCells.tts import tts 9 | 10 | profile = open('profile.yaml') 11 | profile_data = yaml.safe_load(profile) 12 | profile.close() 13 | 14 | # Functioning Variables 15 | name = profile_data['name'] 16 | city_name = profile_data['city_name'] 17 | city_code = profile_data['city_code'] 18 | proxy_username = profile_data['proxy_username'] 19 | proxy_password = profile_data['proxy_password'] 20 | music_path = profile_data['music_path'] 21 | 22 | tts('Welcome ' + name + ', systems are now ready to run. How can I help you?') 23 | 24 | def main(): 25 | r = sr.Recognizer() 26 | with sr.Microphone() as source: 27 | print("Say something!") 28 | audio = r.listen(source) 29 | 30 | try: 31 | speech_text = r.recognize_google(audio).lower().replace("'", "") 32 | print("Melissa thinks you said '" + speech_text + "'") 33 | except sr.UnknownValueError: 34 | print("Melissa could not understand audio") 35 | except sr.RequestError as e: 36 | print("Could not request results from Google Speech Recognition service; {0}".format(e)) 37 | 38 | play_music.mp3gen(music_path) 39 | brain(name, speech_text, music_path, city_name, city_code, proxy_username, proxy_password) 40 | 41 | main() 42 | 43 | main() 44 | -------------------------------------------------------------------------------- /Pant_Ch05_Developing_a_Music_Player_for_Melissa/Pant_Ch05_Developing_a_Music_Player_for_Melissa/Melissa-Core/profile.yaml.default: -------------------------------------------------------------------------------- 1 | name: 2 | Tanay 3 | city_name: 4 | New Delhi 5 | city_code: 6 | INXX0096 7 | proxy_username: 8 | Something 9 | proxy_password: 10 | Something 11 | music_path: 12 | . 
-------------------------------------------------------------------------------- /Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/GreyMatter/SenseCells/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/GreyMatter/SenseCells/__init__.py -------------------------------------------------------------------------------- /Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/GreyMatter/SenseCells/tts.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | def tts(message): 5 | """ 6 | This function takes a message as an argument and converts it to speech depending on the OS. 
7 | """ 8 | if sys.platform == 'darwin': 9 | tts_engine = 'say' 10 | return os.system(tts_engine + ' ' + message) 11 | elif sys.platform == 'linux2' or sys.platform == 'linux': 12 | tts_engine = 'espeak' 13 | return os.system(tts_engine + ' "' + message + '"') 14 | -------------------------------------------------------------------------------- /Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/GreyMatter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/GreyMatter/__init__.py -------------------------------------------------------------------------------- /Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/GreyMatter/business_news_reader.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from bs4 import BeautifulSoup 3 | 4 | from SenseCells.tts import tts 5 | 6 | # NDTV News 7 | fixed_url = 'http://profit.ndtv.com/news/latest/' 8 | news_headlines_list = [] 9 | news_details_list = [] 10 | 11 | for i in range(1, 2): 12 | changing_slug = '/page-' + str(i) 13 | url = fixed_url + changing_slug 14 | r = requests.get(url) 15 | data = r.text 16 | 17 | soup = BeautifulSoup(data, "html.parser") 18 | 19 | for news_headlines in soup.find_all('h2'): 20 | news_headlines_list.append(news_headlines.get_text()) 21 | 22 | del news_headlines_list[-2:] 23 | 24 | for news_details in soup.find_all('p', 'intro'): 25 | news_details_list.append(news_details.get_text()) 26 | 27 | news_headlines_list_small = [element.lower().replace("(", "").replace(")", "").replace("'", "") for element in news_headlines_list] 28 | news_details_list_small = 
[element.lower().replace("(", "").replace(")", "").replace("'", "") for element in news_details_list] 29 | 30 | news_dictionary = dict(zip(news_headlines_list_small, news_details_list_small)) 31 | 32 | def news_reader(): 33 | for key, value in news_dictionary.items(): 34 | tts('Headline, ' + key) 35 | tts('News, ' + value) 36 | -------------------------------------------------------------------------------- /Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/GreyMatter/connect_proxy.py: -------------------------------------------------------------------------------- 1 | from selenium import webdriver 2 | 3 | from SenseCells.tts import tts 4 | 5 | def connect_to_proxy(proxy_username, proxy_password): 6 | tts("Connecting to proxy server.") 7 | browser = webdriver.Firefox() 8 | browser.get('http://10.1.1.9:8090/httpclient.html') 9 | 10 | id_number = browser.find_element_by_name('username') 11 | password = browser.find_element_by_name('password') 12 | 13 | id_number.send_keys(proxy_username) 14 | password.send_keys(proxy_password) 15 | 16 | browser.find_element_by_name('btnSubmit').click() 17 | -------------------------------------------------------------------------------- /Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/GreyMatter/define_subject.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | import wikipedia 4 | 5 | from SenseCells.tts import tts 6 | 7 | def define_subject(speech_text): 8 | words_of_message = speech_text.split() 9 | words_of_message.remove('define') 10 | cleaned_message = ' '.join(words_of_message) 11 | 12 | try: 13 | wiki_data = wikipedia.summary(cleaned_message, sentences=5) 14 | 15 | regEx = re.compile(r'([^\(]*)\([^\)]*\) *(.*)') 16 | m = regEx.match(wiki_data) 17 | while m: 18 | wiki_data = m.group(1) + m.group(2) 19 | m = regEx.match(wiki_data) 20 | 21 | wiki_data = 
wiki_data.replace("'", "") 22 | tts(wiki_data) 23 | except wikipedia.exceptions.DisambiguationError as e: 24 | tts('Can you please be more specific? You may choose something from the following.') 25 | print("Can you please be more specific? You may choose something from the following; {0}".format(e)) 26 | -------------------------------------------------------------------------------- /Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/GreyMatter/general_conversations.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | from SenseCells.tts import tts 4 | 5 | def who_are_you(): 6 | messages = ['I am Melissa, your lovely personal assistant.', 7 | 'Melissa, didnt I tell you before?', 8 | 'You ask that so many times! I am Melissa.'] 9 | tts(random.choice(messages)) 10 | 11 | def how_am_i(): 12 | replies =['You are goddamn handsome!', 'My knees go weak when I see you.', 'You are sexy!', 'You look like the kindest person that I have met.'] 13 | tts(random.choice(replies)) 14 | 15 | def tell_joke(): 16 | jokes = ['What happens to a frogs car when it breaks down? It gets toad away.', 'Why was six scared of seven? Because seven ate nine.', 'No, I always forget the punch line.'] 17 | tts(random.choice(jokes)) 18 | 19 | def who_am_i(name): 20 | tts('You are ' + name + ', a brilliant person. 
I love you!') 21 | 22 | def where_born(): 23 | tts('I was created by a magician named Tanay, in India, the magical land of himalayas.') 24 | 25 | def how_are_you(): 26 | tts('I am fine, thank you.') 27 | 28 | def undefined(): 29 | tts('I dont know what that means!') 30 | -------------------------------------------------------------------------------- /Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/GreyMatter/notes.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | from datetime import datetime 3 | 4 | from SenseCells.tts import tts 5 | 6 | def show_all_notes(): 7 | conn = sqlite3.connect('memory.db') 8 | tts('Your notes are as follows:') 9 | 10 | cursor = conn.execute("SELECT notes FROM notes") 11 | 12 | for row in cursor: 13 | tts(row[0]) 14 | 15 | conn.close() 16 | 17 | def note_something(speech_text): 18 | conn = sqlite3.connect('memory.db') 19 | words_of_message = speech_text.split() 20 | words_of_message.remove('note') 21 | cleaned_message = ' '.join(words_of_message) 22 | 23 | conn.execute("INSERT INTO notes (notes, notes_date) VALUES (?, ?)", (cleaned_message, datetime.strftime(datetime.now(), '%d-%m-%Y'))) 24 | conn.commit() 25 | conn.close() 26 | 27 | tts('Your note has been saved.') 28 | -------------------------------------------------------------------------------- /Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/GreyMatter/open_firefox.py: -------------------------------------------------------------------------------- 1 | from selenium import webdriver 2 | 3 | from SenseCells.tts import tts 4 | 5 | def open_firefox(): 6 | tts('Aye aye captain, opening Firefox') 7 | webdriver.Firefox() 8 | -------------------------------------------------------------------------------- 
import os
import sys
import random
import subprocess

from SenseCells.tts import tts

def mp3gen(music_path):
    """Recursively collect the paths of all .mp3 files under *music_path*.

    Extension matching is case-insensitive, and the returned paths keep
    their real on-disk casing: the original lowercased the filename inside
    the joined path, which breaks playback on case-sensitive filesystems.
    """
    music_list = []
    for root, dirs, files in os.walk(music_path):
        for filename in files:
            if os.path.splitext(filename)[1].lower() == ".mp3":
                music_list.append(os.path.join(root, filename))
    return music_list

def music_player(file_name):
    """Play *file_name* with the OS-native CLI player; return its exit status.

    The player is invoked with an argument list (no shell), so filenames
    containing quotes or spaces cannot break the command line or inject
    shell code (the original single-quote interpolation broke on "'").
    Returns None on unsupported platforms.
    """
    if sys.platform == 'darwin':
        return subprocess.call(['afplay', file_name])
    elif sys.platform == 'linux2' or sys.platform == 'linux':
        return subprocess.call(['mpg123', file_name])

def play_random(music_path):
    """Announce and play one randomly chosen track; report an empty library."""
    try:
        music_listing = mp3gen(music_path)
        # random.choice raises IndexError on an empty library.
        music_playing = random.choice(music_listing)
        tts("Now playing: " + music_playing)
        music_player(music_playing)
    except IndexError as e:
        tts('No music files found.')
        print("No music files found: {0}".format(e))

def play_specific_music(speech_text, music_path):
    """Play every track whose path contains the words spoken after 'play'."""
    words_of_message = speech_text.split()
    words_of_message.remove('play')
    cleaned_message = ' '.join(words_of_message)

    # Compare case-insensitively so the (lowercased) transcript still
    # matches files whose real names contain uppercase letters.
    for track in mp3gen(music_path):
        if cleaned_message in track.lower():
            music_player(track)

def play_shuffle(music_path):
    """Play the whole library once in random order; report an empty library.

    random.shuffle([]) never raises, so the original's `except IndexError`
    was dead code; emptiness is now checked explicitly.
    """
    music_listing = mp3gen(music_path)
    if not music_listing:
        tts('No music files found.')
        print("No music files found.")
        return
    random.shuffle(music_listing)
    for track in music_listing:
        music_player(track)
(c) 2015 Tanay Pant 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/brain.py: -------------------------------------------------------------------------------- 1 | from GreyMatter import notes, define_subject, tell_time, general_conversations, play_music, weather, connect_proxy, open_firefox, sleep, business_news_reader 2 | 3 | def brain(name, speech_text, music_path, city_name, city_code, proxy_username, proxy_password): 4 | def check_message(check): 5 | """ 6 | This function checks if the items in the list (specified in argument) are present in the user's input speech. 
7 | """ 8 | 9 | words_of_message = speech_text.split() 10 | if set(check).issubset(set(words_of_message)): 11 | return True 12 | else: 13 | return False 14 | 15 | if check_message(['who','are', 'you']): 16 | general_conversations.who_are_you() 17 | 18 | elif check_message(['how', 'i', 'look']) or check_message(['how', 'am', 'i']): 19 | general_conversations.how_am_i() 20 | 21 | elif check_message(['tell', 'joke']): 22 | general_conversations.tell_joke() 23 | 24 | elif check_message(['who', 'am', 'i']): 25 | general_conversations.who_am_i(name) 26 | 27 | elif check_message(['where', 'born']): 28 | general_conversations.where_born() 29 | 30 | elif check_message(['how', 'are', 'you']): 31 | general_conversations.how_are_you() 32 | 33 | elif check_message(['how', 'weather']) or check_message(['hows', 'weather']): 34 | weather.weather(city_name, city_code) 35 | 36 | elif check_message(['business', 'news']): 37 | business_news_reader.news_reader() 38 | 39 | elif check_message(['define']): 40 | define_subject.define_subject(speech_text) 41 | 42 | elif check_message(['time']): 43 | tell_time.what_is_time() 44 | 45 | elif check_message(['connect', 'proxy']): 46 | connect_proxy.connect_to_proxy(proxy_username, proxy_password) 47 | 48 | elif check_message(['play', 'music']) or check_message(['music']): 49 | play_music.play_random(music_path) 50 | 51 | elif check_message(['play']): 52 | play_music.play_specific_music(speech_text, music_path) 53 | 54 | elif check_message(['party', 'time']) or check_message(['party', 'mix']): 55 | play_music.play_shuffle(music_path) 56 | 57 | elif check_message(['open', 'firefox']): 58 | open_firefox.open_firefox() 59 | 60 | elif check_message(['note']): 61 | notes.note_something(speech_text) 62 | 63 | elif check_message(['all', 'notes']) or check_message(['notes']): 64 | notes.show_all_notes() 65 | 66 | elif check_message(['sleep']): 67 | sleep.go_to_sleep() 68 | 69 | else: 70 | general_conversations.undefined() 71 | 
import sys

import yaml
import speech_recognition as sr

from brain import brain
from GreyMatter import play_music
from GreyMatter.SenseCells.tts import tts

# Load the user's profile; 'with' guarantees the handle is closed even if
# parsing fails (the original left the file open on a YAML error).
with open('profile.yaml') as profile:
    profile_data = yaml.safe_load(profile)

# Functioning Variables
name = profile_data['name']
city_name = profile_data['city_name']
city_code = profile_data['city_code']
proxy_username = profile_data['proxy_username']
proxy_password = profile_data['proxy_password']
music_path = profile_data['music_path']

tts('Welcome ' + name + ', systems are now ready to run. How can I help you?')

def main():
    """Listen for one spoken command and dispatch it to the brain.

    On a recognition failure the function reports the problem and returns:
    the original fell through its except blocks and crashed with a
    NameError because `speech_text` was never bound.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Say something!")
        audio = r.listen(source)

    try:
        speech_text = r.recognize_google(audio).lower().replace("'", "")
        print("Melissa thinks you said '" + speech_text + "'")
    except sr.UnknownValueError:
        print("Melissa could not understand audio")
        return
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
        return

    play_music.mp3gen(music_path)
    brain(name, speech_text, music_path, city_name, city_code,
          proxy_username, proxy_password)

# The original invoked main() twice at module level, an apparent duplicate;
# a single invocation handles one command per run as intended.
main()
https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/memory.db.default -------------------------------------------------------------------------------- /Pant_Ch06_Developing_a_Note_Taking_Application/Pant_Ch06_Developing_a_Note_Taking_Application/Melissa-Core/profile.yaml.default: -------------------------------------------------------------------------------- 1 | name: 2 | Tanay 3 | city_name: 4 | New Delhi 5 | city_code: 6 | INXX0096 7 | proxy_username: 8 | Something 9 | proxy_password: 10 | Something 11 | music_path: 12 | . -------------------------------------------------------------------------------- /Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/GreyMatter/SenseCells/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/GreyMatter/SenseCells/__init__.py -------------------------------------------------------------------------------- /Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/GreyMatter/SenseCells/tts.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | def tts(message): 5 | """ 6 | This function takes a message as an argument and converts it to speech depending on the OS. 
7 | """ 8 | if sys.platform == 'darwin': 9 | tts_engine = 'say' 10 | return os.system(tts_engine + ' ' + message) 11 | elif sys.platform == 'linux2' or sys.platform == 'linux': 12 | tts_engine = 'espeak' 13 | return os.system(tts_engine + ' "' + message + '"') 14 | -------------------------------------------------------------------------------- /Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/GreyMatter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/GreyMatter/__init__.py -------------------------------------------------------------------------------- /Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/GreyMatter/business_news_reader.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from bs4 import BeautifulSoup 3 | 4 | from SenseCells.tts import tts 5 | 6 | # NDTV News 7 | fixed_url = 'http://profit.ndtv.com/news/latest/' 8 | news_headlines_list = [] 9 | news_details_list = [] 10 | 11 | for i in range(1, 2): 12 | changing_slug = '/page-' + str(i) 13 | url = fixed_url + changing_slug 14 | r = requests.get(url) 15 | data = r.text 16 | 17 | soup = BeautifulSoup(data, "html.parser") 18 | 19 | for news_headlines in soup.find_all('h2'): 20 | news_headlines_list.append(news_headlines.get_text()) 21 | 22 | del news_headlines_list[-2:] 23 | 24 | for news_details in soup.find_all('p', 'intro'): 25 | news_details_list.append(news_details.get_text()) 26 | 27 | news_headlines_list_small = 
from selenium import webdriver

from SenseCells.tts import tts

def connect_to_proxy(proxy_username, proxy_password):
    """Log in to the proxy gateway's captive portal through Firefox.

    Opens the login page, fills in the supplied credentials and submits
    the form. Purely a side-effecting browser interaction; returns nothing.
    """
    tts("Connecting to proxy server.")
    driver = webdriver.Firefox()
    driver.get('http://10.1.1.9:8090/httpclient.html')

    # Fill each named form field with its corresponding credential,
    # username first, then password — same order as before.
    for field_name, value in (('username', proxy_username),
                              ('password', proxy_password)):
        driver.find_element_by_name(field_name).send_keys(value)

    driver.find_element_by_name('btnSubmit').click()
wikipedia.summary(cleaned_message, sentences=5) 14 | 15 | regEx = re.compile(r'([^\(]*)\([^\)]*\) *(.*)') 16 | m = regEx.match(wiki_data) 17 | while m: 18 | wiki_data = m.group(1) + m.group(2) 19 | m = regEx.match(wiki_data) 20 | 21 | wiki_data = wiki_data.replace("'", "") 22 | tts(wiki_data) 23 | except wikipedia.exceptions.DisambiguationError as e: 24 | tts('Can you please be more specific? You may choose something from the following.') 25 | print("Can you please be more specific? You may choose something from the following; {0}".format(e)) 26 | -------------------------------------------------------------------------------- /Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/GreyMatter/general_conversations.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | from SenseCells.tts import tts 4 | 5 | def who_are_you(): 6 | messages = ['I am Melissa, your lovely personal assistant.', 7 | 'Melissa, didnt I tell you before?', 8 | 'You ask that so many times! I am Melissa.'] 9 | tts(random.choice(messages)) 10 | 11 | def how_am_i(): 12 | replies =['You are goddamn handsome!', 'My knees go weak when I see you.', 'You are sexy!', 'You look like the kindest person that I have met.'] 13 | tts(random.choice(replies)) 14 | 15 | def tell_joke(): 16 | jokes = ['What happens to a frogs car when it breaks down? It gets toad away.', 'Why was six scared of seven? Because seven ate nine.', 'What is the difference between snowmen and snowwomen? Snowballs.', 'No, I always forget the punch line.'] 17 | tts(random.choice(jokes)) 18 | 19 | def who_am_i(name): 20 | tts('You are ' + name + ', a brilliant person. 
I love you!') 21 | 22 | def where_born(): 23 | tts('I was created by a magician named Tanay, in India, the magical land of himalayas.') 24 | 25 | def how_are_you(): 26 | tts('I am fine, thank you.') 27 | 28 | def undefined(): 29 | tts('I dont know what that means!') 30 | -------------------------------------------------------------------------------- /Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/GreyMatter/imgur_handler.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sqlite3 3 | from datetime import datetime 4 | 5 | from imgurpython import ImgurClient 6 | 7 | from SenseCells.tts import tts 8 | 9 | def img_list_gen(images_path): 10 | 11 | image_list = [] 12 | for root, dirs, files in os.walk(images_path): 13 | for filename in files: 14 | if os.path.splitext(filename)[1] == ".tiff" or os.path.splitext(filename)[1] == ".png" or os.path.splitext(filename)[1] == ".gif" or os.path.splitext(filename)[1] == ".jpg": 15 | image_list.append(os.path.join(root, filename.lower())) 16 | return image_list 17 | 18 | def image_uploader(speech_text, client_id, client_secret, images_path): 19 | 20 | words_of_message = speech_text.split() 21 | words_of_message.remove('upload') 22 | cleaned_message = ' '.join(words_of_message) 23 | 24 | image_listing = img_list_gen(images_path) 25 | 26 | client = ImgurClient(client_id, client_secret) 27 | 28 | for i in range(0, len(image_listing)): 29 | if cleaned_message in image_listing[i]: 30 | result = client.upload_from_path(image_listing[i], config=None, anon=True) 31 | 32 | conn = sqlite3.connect('memory.db') 33 | conn.execute("INSERT INTO image_uploads (filename, url, upload_date) VALUES (?, ?, ?)", (image_listing[i], result['link'], datetime.strftime(datetime.now(), '%d-%m-%Y'))) 34 | conn.commit() 35 | conn.close() 36 | 37 | print result['link'] 38 | tts('Your image has been 
import sqlite3
from datetime import datetime

from SenseCells.tts import tts

def show_all_notes():
    """Read every saved note from memory.db and speak each one aloud."""
    conn = sqlite3.connect('memory.db')
    try:
        tts('Your notes are as follows:')
        cursor = conn.execute("SELECT notes FROM notes")
        for row in cursor:
            tts(row[0])
    finally:
        # Close even if the query or TTS fails; the original leaked the
        # connection on any exception.
        conn.close()

def note_something(speech_text):
    """Strip the trigger word 'note' from *speech_text* and persist the rest.

    The note is stored with today's date (dd-mm-YYYY). The INSERT is
    parameterized, so spoken text cannot inject SQL.
    """
    words_of_message = speech_text.split()
    # brain() routes here when 'note' is spoken; guard anyway so a direct
    # call without the keyword cannot raise ValueError from list.remove().
    if 'note' in words_of_message:
        words_of_message.remove('note')
    cleaned_message = ' '.join(words_of_message)

    conn = sqlite3.connect('memory.db')
    try:
        conn.execute(
            "INSERT INTO notes (notes, notes_date) VALUES (?, ?)",
            (cleaned_message, datetime.strftime(datetime.now(), '%d-%m-%Y')))
        conn.commit()
    finally:
        conn.close()

    tts('Your note has been saved.')
-------------------------------------------------------------------------------- /Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/GreyMatter/play_music.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import random 4 | 5 | from SenseCells.tts import tts 6 | 7 | def mp3gen(music_path): 8 | """ 9 | This function finds all the mp3 files in a folder and it's subfolders and returns a list. 10 | """ 11 | music_list = [] 12 | for root, dirs, files in os.walk(music_path): 13 | for filename in files: 14 | if os.path.splitext(filename)[1] == ".mp3": 15 | music_list.append(os.path.join(root, filename.lower())) 16 | return music_list 17 | 18 | def music_player(file_name): 19 | """ 20 | This function takes the name of a music file as an argument and plays it depending on the OS. 21 | """ 22 | if sys.platform == 'darwin': 23 | player = "afplay '" + file_name + "'" 24 | return os.system(player) 25 | elif sys.platform == 'linux2' or sys.platform == 'linux': 26 | player = "mpg123 '" + file_name + "'" 27 | return os.system(player) 28 | 29 | def play_random(music_path): 30 | try: 31 | music_listing = mp3gen(music_path) 32 | music_playing = random.choice(music_listing) 33 | tts("Now playing: " + music_playing) 34 | music_player(music_playing) 35 | except IndexError as e: 36 | tts('No music files found.') 37 | print("No music files found: {0}".format(e)) 38 | 39 | def play_specific_music(speech_text, music_path): 40 | words_of_message = speech_text.split() 41 | words_of_message.remove('play') 42 | cleaned_message = ' '.join(words_of_message) 43 | music_listing = mp3gen(music_path) 44 | 45 | for i in range(0, len(music_listing)): 46 | if cleaned_message in music_listing[i]: 47 | music_player(music_listing[i]) 48 | 49 | def play_shuffle(music_path): 50 | try: 51 | music_listing = mp3gen(music_path) 52 | 
from datetime import datetime

from SenseCells.tts import tts

def what_is_time():
    """Speak the current wall-clock time formatted as HH:MM:SS (24-hour)."""
    now = datetime.now()
    tts("The time is " + now.strftime('%H:%M:%S'))
import pywapi

from SenseCells.tts import tts

def weather(city_name, city_code):
    """Fetch current conditions for *city_code* from Weather.com and speak them.

    *city_code* is the Weather.com location code (e.g. 'INXX0096');
    *city_name* only appears in the spoken sentence.
    """
    weather_com_result = pywapi.get_weather_from_weather_com(city_code)
    current = weather_com_result['current_conditions']
    # Original output read "25degree celcius": missing space before the unit
    # and a misspelling — both fixed in the spoken string.
    weather_result = ("Weather.com says: It is " + current['text'].lower() +
                      " and " + current['temperature'] +
                      " degrees celsius now in " + city_name)
    tts(weather_result)
5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/brain.py: -------------------------------------------------------------------------------- 1 | from GreyMatter import notes, define_subject, tell_time, general_conversations, twitter_pull, play_music, weather, connect_proxy, open_firefox, sleep, business_news_reader, twitter_interaction, imgur_handler 2 | 3 | def brain(name, speech_text, music_path, city_name, city_code, proxy_username, proxy_password, consumer_key, consumer_secret, access_token, access_token_secret, client_id, client_secret, images_path): 4 | def check_message(check): 5 | """ 6 | This function checks if the items in the list (specified in argument) are present in the user's input speech. 7 | """ 8 | 9 | words_of_message = speech_text.split() 10 | if set(check).issubset(set(words_of_message)): 11 | return True 12 | else: 13 | return False 14 | 15 | if check_message(['who','are', 'you']): 16 | general_conversations.who_are_you() 17 | 18 | elif check_message(['tweet']): 19 | twitter_interaction.post_tweet(speech_text, consumer_key, consumer_secret, access_token, access_token_secret) 20 | 21 | elif check_message(['business', 'news']): 22 | business_news_reader.news_reader() 23 | 24 | elif check_message(['how', 'i', 'look']) or check_message(['how', 'am', 'i']): 25 | general_conversations.how_am_i() 26 | 27 | elif check_message(['all', 'note']) or check_message(['all', 'notes']) or check_message(['notes']): 28 | notes.show_all_notes() 29 | 30 | elif check_message(['note']): 31 | notes.note_something(speech_text) 32 | 33 | elif check_message(['define']): 34 | define_subject.define_subject(speech_text) 35 | 36 | elif check_message(['tell', 'joke']): 37 | general_conversations.tell_joke() 38 | 39 | elif check_message(['who', 'am', 'i']): 40 | 
general_conversations.who_am_i(name) 41 | 42 | elif check_message(['where', 'born']): 43 | general_conversations.where_born() 44 | 45 | elif check_message(['how', 'are', 'you']): 46 | general_conversations.how_are_you() 47 | 48 | elif check_message(['my', 'tweets']): 49 | twitter_pull.my_tweets() 50 | 51 | elif check_message(['party', 'time']) or check_message(['party', 'mix']): 52 | play_music.play_shuffle(music_path) 53 | 54 | elif check_message(['play', 'music']) or check_message(['music']): 55 | play_music.play_random(music_path) 56 | 57 | elif check_message(['play']): 58 | play_music.play_specific_music(speech_text, music_path) 59 | 60 | elif check_message(['how', 'weather']) or check_message(['hows', 'weather']): 61 | weather.weather(city_name, city_code) 62 | 63 | elif check_message(['connect', 'proxy']): 64 | connect_proxy.connect_to_proxy(proxy_username, proxy_password) 65 | 66 | elif check_message(['open', 'firefox']): 67 | open_firefox.open_firefox() 68 | 69 | elif check_message(['time']): 70 | tell_time.what_is_time() 71 | 72 | elif check_message(['upload']): 73 | imgur_handler.image_uploader(speech_text, client_id, client_secret, images_path) 74 | 75 | elif check_message(['all', 'uploads']) or check_message(['all', 'images']) or check_message(['uploads']): 76 | imgur_handler.show_all_uploads() 77 | 78 | elif check_message(['sleep']): 79 | sleep.go_to_sleep() 80 | 81 | else: 82 | general_conversations.undefined() 83 | -------------------------------------------------------------------------------- /Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/main.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | import audioop 4 | import tempfile 5 | 6 | import wave 7 | import yaml 8 | import pyaudio 9 | import speech_recognition as sr 10 | 11 | from brain import brain 12 | from GreyMatter 
from GreyMatter.SenseCells.tts import tts

# Read the user's profile once at start-up; the context manager guarantees
# the file handle is released even if the YAML is malformed.
with open('profile.yaml') as profile:
    profile_data = yaml.safe_load(profile)

# Functioning Variables
name = profile_data['name']
music_path = profile_data['music_path']
images_path = profile_data['images_path']
city_name = profile_data['city_name']
city_code = profile_data['city_code']
proxy_username = profile_data['proxy_username']
proxy_password = profile_data['proxy_password']
access_token = profile_data['twitter']['access_token']
access_token_secret = profile_data['twitter']['access_token_secret']
consumer_key = profile_data['twitter']['consumer_key']
consumer_secret = profile_data['twitter']['consumer_secret']
client_id = profile_data['imgur']['client_id']
client_secret = profile_data['imgur']['client_secret']

tts('Welcome ' + name + ', systems are now ready to run. How can I help you?')
37 | 38 | _audio = pyaudio.PyAudio() 39 | 40 | def getScore(data): 41 | rms = audioop.rms(data, 2) 42 | score = rms / 3 43 | return score 44 | 45 | def fetchThreshold(): 46 | THRESHOLD_MULTIPLIER = 1.8 47 | RATE = 16000 48 | CHUNK = 1024 49 | THRESHOLD_TIME = 1 50 | 51 | stream = _audio.open(format=pyaudio.paInt16,channels=1,rate=RATE,input=True,frames_per_buffer=CHUNK) 52 | 53 | frames = [] 54 | lastN = [i for i in range(20)] 55 | 56 | for i in range(0, RATE / CHUNK * THRESHOLD_TIME): 57 | data = stream.read(CHUNK) 58 | frames.append(data) 59 | 60 | lastN.pop(0) 61 | lastN.append(getScore(data)) 62 | average = sum(lastN) / len(lastN) 63 | 64 | stream.stop_stream() 65 | stream.close() 66 | 67 | THRESHOLD = average * THRESHOLD_MULTIPLIER 68 | return THRESHOLD 69 | 70 | 71 | def passiveListen(): 72 | THRESHOLD_MULTIPLIER = 1.8 73 | RATE = 16000 74 | CHUNK = 1024 75 | THRESHOLD_TIME = 1 76 | LISTEN_TIME = 300 77 | 78 | stream = _audio.open(format=pyaudio.paInt16, 79 | channels=1, 80 | rate=RATE, 81 | input=True, 82 | frames_per_buffer=CHUNK) 83 | 84 | frames = [] 85 | lastN = [i for i in range(30)] 86 | 87 | for i in range(0, RATE / CHUNK * THRESHOLD_TIME): 88 | data = stream.read(CHUNK) 89 | frames.append(data) 90 | 91 | lastN.pop(0) 92 | lastN.append(getScore(data)) 93 | average = sum(lastN) / len(lastN) 94 | 95 | THRESHOLD = average * THRESHOLD_MULTIPLIER 96 | frames = [] 97 | didDetect = False 98 | 99 | for i in range(0, RATE / CHUNK * LISTEN_TIME): 100 | data = stream.read(CHUNK) 101 | frames.append(data) 102 | score = getScore(data) 103 | 104 | if score > THRESHOLD: 105 | didDetect = True 106 | stream.stop_stream() 107 | stream.close() 108 | time.sleep(1) 109 | tts('Yes?') 110 | main() 111 | 112 | if not didDetect: 113 | print "No disturbance detected" 114 | stream.stop_stream() 115 | stream.close() 116 | 117 | def main(): 118 | try: 119 | if sys.argv[1] == '--text' or sys.argv[1] == '-t': 120 | text_mode = True 121 | speech_text = raw_input("Write something: 
").lower().replace("'", "") 122 | except IndexError: 123 | text_mode = False 124 | r = sr.Recognizer() 125 | with sr.Microphone() as source: 126 | print("Say something!") 127 | audio = r.listen(source) 128 | 129 | try: 130 | speech_text = r.recognize_google(audio).lower().replace("'", "") 131 | print("Melissa thinks you said '" + speech_text + "'") 132 | except sr.UnknownValueError: 133 | print("Melissa could not understand audio") 134 | except sr.RequestError as e: 135 | print("Could not request results from Google Speech Recognition service; {0}".format(e)) 136 | 137 | play_music.mp3gen(music_path) 138 | imgur_handler.img_list_gen(images_path) 139 | 140 | brain(name, speech_text, music_path, city_name, city_code, proxy_username, proxy_password, consumer_key, consumer_secret, access_token, access_token_secret, client_id, client_secret, images_path) 141 | 142 | if text_mode: 143 | main() 144 | else: 145 | passiveListen() 146 | 147 | main() 148 | -------------------------------------------------------------------------------- /Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/memory.db.default: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/memory.db.default -------------------------------------------------------------------------------- /Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Pant_Ch07_Building_a_Voice_Controlled_Interface_for_Twitter_and_Imgur/Melissa-Core/profile.yaml.default: -------------------------------------------------------------------------------- 1 | name: 2 | Tanay 3 | music_path: 4 | . 5 | images_path: 6 | . 
import os
import sys

def tts(message):
    """
    Speak *message* aloud via the platform's command-line TTS tool.

    OS X shells out to ``say``; Linux shells out to ``espeak``. On any
    other platform nothing happens and None is returned implicitly,
    matching the original contract.
    """
    platform = sys.platform
    if platform == 'darwin':
        # 'say' receives the words as separate shell arguments.
        return os.system('say' + ' ' + message)
    if platform in ('linux2', 'linux'):
        # espeak wants the whole sentence as one quoted argument.
        return os.system('espeak' + ' "' + message + '"')
def define_subject(speech_text):
    """
    Look the subject up on Wikipedia and read a cleaned summary aloud.

    The keyword 'define' is stripped from the speech and the remainder is
    used as the search term. Parenthesised asides (pronunciations and the
    like) are removed from the summary before it is spoken.
    """
    words_of_message = speech_text.split()
    words_of_message.remove('define')
    cleaned_message = ' '.join(words_of_message)

    try:
        wiki_data = wikipedia.summary(cleaned_message, sentences=5)

        # Repeatedly strip the first "(...)" group until none remain.
        regEx = re.compile(r'([^\(]*)\([^\)]*\) *(.*)')
        m = regEx.match(wiki_data)
        while m:
            wiki_data = m.group(1) + m.group(2)
            m = regEx.match(wiki_data)

        # Apostrophes confuse the shell-based TTS backend.
        wiki_data = wiki_data.replace("'", "")
        tts(wiki_data)
    except wikipedia.exceptions.DisambiguationError as e:
        tts('Can you please be more specific? You may choose something from the following.')
        # BUGFIX: the original printed a copy-pasted speech-recognition error
        # message here; what the user actually needs is the candidate list.
        print("Possible subjects: {0}".format(e.options))
def show_all_notes():
    """Read every saved note out of memory.db and speak it."""
    conn = sqlite3.connect('memory.db')
    tts('Your notes are as follows:')

    cursor = conn.execute("SELECT notes FROM notes")

    for row in cursor:
        tts(row[0])

    # CLEANUP: a read-only SELECT needs no commit; just release the handle.
    conn.close()

def note_something(speech_text):
    """Strip the 'note' keyword and persist the rest with today's date."""
    words_of_message = speech_text.split()
    words_of_message.remove('note')
    cleaned_message = ' '.join(words_of_message)

    conn = sqlite3.connect('memory.db')
    try:
        conn.execute("INSERT INTO notes (notes, notes_date) VALUES (?, ?)",
                     (cleaned_message, datetime.strftime(datetime.now(), '%d-%m-%Y')))
        conn.commit()
    finally:
        # BUGFIX: the original leaked the connection if the INSERT raised.
        conn.close()

    tts('Your note has been saved.')
def mp3gen(music_path):
    """
    Recursively collect every .mp3 file under *music_path* (and its
    subfolders) and return the paths as a list.
    """
    music_list = []
    for root, dirs, files in os.walk(music_path):
        for filename in files:
            # BUGFIX: match the extension case-insensitively, and keep the
            # real on-disk name -- the original lower-cased the stored path,
            # which produced nonexistent paths on case-sensitive filesystems.
            if os.path.splitext(filename)[1].lower() == ".mp3":
                music_list.append(os.path.join(root, filename))
    return music_list

def music_player(file_name):
    """
    Play one music file with the OS's command-line player
    (afplay on OS X, mpg123 on Linux).
    """
    if sys.platform == 'darwin':
        player = "afplay '" + file_name + "'"
        return os.system(player)
    elif sys.platform == 'linux2' or sys.platform == 'linux':
        player = "mpg123 '" + file_name + "'"
        return os.system(player)

def play_random(music_path):
    """Announce and play one randomly chosen track from the library."""
    try:
        music_listing = mp3gen(music_path)
        music_playing = random.choice(music_listing)
        tts("Now playing: " + music_playing)
        music_player(music_playing)
    except IndexError as e:
        # random.choice on an empty listing raises IndexError.
        tts('No music files found.')
        print("No music files found: {0}".format(e))

def play_specific_music(speech_text, music_path):
    """Play every track whose path contains the requested words."""
    words_of_message = speech_text.split()
    words_of_message.remove('play')
    cleaned_message = ' '.join(words_of_message)
    music_listing = mp3gen(music_path)

    for i in range(0, len(music_listing)):
        # Compare case-insensitively now that mp3gen keeps real filenames.
        if cleaned_message in music_listing[i].lower():
            music_player(music_listing[i])
def what_is_time():
    """Speak the current wall-clock time, formatted as HH:MM:SS."""
    current_moment = datetime.now()
    tts("The time is " + datetime.strftime(current_moment, '%H:%M:%S'))
def weather(city_name, city_code):
    """
    Fetch the current conditions for *city_code* from weather.com via
    pywapi and speak a one-line summary.
    """
    weather_com_result = pywapi.get_weather_from_weather_com(city_code)
    conditions = weather_com_result['current_conditions']
    # BUGFIX: the original concatenation produced e.g. "28degree celcius" --
    # missing spaces and a misspelled unit in the spoken message.
    weather_result = ("Weather.com says: It is " + conditions['text'].lower() +
                      " and " + conditions['temperature'] +
                      " degrees Celsius now in " + city_name)
    tts(weather_result)
from GreyMatter import notes, define_subject, tell_time, general_conversations, twitter_pull, play_music, weather, connect_proxy, open_firefox, sleep

def brain(name, speech_text, music_path, city_name, city_code, proxy_username, proxy_password):
    """
    Route the recognised speech to the matching skill.

    Commands are tried in priority order; the first command whose keyword
    set is fully contained in the spoken words wins. If nothing matches,
    the undefined-response fallback is spoken.
    """
    spoken_words = set(speech_text.split())

    def check_message(check):
        """Return True when every word in *check* occurs in the user's speech."""
        return set(check).issubset(spoken_words)

    # (keyword alternatives, zero-argument action) pairs, tried top to bottom.
    # Order matters: e.g. 'play music' must be tested before the bare 'play'.
    commands = [
        ([['who', 'are', 'you']], lambda: general_conversations.who_are_you()),
        ([['how', 'i', 'look'], ['how', 'am', 'i']], lambda: general_conversations.how_am_i()),
        ([['all', 'note'], ['all', 'notes'], ['notes']], lambda: notes.show_all_notes()),
        ([['note']], lambda: notes.note_something(speech_text)),
        ([['define']], lambda: define_subject.define_subject(speech_text)),
        ([['time']], lambda: tell_time.what_is_time()),
        ([['tell', 'joke']], lambda: general_conversations.tell_joke()),
        ([['who', 'am', 'i']], lambda: general_conversations.who_am_i(name)),
        ([['where', 'born']], lambda: general_conversations.where_born()),
        ([['how', 'are', 'you']], lambda: general_conversations.how_are_you()),
        ([['my', 'tweets']], lambda: twitter_pull.my_tweets()),
        ([['play', 'music'], ['music']], lambda: play_music.play_random(music_path)),
        ([['play']], lambda: play_music.play_specific_music(speech_text, music_path)),
        ([['how', 'weather'], ['hows', 'weather']], lambda: weather.weather(city_name, city_code)),
        ([['connect', 'proxy']], lambda: connect_proxy.connect_to_proxy(proxy_username, proxy_password)),
        ([['open', 'firefox']], lambda: open_firefox.open_firefox()),
        ([['sleep']], lambda: sleep.go_to_sleep()),
    ]

    for keyword_groups, action in commands:
        if any(check_message(group) for group in keyword_groups):
            action()
            return

    general_conversations.undefined()
import os
import sys
import time

import yaml
import pyaudio
import speech_recognition as sr

from brain import brain
from GreyMatter import play_music
from GreyMatter.SenseCells.tts import tts

# Load the user's configuration once at start-up.
profile = open('profile.yaml')
profile_data = yaml.safe_load(profile)
profile.close()

# Functioning Variables
name = profile_data['name']
music_path = profile_data['music_path']
city_name = profile_data['city_name']
city_code = profile_data['city_code']
proxy_username = profile_data['proxy_username']
proxy_password = profile_data['proxy_password']
access_token = profile_data['twitter']['access_token']
access_token_secret = profile_data['twitter']['access_token_secret']
consumer_key = profile_data['twitter']['consumer_key']
consumer_secret = profile_data['twitter']['consumer_secret']

# NOTE(review): sys.argv[1] is interpolated straight into a filesystem path;
# if this script is reachable from the web front end, a caller could pass
# '../' components. Sanitise the filename upstream -- confirm with caller.
voice_file = os.getcwd() + '/uploads/' + sys.argv[1]

def main(voice_file):
    """Transcribe the uploaded WAV file and route the command to the brain."""
    r = sr.Recognizer()
    with sr.WavFile(voice_file) as source:
        audio = r.record(source)

    try:
        speech_text = r.recognize_google(audio).lower().replace("'", "")
        print("Melissa thinks you said '" + speech_text + "'")
    except sr.UnknownValueError:
        print("Melissa could not understand audio")
        # BUGFIX: speech_text would be undefined below -> NameError.
        return
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
        # BUGFIX: same as above.
        return

    play_music.mp3gen(music_path)
    brain(name, speech_text, music_path, city_name, city_code, proxy_username, proxy_password)

main(voice_file)
function drawBuffer( width, height, context, data ) {
    // Draw a min/max waveform of `data` (audio samples in [-1, 1]) onto a
    // 2D canvas context, one vertical bar per horizontal pixel.
    var step = Math.ceil( data.length / width );
    var amp = height / 2;
    context.fillStyle = "silver";
    context.clearRect(0, 0, width, height);
    for (var i = 0; i < width; i++) {
        var min = 1.0;
        var max = -1.0;
        // BUGFIX: this inner loop was garbled in the source dump
        // ("for (j=0; j max)"); reconstructed min/max scan over the samples
        // that fall into this pixel column.
        for (var j = 0; j < step; j++) {
            var datum = data[(i * step) + j];
            if (datum < min)
                min = datum;
            if (datum > max)
                max = datum;
        }
        context.fillRect(i, (1 + min) * amp, 1, Math.max(1, (max - min) * amp));
    }
}
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Melissa-Web/static/img/bg-sky.png -------------------------------------------------------------------------------- /Pant_Ch08_Building_a_Web_Interface_for_Melissa/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Melissa-Web/static/img/mic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Melissa-Web/static/img/mic.png -------------------------------------------------------------------------------- /Pant_Ch08_Building_a_Web_Interface_for_Melissa/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Melissa-Web/static/img/save.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Melissa-Web/static/img/save.png -------------------------------------------------------------------------------- /Pant_Ch08_Building_a_Web_Interface_for_Melissa/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Melissa-Web/static/img/upload.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Apress/building-a-virtual-assistant-for-raspberry-pi/5ce696e268fc19bfc319b785b1511be07baffc66/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Melissa-Web/static/img/upload.png 
-------------------------------------------------------------------------------- /Pant_Ch08_Building_a_Web_Interface_for_Melissa/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Melissa-Web/static/main.js: -------------------------------------------------------------------------------- 1 | /* Copyright 2013 Chris Wilson 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | window.AudioContext = window.AudioContext || window.webkitAudioContext; 17 | 18 | var audioContext = new AudioContext(); 19 | var audioInput = null, 20 | realAudioInput = null, 21 | inputPoint = null, 22 | audioRecorder = null; 23 | var rafID = null; 24 | var analyserContext = null; 25 | var canvasWidth, canvasHeight; 26 | var recIndex = 0; 27 | 28 | /* TODO: 29 | 30 | - offer mono option 31 | - "Monitor input" switch 32 | */ 33 | 34 | function saveAudio() { 35 | audioRecorder.exportWAV( doneEncoding ); 36 | // could get mono instead by saying 37 | // audioRecorder.exportMonoWAV( doneEncoding ); 38 | } 39 | 40 | function gotBuffers( buffers ) { 41 | var canvas = document.getElementById( "wavedisplay" ); 42 | 43 | drawBuffer( canvas.width, canvas.height, canvas.getContext('2d'), buffers[0] ); 44 | 45 | // the ONLY time gotBuffers is called is right after a new recording is completed - 46 | // so here's where we should set up the download. 
function doneEncoding( blob ) {
    // Offer the finished WAV for download with an incrementing,
    // zero-padded recording name.
    Recorder.setupDownload( blob, "myRecording" + ((recIndex<10)?"0":"") + recIndex + ".wav" );
    recIndex++;
}

function toggleRecording( e ) {
    // `e` is the clicked record button; its "recording" class is the state.
    if (e.classList.contains("recording")) {
        // stop recording
        audioRecorder.stop();
        e.classList.remove("recording");
        audioRecorder.getBuffers( gotBuffers );
    } else {
        // start recording
        if (!audioRecorder)
            return;
        e.classList.add("recording");
        audioRecorder.clear();
        audioRecorder.record();
    }
}

function convertToMono( input ) {
    // Duplicate channel 0 of a stereo node onto both merger outputs.
    var splitter = audioContext.createChannelSplitter(2);
    var merger = audioContext.createChannelMerger(2);

    input.connect( splitter );
    splitter.connect( merger, 0, 0 );
    splitter.connect( merger, 0, 1 );
    return merger;
}

function cancelAnalyserUpdates() {
    window.cancelAnimationFrame( rafID );
    rafID = null;
}

function updateAnalysers(time) {
    // Lazily grab the canvas context on the first animation frame.
    if (!analyserContext) {
        var canvas = document.getElementById("analyser");
        canvasWidth = canvas.width;
        canvasHeight = canvas.height;
        analyserContext = canvas.getContext('2d');
    }

    // analyzer draw code here
    {
        var SPACING = 3;
        var BAR_WIDTH = 1;
        var numBars = Math.round(canvasWidth / SPACING);
        var freqByteData = new Uint8Array(analyserNode.frequencyBinCount);

        analyserNode.getByteFrequencyData(freqByteData);

        analyserContext.clearRect(0, 0, canvasWidth, canvasHeight);
        analyserContext.fillStyle = '#F6D565';
        analyserContext.lineCap = 'round';
        var multiplier = analyserNode.frequencyBinCount / numBars;

        // Draw rectangle for each frequency bin.
        for (var i = 0; i < numBars; ++i) {
            var magnitude = 0;
            var offset = Math.floor( i * multiplier );
            // gotta sum/average the block, or we miss narrow-bandwidth spikes
            for (var j = 0; j < multiplier; j++)
                magnitude += freqByteData[offset + j];
            magnitude = magnitude / multiplier;
            // CLEANUP: removed an unused `magnitude2` local that also indexed
            // the array with a non-integer `i * multiplier`.
            analyserContext.fillStyle = "hsl( " + Math.round((i*360)/numBars) + ", 100%, 50%)";
            analyserContext.fillRect(i * SPACING, canvasHeight, BAR_WIDTH, -magnitude);
        }
    }

    rafID = window.requestAnimationFrame( updateAnalysers );
}

function toggleMono() {
    if (audioInput != realAudioInput) {
        // Currently mono: tear it down and reconnect the raw stereo input.
        audioInput.disconnect();
        realAudioInput.disconnect();
        audioInput = realAudioInput;
    } else {
        // Currently stereo: interpose the mono converter.
        realAudioInput.disconnect();
        audioInput = convertToMono( realAudioInput );
    }

    audioInput.connect(inputPoint);
}
function initAudio() {
    // Normalise vendor-prefixed APIs onto their standard names.
    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
    if (!navigator.cancelAnimationFrame)
        navigator.cancelAnimationFrame = navigator.webkitCancelAnimationFrame || navigator.mozCancelAnimationFrame;
    if (!navigator.requestAnimationFrame)
        navigator.requestAnimationFrame = navigator.webkitRequestAnimationFrame || navigator.mozRequestAnimationFrame;

    // Request a raw microphone stream with Chrome's processing disabled.
    var constraints = {
        "audio": {
            "mandatory": {
                "googEchoCancellation": "false",
                "googAutoGainControl": "false",
                "googNoiseSuppression": "false",
                "googHighpassFilter": "false"
            },
            "optional": []
        },
    };
    var onFailure = function(e) {
        alert('Error getting audio');
        console.log(e);
    };
    navigator.getUserMedia(constraints, gotStream, onFailure);
}
(the "Software"), to deal in the Software without restriction, including without limitation 7 | the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and 8 | to permit persons to whom the Software is furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of 11 | the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO 14 | THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 15 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF 16 | CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 17 | DEALINGS IN THE SOFTWARE. 18 | */ 19 | 20 | (function(window){ 21 | 22 | var WORKER_PATH = '/static/recorderWorker.js'; 23 | 24 | var Recorder = function(source, cfg){ 25 | var config = cfg || {}; 26 | var bufferLen = config.bufferLen || 4096; 27 | this.context = source.context; 28 | if(!this.context.createScriptProcessor){ 29 | this.node = this.context.createJavaScriptNode(bufferLen, 2, 2); 30 | } else { 31 | this.node = this.context.createScriptProcessor(bufferLen, 2, 2); 32 | } 33 | 34 | var worker = new Worker(config.workerPath || WORKER_PATH); 35 | worker.postMessage({ 36 | command: 'init', 37 | config: { 38 | sampleRate: this.context.sampleRate 39 | } 40 | }); 41 | var recording = false, 42 | currCallback; 43 | 44 | this.node.onaudioprocess = function(e){ 45 | if (!recording) return; 46 | worker.postMessage({ 47 | command: 'record', 48 | buffer: [ 49 | e.inputBuffer.getChannelData(0), 50 | e.inputBuffer.getChannelData(1) 51 | ] 52 | }); 53 | } 54 | 55 | this.configure = function(cfg){ 56 | for (var prop in cfg){ 57 | if (cfg.hasOwnProperty(prop)){ 58 | 
config[prop] = cfg[prop]; 59 | } 60 | } 61 | } 62 | 63 | this.record = function(){ 64 | recording = true; 65 | } 66 | 67 | this.stop = function(){ 68 | recording = false; 69 | } 70 | 71 | this.clear = function(){ 72 | worker.postMessage({ command: 'clear' }); 73 | } 74 | 75 | this.getBuffers = function(cb) { 76 | currCallback = cb || config.callback; 77 | worker.postMessage({ command: 'getBuffers' }) 78 | } 79 | 80 | this.exportWAV = function(cb, type){ 81 | currCallback = cb || config.callback; 82 | type = type || config.type || 'audio/wav'; 83 | if (!currCallback) throw new Error('Callback not set'); 84 | worker.postMessage({ 85 | command: 'exportWAV', 86 | type: type 87 | }); 88 | } 89 | 90 | this.exportMonoWAV = function(cb, type){ 91 | currCallback = cb || config.callback; 92 | type = type || config.type || 'audio/wav'; 93 | if (!currCallback) throw new Error('Callback not set'); 94 | worker.postMessage({ 95 | command: 'exportMonoWAV', 96 | type: type 97 | }); 98 | } 99 | 100 | worker.onmessage = function(e){ 101 | var blob = e.data; 102 | currCallback(blob); 103 | } 104 | 105 | source.connect(this.node); 106 | this.node.connect(this.context.destination); // if the script node is not connected to an output the "onaudioprocess" event is not triggered in chrome. 
107 | }; 108 | 109 | Recorder.setupDownload = function(blob, filename){ 110 | var url = (window.URL || window.webkitURL).createObjectURL(blob); 111 | var link = document.getElementById("save"); 112 | link.href = url; 113 | link.download = filename || 'output.wav'; 114 | } 115 | 116 | window.Recorder = Recorder; 117 | 118 | })(window); -------------------------------------------------------------------------------- /Pant_Ch08_Building_a_Web_Interface_for_Melissa/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Melissa-Web/static/recorderWorker.js: -------------------------------------------------------------------------------- 1 | /*License (MIT) 2 | 3 | Copyright © 2013 Matt Diamond 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated 6 | documentation files (the "Software"), to deal in the Software without restriction, including without limitation 7 | the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and 8 | to permit persons to whom the Software is furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of 11 | the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO 14 | THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 15 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF 16 | CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 17 | DEALINGS IN THE SOFTWARE. 
18 | */ 19 | 20 | var recLength = 0, 21 | recBuffersL = [], 22 | recBuffersR = [], 23 | sampleRate; 24 | 25 | this.onmessage = function(e){ 26 | switch(e.data.command){ 27 | case 'init': 28 | init(e.data.config); 29 | break; 30 | case 'record': 31 | record(e.data.buffer); 32 | break; 33 | case 'exportWAV': 34 | exportWAV(e.data.type); 35 | break; 36 | case 'exportMonoWAV': 37 | exportMonoWAV(e.data.type); 38 | break; 39 | case 'getBuffers': 40 | getBuffers(); 41 | break; 42 | case 'clear': 43 | clear(); 44 | break; 45 | } 46 | }; 47 | 48 | function init(config){ 49 | sampleRate = config.sampleRate; 50 | } 51 | 52 | function record(inputBuffer){ 53 | recBuffersL.push(inputBuffer[0]); 54 | recBuffersR.push(inputBuffer[1]); 55 | recLength += inputBuffer[0].length; 56 | } 57 | 58 | function exportWAV(type){ 59 | var bufferL = mergeBuffers(recBuffersL, recLength); 60 | var bufferR = mergeBuffers(recBuffersR, recLength); 61 | var interleaved = interleave(bufferL, bufferR); 62 | var dataview = encodeWAV(interleaved); 63 | var audioBlob = new Blob([dataview], { type: type }); 64 | 65 | this.postMessage(audioBlob); 66 | } 67 | 68 | function exportMonoWAV(type){ 69 | var bufferL = mergeBuffers(recBuffersL, recLength); 70 | var dataview = encodeWAV(bufferL, true); 71 | var audioBlob = new Blob([dataview], { type: type }); 72 | 73 | this.postMessage(audioBlob); 74 | } 75 | 76 | function getBuffers() { 77 | var buffers = []; 78 | buffers.push( mergeBuffers(recBuffersL, recLength) ); 79 | buffers.push( mergeBuffers(recBuffersR, recLength) ); 80 | this.postMessage(buffers); 81 | } 82 | 83 | function clear(){ 84 | recLength = 0; 85 | recBuffersL = []; 86 | recBuffersR = []; 87 | } 88 | 89 | function mergeBuffers(recBuffers, recLength){ 90 | var result = new Float32Array(recLength); 91 | var offset = 0; 92 | for (var i = 0; i < recBuffers.length; i++){ 93 | result.set(recBuffers[i], offset); 94 | offset += recBuffers[i].length; 95 | } 96 | return result; 97 | } 98 | 99 | function 
interleave(inputL, inputR){ 100 | var length = inputL.length + inputR.length; 101 | var result = new Float32Array(length); 102 | 103 | var index = 0, 104 | inputIndex = 0; 105 | 106 | while (index < length){ 107 | result[index++] = inputL[inputIndex]; 108 | result[index++] = inputR[inputIndex]; 109 | inputIndex++; 110 | } 111 | return result; 112 | } 113 | 114 | function floatTo16BitPCM(output, offset, input){ 115 | for (var i = 0; i < input.length; i++, offset+=2){ 116 | var s = Math.max(-1, Math.min(1, input[i])); 117 | output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true); 118 | } 119 | } 120 | 121 | function writeString(view, offset, string){ 122 | for (var i = 0; i < string.length; i++){ 123 | view.setUint8(offset + i, string.charCodeAt(i)); 124 | } 125 | } 126 | 127 | function encodeWAV(samples, mono){ 128 | var buffer = new ArrayBuffer(44 + samples.length * 2); 129 | var view = new DataView(buffer); 130 | 131 | /* RIFF identifier */ 132 | writeString(view, 0, 'RIFF'); 133 | /* file length */ 134 | view.setUint32(4, 32 + samples.length * 2, true); 135 | /* RIFF type */ 136 | writeString(view, 8, 'WAVE'); 137 | /* format chunk identifier */ 138 | writeString(view, 12, 'fmt '); 139 | /* format chunk length */ 140 | view.setUint32(16, 16, true); 141 | /* sample format (raw) */ 142 | view.setUint16(20, 1, true); 143 | /* channel count */ 144 | view.setUint16(22, mono?1:2, true); 145 | /* sample rate */ 146 | view.setUint32(24, sampleRate, true); 147 | /* byte rate (sample rate * block align) */ 148 | view.setUint32(28, sampleRate * 4, true); 149 | /* block align (channel count * bytes per sample) */ 150 | view.setUint16(32, 4, true); 151 | /* bits per sample */ 152 | view.setUint16(34, 16, true); 153 | /* data chunk identifier */ 154 | writeString(view, 36, 'data'); 155 | /* data chunk length */ 156 | view.setUint32(40, samples.length * 2, true); 157 | 158 | floatTo16BitPCM(view, 44, samples); 159 | 160 | return view; 161 | } 
-------------------------------------------------------------------------------- /Pant_Ch08_Building_a_Web_Interface_for_Melissa/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Melissa-Web/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Melissa - Web Version 6 | 7 | 8 | 9 | 10 | 11 | 12 | 65 | 66 | 67 |
68 | 69 |
Melissa
70 | 71 |
72 | 73 |
74 | 75 | 76 | 77 |
78 | 79 | 96 | 97 | 98 | -------------------------------------------------------------------------------- /Pant_Ch08_Building_a_Web_Interface_for_Melissa/Pant_Ch08_Building_a_Web_Interface_for_Melissa/Melissa-Web/web-gateway.py: -------------------------------------------------------------------------------- 1 | import os 2 | import yaml 3 | import web 4 | 5 | from GreyMatter.SenseCells.tts import tts 6 | 7 | render = web.template.render('templates/') 8 | 9 | urls = ( 10 | '/', 'index', 11 | ) 12 | 13 | profile = open('profile.yaml') 14 | profile_data = yaml.safe_load(profile) 15 | profile.close() 16 | 17 | # Functioning Variables 18 | name = profile_data['name'] 19 | 20 | tts('Welcome ' + name + ', systems are now ready to run. How can I help you?') 21 | 22 | class index: 23 | def GET(self): 24 | return render.index() 25 | 26 | def POST(self): 27 | x = web.input(myfile={}) 28 | filedir = os.getcwd() + '/uploads' # change this to the directory you want to store the file in. 29 | if 'myfile' in x: # to check if the file-object is created 30 | filepath=x.myfile.filename.replace('\\','/') # replaces the windows-style slashes with linux ones. 31 | filename=filepath.split('/')[-1] # splits the and chooses the last part (the filename with extension) 32 | fout = open(filedir +'/'+ filename,'w') # creates the file where the uploaded file should be stored 33 | fout.write(x.myfile.file.read()) # writes the uploaded file to the newly created file. 34 | fout.close() # closes the file, upload complete. 
35 | os.system('python main.py ' + filename) 36 | 37 | if __name__ == "__main__": 38 | app = web.application(urls, globals()) 39 | app.run() -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Apress Source Code 2 | 3 | This repository accompanies [*Building a Virtual Assistant for Raspberry Pi*](http://www.apress.com/9781484221662) by Tanay Pant (Apress, 2016). 4 | 5 | ![Cover image](9781484221662.jpg) 6 | 7 | Download the files as a zip using the green button, or clone the repository to your machine using Git. 8 | 9 | ## Releases 10 | 11 | Release v1.0 corresponds to the code in the published book, without corrections or updates. 12 | 13 | ## Contributions 14 | 15 | See the file Contributing.md for more information on how you can contribute to this repository. 16 | -------------------------------------------------------------------------------- /contributing.md: -------------------------------------------------------------------------------- 1 | # Contributing to Apress Source Code 2 | 3 | Copyright for Apress source code belongs to the author(s). However, under fair use you are encouraged to fork and contribute minor corrections and updates for the benefit of the author(s) and other readers. 4 | 5 | ## How to Contribute 6 | 7 | 1. Make sure you have a GitHub account. 8 | 2. Fork the repository for the relevant book. 9 | 3. Create a new branch on which to make your change, e.g. 10 | `git checkout -b my_code_contribution` 11 | 4. Commit your change. Include a commit message describing the correction. Please note that if your commit message is not clear, the correction will not be accepted. 12 | 5. Submit a pull request. 13 | 14 | Thank you for your contribution! --------------------------------------------------------------------------------